sql/sql_class.cc (4,775 lines of code) (raw):
/*
Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*****************************************************************************
**
** This file implements classes defined in sql_class.h
** Especially the classes to handle a result from a select
**
*****************************************************************************/
#include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */
#include "binlog.h"
#include "sql_priv.h"
#include "unireg.h" // REQUIRED: for other includes
#include "sql_class.h"
#include "sql_cache.h" // query_cache_abort
#include "sql_base.h" // close_thread_tables
#include "sql_time.h" // date_time_format_copy
#include "sql_acl.h" // NO_ACCESS,
// acl_getroot_no_password
#include "sql_base.h" // close_temporary_tables
#include "sql_handler.h" // mysql_ha_cleanup
#include "rpl_rli.h"
#include "rpl_filter.h"
#include "rpl_record.h"
#include "rpl_slave.h"
#include <my_bitmap.h>
#include "log_event.h"
#include "sql_audit.h"
#include <m_ctype.h>
#include <sys/stat.h>
#include <thr_alarm.h>
#ifdef __WIN__
#include <io.h>
#endif
#include <mysys_err.h>
#include <limits.h>
#include "sp_rcontext.h"
#include "sp_cache.h"
#include "transaction.h"
#include "debug_sync.h"
#include "sql_parse.h" // is_update_query
#include "sql_callback.h"
#include "lock.h"
#include "global_threads.h"
#include "mysqld.h"
#include "sql_timer.h" // thd_timer_destroy
#include "srv_session.h"
#include <mysql/psi/mysql_statement.h>
#include <boost/optional.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>
#ifdef HAVE_RAPIDJSON
#include "rapidjson/document.h"
#include "rapidjson/writer.h"
#endif
#include <sstream>
#include <list>
#ifdef HAVE_REPLICATION
#include "rpl_rli_pdb.h" // Slave_worker
#include "rpl_slave_commit_order_manager.h"
#endif
#ifdef TARGET_OS_LINUX
#include <sys/syscall.h>
#endif // TARGET_OS_LINUX
using std::min;
using std::max;
using boost::property_tree::ptree;
/*
The following is used to initialise Table_ident with an internal
table name
*/
char internal_table_name[2]= "*";
char empty_c_string[1]= {0}; /* used for not defined db */
LEX_STRING EMPTY_STR= { (char *) "", 0 };
LEX_STRING NULL_STR= { NULL, 0 };
/* empty string */
static const std::string emptyStr = "";
const char * const THD::DEFAULT_WHERE= "field list";
extern void update_sql_stats(THD *thd, SHARED_SQL_STATS *cumulative_sql_stats,
const char *sub_query, uint input_length,
bool statement_completed);
/*
When a thread is killed, the reason is stored in a buffer of this maximum length
so that clients can understand why their query failed
*/
constexpr size_t KILLED_REASON_MAX_LEN = 128;
/****************************************************************************
** User variables
****************************************************************************/
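/*
Hash callbacks for the per-THD user_vars hash: get_var_key() returns the
variable name as the hash key and free_user_var() destroys an entry when it
is removed (see the my_hash_init() calls in THD::THD() and
THD::cleanup_connection()).
*/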
extern "C" uchar *get_var_key(user_var_entry *entry, size_t *length,
my_bool not_used MY_ATTRIBUTE((unused)))
{
*length= entry->entry_name.length();
return (uchar*) entry->entry_name.ptr();
}
extern "C" void free_user_var(user_var_entry *entry)
{
entry->destroy();
}
bool Key_part_spec::operator==(Key_part_spec& other)
{
return length == other.length &&
!my_strcasecmp(system_charset_info, field_name.str,
other.field_name.str) &&
document_path_key_spec == other.document_path_key_spec;
}
/**
Construct an (almost) deep copy of this key. Only those
elements that are known to never change are not copied.
If out of memory, a partial copy is returned and an error is set
in THD.
*/
Key::Key(const Key &rhs, MEM_ROOT *mem_root)
:type(rhs.type),
key_create_info(rhs.key_create_info),
columns(rhs.columns, mem_root),
name(rhs.name),
generated(rhs.generated)
{
list_copy_and_replace_each_value(columns, mem_root);
}
/**
Construct an (almost) deep copy of this foreign key. Only those
elements that are known to never change are not copied.
If out of memory, a partial copy is returned and an error is set
in THD.
*/
Foreign_key::Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root)
:Key(rhs, mem_root),
ref_db(rhs.ref_db),
ref_table(rhs.ref_table),
ref_columns(rhs.ref_columns, mem_root),
delete_opt(rhs.delete_opt),
update_opt(rhs.update_opt),
match_opt(rhs.match_opt)
{
list_copy_and_replace_each_value(ref_columns, mem_root);
}
/*
Test if a foreign key (= generated key) is a prefix of the given key
(ignoring key name, key type and order of columns)
NOTES:
This is only used to test if an index for a FOREIGN KEY exists
IMPLEMENTATION
We only compare field names
RETURN
0 Generated key is a prefix of other key
1 Not equal
*/
bool foreign_key_prefix(Key *a, Key *b)
{
/* Ensure that 'a' is the generated key */
if (a->generated)
{
if (b->generated && a->columns.elements > b->columns.elements)
swap_variables(Key*, a, b); // Put shorter key in 'a'
}
else
{
if (!b->generated)
return TRUE; // No foreign key
swap_variables(Key*, a, b); // Put generated key in 'a'
}
/* Test if 'a' is a prefix of 'b' */
if (a->columns.elements > b->columns.elements)
return TRUE; // Can't be prefix
List_iterator<Key_part_spec> col_it1(a->columns);
List_iterator<Key_part_spec> col_it2(b->columns);
Key_part_spec *col1, *col2;
#ifdef ENABLE_WHEN_INNODB_CAN_HANDLE_SWAPED_FOREIGN_KEY_COLUMNS
while ((col1= col_it1++))
{
bool found= 0;
col_it2.rewind();
while ((col2= col_it2++))
{
if (*col1 == *col2)
{
found= TRUE;
break;
}
}
if (!found)
return TRUE; // Error
}
return FALSE; // Is prefix
#else
while ((col1= col_it1++))
{
col2= col_it2++;
if (!(*col1 == *col2))
return TRUE;
}
return FALSE; // Is prefix
#endif
}
/****************************************************************************
** Thread specific functions
****************************************************************************/
/**
Get reference to scheduler data object
@param thd THD object
@retval Scheduler data object on THD
*/
void *thd_get_scheduler_data(THD *thd)
{
return thd->scheduler.data;
}
/**
Set reference to Scheduler data object for THD object
@param thd THD object
@param data Scheduler data object to set on THD
*/
void thd_set_scheduler_data(THD *thd, void *data)
{
thd->scheduler.data= data;
}
/**
Get reference to Performance Schema object for THD object
@param thd THD object
@retval Performance schema object for thread on THD
*/
PSI_thread *thd_get_psi(THD *thd)
{
return thd->scheduler.m_psi;
}
/**
Get net_wait_timeout for THD object
@param thd THD object
@retval net_wait_timeout value for thread on THD
*/
ulong thd_get_net_wait_timeout(const THD* thd)
{
return thd->variables.net_wait_timeout_seconds;
}
/**
Set reference to Performance Schema object for THD object
@param thd THD object
@param psi Performance schema object for thread
*/
void thd_set_psi(THD *thd, PSI_thread *psi)
{
thd->scheduler.m_psi= psi;
}
/**
Set the state on connection to killed
@param thd THD object
*/
void thd_set_killed(THD *thd)
{
thd->killed= THD::KILL_CONNECTION;
}
/**
Clear errors from the previous THD
@param thd THD object
*/
void thd_clear_errors(THD *thd)
{
my_errno= 0;
thd->mysys_var->abort= 0;
}
/**
Set thread stack in THD object
@param thd Thread object
@param stack_start Start of stack to set in THD object
*/
void thd_set_thread_stack(THD *thd, const char *stack_start)
{
thd->thread_stack= stack_start;
}
/**
Lock connection data for the set of connections this connection
belongs to
@param thd THD object
*/
void thd_lock_thread_count(THD *)
{
mysql_mutex_lock(&LOCK_thread_count);
}
/**
Unlock connection data for the set of connections this connection
belongs to
@param thd THD object
*/
void thd_unlock_thread_count(THD *)
{
mysql_cond_broadcast(&COND_thread_count);
mysql_mutex_unlock(&LOCK_thread_count);
}
/**
Close the socket used by this connection
@param thd THD object
*/
void thd_close_connection(THD *thd)
{
Vio* vio = thd->get_net()->vio;
if (vio)
vio_shutdown(vio);
}
/**
Get current THD object from thread local data
@retval The THD object for the thread, NULL if not connection thread
*/
THD *thd_get_current_thd()
{
return current_thd;
}
/**
Get iterator begin of global thread list
@retval Iterator begin of global thread list
*/
Thread_iterator thd_get_global_thread_list_begin()
{
return global_thread_list_begin();
}
/**
Get iterator end of global thread list
@retval Iterator end of global thread list
*/
Thread_iterator thd_get_global_thread_list_end()
{
return global_thread_list_end();
}
extern "C"
void thd_binlog_pos(const THD *thd,
const char **file_var,
unsigned long long *pos_var,
const char **gtid_var,
const char **max_gtid_var)
{
thd->get_trans_pos(file_var, pos_var, gtid_var, max_gtid_var);
}
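/**
Append the THD's slave GTID info entries to the st_slave_gtid_info vector
passed through the opaque slave_gtid_info pointer. Does nothing if thd is NULL.
@param thd THD object
@param slave_gtid_info std::vector<st_slave_gtid_info>* passed as void*
*/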
void
thd_slave_gtid_info(const THD *thd,
void *slave_gtid_info)
{
if (thd) {
for (auto it: thd->get_slave_gtid_info()) {
static_cast<std::vector<st_slave_gtid_info>*>
(slave_gtid_info)->push_back(it);
}
}
}
/**
Set up various THD data for a new connection
thd_new_connection_setup
@note Must be called with LOCK_thread_count locked.
@param thd THD object
@param stack_start Start of stack for connection
*/
void thd_new_connection_setup(THD *thd, char *stack_start)
{
DBUG_ENTER("thd_new_connection_setup");
mutex_assert_owner_shard(SHARDED(&LOCK_thread_count), thd);
#ifdef HAVE_PSI_INTERFACE
thd_set_psi(thd,
PSI_THREAD_CALL(new_thread)
(key_thread_one_connection, thd, thd->thread_id()));
#endif
thd->set_time();
thd->prior_thr_create_utime= thd->thr_create_utime= thd->start_utime=
my_micro_time();
add_global_thread(thd);
mutex_unlock_shard(SHARDED(&LOCK_thread_count), thd);
DBUG_PRINT("info", ("init new connection. thd: 0x%lx fd: %d",
(ulong)thd, mysql_socket_getfd(thd->get_net()->vio->mysql_socket)));
thd_set_thread_stack(thd, stack_start);
DBUG_VOID_RETURN;
}
/**
Lock data that needs protection in THD object
@param thd THD object
*/
void thd_lock_data(THD *thd)
{
mysql_mutex_lock(&thd->LOCK_thd_data);
}
/**
Unlock data that needs protection in THD object
@param thd THD object
*/
void thd_unlock_data(THD *thd)
{
mysql_mutex_unlock(&thd->LOCK_thd_data);
}
/**
Support method to check if the connection has already started a transaction
@param thd THD object
@retval TRUE if connection already started transaction
*/
bool thd_is_transaction_active(THD *thd)
{
return thd->transaction.is_active();
}
/**
Check if there is buffered data on the socket representing the connection
@param thd THD object
*/
int thd_connection_has_data(THD *thd)
{
Vio *vio= thd->get_net()->vio;
return vio->has_data(vio);
}
/**
Set reading/writing on socket, used by SHOW PROCESSLIST
@param thd THD object
@param val Value to set it to (0 or 1)
*/
void thd_set_net_read_write(THD *thd, uint val)
{
thd->get_net()->reading_or_writing= val;
}
/**
Get reading/writing on socket from THD object
@param thd THD object
@retval get_net()->reading_or_writing value for thread on THD.
*/
uint thd_get_net_read_write(THD *thd)
{
return thd->get_net()->reading_or_writing;
}
/**
Set reference to mysys variable in THD object
@param thd THD object
@param mysys_var Reference to set
*/
void thd_set_mysys_var(THD *thd, st_my_thread_var *mysys_var)
{
thd->set_mysys_var(mysys_var);
}
/**
Get socket file descriptor for this connection
@param thd THD object
@retval Socket of the connection
*/
my_socket thd_get_fd(THD *thd)
{
return mysql_socket_getfd(thd->get_net()->vio->mysql_socket);
}
/**
Set thread specific environment required for thd cleanup in thread pool.
@param thd THD object
@retval 1 if the thread-specific environment could be set, else 0
*/
int thd_store_globals(THD* thd)
{
return thd->store_globals();
}
/**
Get thread attributes for connection threads
@retval Reference to thread attribute for connection threads
*/
pthread_attr_t *get_connection_attrib(void)
{
return &connection_attrib;
}
/**
Get max number of connections
@retval Max number of connections for MySQL Server
*/
ulong get_max_connections(void)
{
return max_connections;
}
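/**
Create an anonymous temporary file in the given directory. On Unix the file
is unlinked immediately so it disappears once the descriptor is closed.
@param path directory in which to create the file
@param prefix prefix for the generated file name
@retval >= 0 file descriptor of the temporary file
@retval < 0 error
*/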
int mysql_tmpfile_path(const char *path, const char *prefix)
{
DBUG_ASSERT(path != NULL);
DBUG_ASSERT((strlen(path) + strlen(prefix)) <= FN_REFLEN);
char filename[FN_REFLEN];
File fd = create_temp_file(filename, path, prefix,
#ifdef __WIN__
O_BINARY | O_TRUNC | O_SEQUENTIAL |
O_SHORT_LIVED |
#endif /* __WIN__ */
O_CREAT | O_EXCL | O_RDWR | O_TEMPORARY,
MYF(MY_WME));
if (fd >= 0) {
#ifndef __WIN__
/*
This can be removed once the following bug is fixed:
Bug #28903 create_temp_file() doesn't honor O_TEMPORARY option
(file not removed) (Unix)
*/
unlink(filename);
#endif /* !__WIN__ */
}
return fd;
}
/*
The following functions form part of the C plugin API
*/
extern "C" int mysql_tmpfile(const char *prefix)
{
return mysql_tmpfile_path(mysql_tmpdir, prefix);
}
extern "C"
int thd_in_lock_tables(const THD *thd)
{
return MY_TEST(thd->in_lock_tables);
}
extern "C"
int thd_tablespace_op(const THD *thd)
{
return MY_TEST(thd->tablespace_op);
}
extern "C"
const char *set_thd_proc_info(void *thd_arg, const char *info,
const char *calling_function,
const char *calling_file,
const unsigned int calling_line)
{
PSI_stage_info old_stage;
PSI_stage_info new_stage;
old_stage.m_key= 0;
old_stage.m_name= info;
set_thd_stage_info(thd_arg, & old_stage, & new_stage,
calling_function, calling_file, calling_line);
return new_stage.m_name;
}
extern "C"
void set_thd_stage_info(void *opaque_thd,
const PSI_stage_info *new_stage,
PSI_stage_info *old_stage,
const char *calling_func,
const char *calling_file,
const unsigned int calling_line)
{
THD *thd= (THD*) opaque_thd;
if (thd == NULL)
thd= current_thd;
thd->enter_stage(new_stage, old_stage, calling_func, calling_file, calling_line);
}
void THD::enter_stage(const PSI_stage_info *new_stage,
PSI_stage_info *old_stage,
const char *calling_func,
const char *calling_file,
const unsigned int calling_line)
{
DBUG_PRINT("THD::enter_stage", ("%s:%d", calling_file, calling_line));
if (old_stage != NULL)
{
old_stage->m_key= m_current_stage_key;
old_stage->m_name= proc_info;
}
if (new_stage != NULL)
{
const char *msg= new_stage->m_name;
#if defined(ENABLED_PROFILING)
profiling.status_change(msg, calling_func, calling_file, calling_line);
#endif
m_current_stage_key= new_stage->m_key;
proc_info= msg;
MYSQL_SET_STAGE(m_current_stage_key, calling_file, calling_line);
}
return;
}
extern "C"
void thd_enter_cond(MYSQL_THD thd, mysql_cond_t *cond, mysql_mutex_t *mutex,
const PSI_stage_info *stage, PSI_stage_info *old_stage,
const char *src_function, const char *src_file,
int src_line)
{
if (!thd)
thd= current_thd;
return thd->enter_cond(cond, mutex, stage, old_stage,
src_function, src_file, src_line);
}
extern "C"
void thd_exit_cond(MYSQL_THD thd, const PSI_stage_info *stage,
const char *src_function, const char *src_file,
int src_line)
{
if (!thd)
thd= current_thd;
thd->exit_cond(stage, src_function, src_file, src_line);
return;
}
extern "C"
void **thd_ha_data(const THD *thd, const struct handlerton *hton)
{
return (void **) &thd->ha_data[hton->slot].ha_ptr;
}
extern "C"
void thd_storage_lock_wait(THD *thd, long long value)
{
thd->utime_after_lock+= value;
}
/**
Provide a handler data getter to simplify coding
*/
extern "C"
void *thd_get_ha_data(const THD *thd, const struct handlerton *hton)
{
return *thd_ha_data(thd, hton);
}
/**
Provide a handler data setter to simplify coding
@see thd_set_ha_data() definition in plugin.h
*/
extern "C"
void thd_set_ha_data(THD *thd, const struct handlerton *hton,
const void *ha_data)
{
plugin_ref *lock= &thd->ha_data[hton->slot].lock;
if (ha_data && !*lock)
*lock= ha_lock_engine(NULL, (handlerton*) hton);
else if (!ha_data && *lock)
{
plugin_unlock(NULL, *lock);
*lock= NULL;
}
*thd_ha_data(thd, hton)= (void*) ha_data;
}
extern "C"
long long thd_test_options(const THD *thd, long long test_options)
{
return thd->variables.option_bits & test_options;
}
extern "C"
int thd_sql_command(const THD *thd)
{
return (int) thd->lex->sql_command;
}
extern "C"
int thd_tx_isolation(const THD *thd)
{
return (int) thd->tx_isolation;
}
extern "C"
int thd_tx_is_read_only(const THD *thd)
{
return (int) thd->tx_read_only;
}
extern "C"
void thd_inc_row_count(THD *thd)
{
thd->get_stmt_da()->inc_current_row_for_warning();
}
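/**
Record the log sequence number (LSN) reported by a storage engine on the
THD's prepared_engine tracker, keyed by engine type.
*/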
extern "C"
void thd_store_lsn(THD* thd, ulonglong lsn, int engine_type)
{
DBUG_ASSERT(thd->prepared_engine != NULL);
thd->prepared_engine->update_lsn(engine_type, lsn);
}
/**
Dumps a text description of a thread, its security context
(user, host) and the current query.
@param thd thread context
@param buffer pointer to preferred result buffer
@param length length of buffer
@param max_query_len how many chars of query to copy (0 for all)
@req LOCK_thread_count
@note LOCK_thread_count mutex is not necessary when the function is invoked on
the currently running thread (current_thd) or if the caller in some other
way guarantees that access to thd->query is serialized.
@return Pointer to string
*/
extern "C"
char *thd_security_context(THD *thd, char *buffer, unsigned int length,
unsigned int max_query_len) {
return thd_security_context_internal(thd, buffer, length, max_query_len,
false /* show_query_digest */);
}
char *thd_security_context_internal(
THD *thd, char *buffer, unsigned int length, unsigned int max_query_len,
my_bool show_query_digest)
{
String str(buffer, length, &my_charset_latin1);
Security_context *sctx= &thd->main_security_ctx;
char header[256];
int len;
int err;
/*
The pointers thd->query and thd->proc_info might change since they are
being modified concurrently. This is acceptable for proc_info since its
value doesn't have to be very accurate and the memory it points to is
static, but we need to take a snapshot of the pointer value to avoid
using NULL. The pointer to thd->query, however, doesn't point to static
memory and has to be protected by LOCK_thread_count or we risk pointing
to uninitialized memory.
*/
const char *proc_info= thd->proc_info;
len= my_snprintf(header, sizeof(header),
"MySQL thread id %lu, OS thread handle 0x%lx, query id %lu",
thd->thread_id(), (ulong) thd->real_id, (ulong) thd->query_id);
str.length(0);
str.append(header, len);
if (sctx->get_host()->length())
{
str.append(' ');
str.append(sctx->get_host()->ptr());
}
if (sctx->get_ip()->length())
{
str.append(' ');
str.append(sctx->get_ip()->ptr());
}
if (sctx->user)
{
str.append(' ');
str.append(sctx->user);
}
if (proc_info)
{
str.append(' ');
str.append(proc_info);
}
/*
InnoDB might be holding a big kernel lock like kernel_mutex. Don't
block here to avoid deadlock -- http://bugs.mysql.com/60682
*/
err= mysql_mutex_trylock(&thd->LOCK_thd_data);
DBUG_EXECUTE_IF("pretend_thd_security_context_busy",
{ if (!err) {
mysql_mutex_unlock(&thd->LOCK_thd_data);
err= EBUSY;
} });
if (!err)
{
if (thd->query())
{
String digest_buffer;
const char *query_str;
const CHARSET_INFO *query_cs = NULL;
uint32 query_len;
if (show_query_digest) {
thd->get_query_digest(&digest_buffer, &query_str, &query_len,
&query_cs);
} else {
query_str = thd->query();
query_len = thd->query_length();
}
if (max_query_len >= 1) {
query_len= min(query_len, max_query_len);
}
str.append('\n');
str.append(query_str, query_len);
}
mysql_mutex_unlock(&thd->LOCK_thd_data);
}
else
{
const char* busy_msg= "::BUSY::";
DBUG_ASSERT(err == EBUSY);
str.append('\n');
str.append(busy_msg, strlen(busy_msg));
}
if (str.c_ptr_safe() == buffer)
return buffer;
/*
We have to copy the new string to the destination buffer because the string
was reallocated to a larger buffer to be able to fit.
*/
DBUG_ASSERT(buffer != NULL);
length= min(str.length(), length-1);
memcpy(buffer, str.c_ptr_quick(), length);
/* Make sure that the new string is null terminated */
buffer[length]= '\0';
return buffer;
}
/**
Implementation of Drop_table_error_handler::handle_condition().
The reason for having this implementation is to silence technical low-level
warnings during a DROP TABLE operation. Currently we don't want to expose
the following warnings during DROP TABLE:
- Some of the table files are missing or invalid (the table is going to be
deleted anyway, so it does not matter that something is missing);
- A trigger associated with the table does not have a DEFINER (triggers are
loaded for the table being dropped, so we may get a warning that a trigger
is missing the DEFINER attribute during the DROP TABLE operation).
@return TRUE if the condition is handled.
*/
bool Drop_table_error_handler::handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
Sql_condition::enum_warning_level level,
const char* msg,
Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
return ((sql_errno == EE_DELETE && my_errno == ENOENT) ||
sql_errno == ER_TRG_NO_DEFINER);
}
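/**
Copy the open-tables state (open, temporary and derived tables, locks,
locked-tables mode and registered re-prepare observers) from another
Open_tables_state object.
*/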
void Open_tables_state::set_open_tables_state(Open_tables_state *state)
{
this->open_tables= state->open_tables;
this->temporary_tables= state->temporary_tables;
this->derived_tables= state->derived_tables;
this->lock= state->lock;
this->extra_lock= state->extra_lock;
this->locked_tables_mode= state->locked_tables_mode;
this->current_tablenr= state->current_tablenr;
this->state_flags= state->state_flags;
this->reset_reprepare_observers();
for (int i= 0; i < state->m_reprepare_observers.elements(); ++i)
this->push_reprepare_observer(state->m_reprepare_observers.at(i));
}
void Open_tables_state::reset_open_tables_state()
{
open_tables= NULL;
temporary_tables= NULL;
derived_tables= NULL;
lock= NULL;
extra_lock= NULL;
locked_tables_mode= LTM_NONE;
state_flags= 0U;
reset_reprepare_observers();
}
THD::THD(bool enable_plugins)
:Statement(&main_lex, &main_mem_root, STMT_CONVENTIONAL_EXECUTION,
/* statement id */ 0),
rli_fake(0), rli_slave(NULL),
is_admin_conn(false),
in_sub_stmt(0),
fill_status_recursion_level(0),
fill_variables_recursion_level(0),
binlog_row_event_extra_data(NULL),
binlog_unsafe_warning_flags(0),
binlog_table_maps(0),
binlog_accessed_db_names(NULL),
m_trans_log_file(NULL),
m_trans_fixed_log_file(NULL),
m_trans_end_pos(0),
m_trans_gtid(NULL),
m_trans_max_gtid(NULL),
table_map_for_update(0),
m_examined_row_count(0),
m_accessed_rows_and_keys(0),
m_digest(NULL),
m_statement_psi(NULL),
m_idle_psi(NULL),
m_server_idle(false),
next_to_commit(NULL),
is_fatal_error(0),
transaction_rollback_request(0),
is_fatal_sub_stmt_error(false),
rand_used(0),
time_zone_used(0),
in_lock_tables(0),
bootstrap(0),
derived_tables_processing(FALSE),
really_error_partial_strict(false),
really_audit_instrumented_event(0),
audited_event_for_command(false),
sp_runtime_ctx(NULL),
m_parser_state(NULL),
#if defined(ENABLED_DEBUG_SYNC)
debug_sync_control(0),
#endif /* defined(ENABLED_DEBUG_SYNC) */
m_enable_plugins(enable_plugins),
owned_gtid_set(global_sid_map),
connection_certificate_buf(NULL),
main_da(0, false),
m_stmt_da(&main_da),
conn_timeout_err_msg(NULL),
duplicate_slave_id(false),
db_stats(NULL)
{
ulong tmp;
reset_first_successful_insert_id();
mdl_context.init(this);
/*
Pass nominal parameters to init_alloc_root only to ensure that
the destructor works OK in case of an error. The main_mem_root
will be re-initialized in init_for_queries().
*/
init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
stmt_arena= this;
thread_stack= 0;
catalog= (char*)"std"; // the only catalog we have for now
main_security_ctx.init();
security_ctx= &main_security_ctx;
no_errors= 0;
password= 0;
query_start_used= query_start_usec_used= 0;
count_cuted_fields= CHECK_FIELD_IGNORE;
killed= NOT_KILLED;
killed_reason= NULL;
col_access= 0;
is_slave_error= thread_specific_used= FALSE;
my_hash_clear(&handler_tables_hash);
tmp_table= 0;
cuted_fields= 0L;
m_sent_row_count= 0L;
limit_found_rows= 0;
m_row_count_func= -1;
statement_id_counter= 0UL;
// Must be reset to handle error with THD's created for init of mysqld
lex->current_select= 0;
user_time.tv_sec= 0;
user_time.tv_usec= 0;
start_time.tv_sec= 0;
start_time.tv_usec= 0;
start_utime= prior_thr_create_utime = 0L;
utime_after_lock= 0L;
current_linfo= 0;
slave_thread= 0;
memset(&variables, 0, sizeof(variables));
m_thread_id= 0;
system_thread_id= 0;
one_shot_set= 0;
file_id= 0;
query_id= 0;
query_name_consts= 0;
db_charset= global_system_variables.collation_database;
my_hash_clear(&db_read_only_hash);
memset(ha_data, 0, sizeof(ha_data));
mysys_var= 0;
binlog_evt_union.do_union= FALSE;
enable_slow_log= 0;
commit_error= CE_NONE;
commit_consensus_error= false;
last_cpu_info_result = -1;
cumulative_sql_stats = nullptr;
cpu_start_timespec = {};
should_update_stats = false;
durability_property= HA_REGULAR_DURABILITY;
#ifndef DBUG_OFF
dbug_sentry=THD_SENTRY_MAGIC;
#endif
#ifndef EMBEDDED_LIBRARY
mysql_mutex_init(key_LOCK_thd_audit_data, &LOCK_thd_audit_data,
MY_MUTEX_INIT_FAST);
mysql_audit_init_thd(this);
get_net()->vio=0;
#endif
client_capabilities= 0; // minimalistic client
ull=0;
system_thread= NON_SYSTEM_THREAD;
cleanup_done= abort_on_warning= 0;
m_release_resources_started = 0;
m_release_resources_done= false;
peer_port= 0; // For SHOW PROCESSLIST
transaction.m_pending_rows_event= 0;
transaction.flags.enabled= true;
#ifdef SIGNAL_WITH_VIO_SHUTDOWN
active_vio = 0;
#endif
mysql_mutex_init(key_LOCK_thd_data, &LOCK_thd_data, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_thd_db_read_only_hash, &LOCK_thd_db_read_only_hash,
MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_db_metadata, &LOCK_db_metadata,
MY_MUTEX_INIT_FAST);
/* Variables with default values */
proc_info="login";
where= THD::DEFAULT_WHERE;
server_id = ::server_id;
unmasked_server_id = server_id;
slave_net = 0;
set_command(COM_CONNECT);
*scramble= '\0';
skip_gtid_rollback= false;
/* Call to init() below requires fully initialized Open_tables_state. */
reset_open_tables_state();
init();
#if defined(ENABLED_PROFILING)
profiling.set_thd(this);
#endif
m_user_connect= NULL;
my_hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
(my_hash_get_key) get_var_key,
(my_hash_free_key) free_user_var, 0);
sp_proc_cache= NULL;
sp_func_cache= NULL;
/* For user vars replication*/
if (opt_bin_log)
my_init_dynamic_array(&user_var_events,
sizeof(BINLOG_USER_VAR_EVENT *), 16, 16);
else
memset(&user_var_events, 0, sizeof(user_var_events));
/* Protocol */
protocol= &protocol_text; // Default protocol
protocol_text.init(this);
protocol_binary.init(this);
tablespace_op=FALSE;
should_write_gtid = TRUE;
tmp= sql_rnd_with_mutex();
randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::global_query_id);
substitute_null_with_insert_id = FALSE;
thr_lock_info_init(&lock_info); /* safety: will be reset after start */
m_internal_handler= NULL;
m_binlog_invoker= FALSE;
memset(&invoker_user, 0, sizeof(invoker_user));
memset(&invoker_host, 0, sizeof(invoker_host));
binlog_next_event_pos.file_name= NULL;
binlog_next_event_pos.pos= 0;
trans_gtid[0] = 0;
trans_max_gtid[0] = 0;
timer= NULL;
timer_cache= NULL;
#ifndef DBUG_OFF
gis_debug= 0;
#endif
ec= NULL;
m_token_array= NULL;
if (max_digest_length > 0)
{
m_token_array= (unsigned char*) my_malloc(max_digest_length,
MYF(MY_WME));
}
prepared_engine= NULL;
}
void THD::print_proc_info(const char *, ...)
{
}
void THD::push_internal_handler(Internal_error_handler *handler)
{
if (m_internal_handler)
{
handler->m_prev_internal_handler= m_internal_handler;
m_internal_handler= handler;
}
else
{
m_internal_handler= handler;
}
}
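/**
Walk the stack of pushed internal error handlers, giving each one a chance
to handle the condition.
@retval TRUE if one of the handlers handled the condition
@retval FALSE if no handler is installed or none handled it
*/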
bool THD::handle_condition(uint sql_errno,
const char* sqlstate,
Sql_condition::enum_warning_level level,
const char* msg,
Sql_condition ** cond_hdl)
{
if (!m_internal_handler)
{
*cond_hdl= NULL;
return FALSE;
}
for (Internal_error_handler *error_handler= m_internal_handler;
error_handler;
error_handler= error_handler->m_prev_internal_handler)
{
if (error_handler->handle_condition(this, sql_errno, sqlstate, level, msg,
cond_hdl))
{
return TRUE;
}
}
return FALSE;
}
Internal_error_handler *THD::pop_internal_handler()
{
DBUG_ASSERT(m_internal_handler != NULL);
Internal_error_handler *popped_handler= m_internal_handler;
m_internal_handler= m_internal_handler->m_prev_internal_handler;
return popped_handler;
}
void THD::raise_error(uint sql_errno)
{
const char* msg= ER(sql_errno);
(void) raise_condition(sql_errno,
NULL,
Sql_condition::WARN_LEVEL_ERROR,
msg);
}
void THD::raise_error_printf(uint sql_errno, ...)
{
va_list args;
char ebuff[MYSQL_ERRMSG_SIZE];
DBUG_ENTER("THD::raise_error_printf");
DBUG_PRINT("my", ("nr: %d errno: %d", sql_errno, errno));
const char* format= ER(sql_errno);
va_start(args, sql_errno);
my_vsnprintf(ebuff, sizeof(ebuff), format, args);
va_end(args);
(void) raise_condition(sql_errno,
NULL,
Sql_condition::WARN_LEVEL_ERROR,
ebuff);
DBUG_VOID_RETURN;
}
void THD::raise_warning(uint sql_errno)
{
const char* msg= ER(sql_errno);
(void) raise_condition(sql_errno,
NULL,
Sql_condition::WARN_LEVEL_WARN,
msg);
}
void THD::raise_warning_printf(uint sql_errno, ...)
{
va_list args;
char ebuff[MYSQL_ERRMSG_SIZE];
DBUG_ENTER("THD::raise_warning_printf");
DBUG_PRINT("enter", ("warning: %u", sql_errno));
const char* format= ER(sql_errno);
va_start(args, sql_errno);
my_vsnprintf(ebuff, sizeof(ebuff), format, args);
va_end(args);
(void) raise_condition(sql_errno,
NULL,
Sql_condition::WARN_LEVEL_WARN,
ebuff);
DBUG_VOID_RETURN;
}
void THD::raise_note(uint sql_errno)
{
DBUG_ENTER("THD::raise_note");
DBUG_PRINT("enter", ("code: %d", sql_errno));
if (!(variables.option_bits & OPTION_SQL_NOTES))
DBUG_VOID_RETURN;
const char* msg= ER(sql_errno);
(void) raise_condition(sql_errno,
NULL,
Sql_condition::WARN_LEVEL_NOTE,
msg);
DBUG_VOID_RETURN;
}
void THD::raise_note_printf(uint sql_errno, ...)
{
va_list args;
char ebuff[MYSQL_ERRMSG_SIZE];
DBUG_ENTER("THD::raise_note_printf");
DBUG_PRINT("enter",("code: %u", sql_errno));
if (!(variables.option_bits & OPTION_SQL_NOTES))
DBUG_VOID_RETURN;
const char* format= ER(sql_errno);
va_start(args, sql_errno);
my_vsnprintf(ebuff, sizeof(ebuff), format, args);
va_end(args);
(void) raise_condition(sql_errno,
NULL,
Sql_condition::WARN_LEVEL_NOTE,
ebuff);
DBUG_VOID_RETURN;
}
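/**
Return the statement start time truncated to the requested number of decimal
digits of sub-second precision, marking the start time (and its microsecond
part, if requested) as used.
*/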
struct timeval THD::query_start_timeval_trunc(uint decimals)
{
struct timeval tv;
tv.tv_sec= start_time.tv_sec;
query_start_used= 1;
if (decimals)
{
tv.tv_usec= start_time.tv_usec;
my_timeval_trunc(&tv, decimals);
query_start_usec_used= 1;
}
else
{
tv.tv_usec= 0;
}
return tv;
}
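/**
Raise a SQL condition (error, warning or note): apply strict-mode escalation
and audit instrumentation for warnings, offer the condition to the internal
error handlers, update the diagnostics area, and finally push the condition
to the warning list unless it is a fatal out-of-memory error.
*/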
Sql_condition* THD::raise_condition(uint sql_errno,
const char* sqlstate,
Sql_condition::enum_warning_level level,
const char* msg)
{
Diagnostics_area *da= get_stmt_da();
Sql_condition *cond= NULL;
DBUG_ENTER("THD::raise_condition");
if (!(variables.option_bits & OPTION_SQL_NOTES) &&
(level == Sql_condition::WARN_LEVEL_NOTE))
DBUG_RETURN(NULL);
da->opt_clear_warning_info(query_id);
/*
TODO: replace by DBUG_ASSERT(sql_errno != 0) once all bugs similar to
Bug#36768 are fixed: a SQL condition must have a real (!=0) error number
so that it can be caught by handlers.
*/
if (sql_errno == 0)
sql_errno= ER_UNKNOWN_ERROR;
if (msg == NULL)
msg= ER(sql_errno);
if (sqlstate == NULL)
sqlstate= mysql_errno_to_sqlstate(sql_errno);
bool is_slave_thread = (system_thread == SYSTEM_THREAD_SLAVE_SQL) ||
(system_thread == SYSTEM_THREAD_SLAVE_WORKER);
if ((level == Sql_condition::WARN_LEVEL_WARN) &&
((really_abort_on_warning() && !query_mt_throttled(sql_errno)) ||
(really_error_partial_strict && !is_slave_thread)))
{
if (really_audit_instrumented_event > 1 &&
!audited_event_for_command)
{
audited_event_for_command = true;
mysql_audit_general(this, MYSQL_AUDIT_GENERAL_ERROR_INSTR,
sql_errno,
msg);
}
/*
FIXME:
push_warning and strict SQL_MODE case.
*/
level= Sql_condition::WARN_LEVEL_ERROR;
killed= THD::KILL_BAD_DATA;
}
else if ((level == Sql_condition::WARN_LEVEL_WARN) &&
really_audit_instrumented_event > 0 &&
!audited_event_for_command &&
!is_slave_thread)
{
audited_event_for_command = true;
mysql_audit_general(this, MYSQL_AUDIT_GENERAL_WARNING,
sql_errno,
msg);
}
switch (level)
{
case Sql_condition::WARN_LEVEL_NOTE:
case Sql_condition::WARN_LEVEL_WARN:
got_warning= 1;
break;
case Sql_condition::WARN_LEVEL_ERROR:
break;
default:
DBUG_ASSERT(FALSE);
}
if (handle_condition(sql_errno, sqlstate, level, msg, &cond))
DBUG_RETURN(cond);
if (level == Sql_condition::WARN_LEVEL_ERROR)
{
is_slave_error= 1; // needed to catch query errors during replication
/*
thd->lex->current_select == 0 if lex structure is not inited
(not query command (COM_QUERY))
*/
if (lex->current_select &&
lex->current_select->no_error && !is_fatal_error)
{
DBUG_PRINT("error",
("Error converted to warning: current_select: no_error %d "
"fatal_error: %d",
(lex->current_select ?
lex->current_select->no_error : 0),
(int) is_fatal_error));
}
else
{
if (!da->is_error())
{
set_row_count_func(-1);
da->set_error_status(sql_errno, msg, sqlstate, cond);
}
}
}
query_cache_abort(&query_cache_tls);
/*
Avoid pushing a condition for fatal out of memory errors as this will
require memory allocation and therefore might fail. Non fatal out of
memory errors can occur if raised by SIGNAL/RESIGNAL statement.
*/
if (!(is_fatal_error && (sql_errno == EE_OUTOFMEMORY ||
sql_errno == ER_OUTOFMEMORY)))
{
cond= da->push_warning(this, sql_errno, sqlstate, level, msg);
}
DBUG_RETURN(cond);
}
extern "C"
void *thd_alloc(MYSQL_THD thd, unsigned int size)
{
return thd->alloc(size);
}
extern "C"
void *thd_calloc(MYSQL_THD thd, unsigned int size)
{
return thd->calloc(size);
}
extern "C"
char *thd_strdup(MYSQL_THD thd, const char *str)
{
return thd->strdup(str);
}
extern "C"
char *thd_strmake(MYSQL_THD thd, const char *str, unsigned int size)
{
return thd->strmake(str, size);
}
extern "C"
LEX_STRING *thd_make_lex_string(THD *thd, LEX_STRING *lex_str,
const char *str, unsigned int size,
int allocate_lex_string)
{
return thd->make_lex_string(lex_str, str, size,
(bool) allocate_lex_string);
}
extern "C"
void *thd_memdup(MYSQL_THD thd, const void* str, unsigned int size)
{
return thd->memdup(str, size);
}
extern "C"
void thd_get_xid(const MYSQL_THD thd, MYSQL_XID *xid)
{
*xid = *(MYSQL_XID *) &thd->transaction.xid_state.xid;
}
#ifdef _WIN32
extern "C" THD *_current_thd_noinline(void)
{
return my_pthread_getspecific_ptr(THD*,THR_THD);
}
#endif
/*
Init common variables that have to be reset on start and on cleanup_connection
*/
void THD::init(void)
{
mysql_mutex_lock(&LOCK_global_system_variables);
plugin_thdvar_init(this, m_enable_plugins);
/*
variables= global_system_variables above has reset
variables.pseudo_thread_id to 0. We need to correct it here to
avoid temporary tables replication failure.
*/
fix_pseudo_thread_id();
/*
variables= global_system_variables also clobbers several variables set
based on per-connection capabilities
*/
fix_capability_based_variables();
mysql_mutex_unlock(&LOCK_global_system_variables);
server_status= SERVER_STATUS_AUTOCOMMIT;
if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)
server_status|= SERVER_STATUS_NO_BACKSLASH_ESCAPES;
transaction.all.reset_unsafe_rollback_flags();
transaction.stmt.reset_unsafe_rollback_flags();
open_options=ha_open_options;
update_lock_default= (variables.low_priority_updates ?
TL_WRITE_LOW_PRIORITY :
TL_WRITE);
tx_isolation= (enum_tx_isolation) variables.tx_isolation;
tx_read_only= variables.tx_read_only;
update_charset();
reset_current_stmt_binlog_format_row();
reset_binlog_local_stmt_filter();
set_status_var_init();
binlog_row_event_extra_data= 0;
if (variables.sql_log_bin)
variables.option_bits|= OPTION_BIN_LOG;
else
variables.option_bits&= ~OPTION_BIN_LOG;
io_perf_read.init();
io_perf_write.init();
io_perf_read_blob.init();
io_perf_read_primary.init();
io_perf_read_secondary.init();
count_comment_bytes= 0;
#if defined(ENABLED_DEBUG_SYNC)
/* Initialize the Debug Sync Facility. See debug_sync.cc. */
debug_sync_init_thread(this);
#endif /* defined(ENABLED_DEBUG_SYNC) */
/* Initialize session_tracker and create all tracker objects */
session_tracker.init(this->charset());
session_tracker.enable(this);
owned_gtid.sidno= 0;
owned_gtid.gno= 0;
/* An auto-created snapshot is released after the statement. If somehow it
wasn't, any snapshot is released on connection close or change user, so
reset the auto flag here as well. */
m_created_auto_stats_snapshot = false;
mt_key_clear(THD::SQL_ID);
mt_key_clear(THD::CLIENT_ID);
mt_key_clear(THD::PLAN_ID);
mt_key_clear(THD::SQL_HASH);
set_plan_capture(false);
reset_stmt_stats();
reset_all_mt_table_filled();
}
/*
Init THD for query processing.
This has to be called once before we call mysql_parse.
See also comments in sql_class.h.
*/
void THD::init_for_queries(Relay_log_info *rli)
{
set_time();
ha_enable_transaction(this,TRUE);
reset_root_defaults(mem_root, variables.query_alloc_block_size,
variables.query_prealloc_size);
reset_root_defaults(&transaction.mem_root,
variables.trans_alloc_block_size,
variables.trans_prealloc_size);
transaction.xid_state.xid.null();
transaction.xid_state.in_thd=1;
count_comment_bytes = 0;
#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
if (rli)
{
if ((rli->deferred_events_collecting= rpl_filter->is_on()))
{
rli->deferred_events= new Deferred_log_events(rli);
}
rli_slave= rli;
DBUG_ASSERT(rli_slave->info_thd == this && slave_thread);
}
#endif
}
/**
Reset statement stats counters before next statement.
*/
void THD::reset_stmt_stats()
{
m_tmp_table_bytes_written = 0; /* temp table space bytes written */
m_filesort_bytes_written = 0; /* filesort space bytes written */
m_index_dive_count = 0; /* index dive count */
m_index_dive_cpu = 0; /* index dive cpu time in microseconds */
m_compilation_cpu = 0; /* compilation cpu time in microseconds */
stmt_elapsed_utime = 0; /* statement elapsed time in microseconds */
/* The disk usage of a single statement is the difference between the peak
session usage during the statement execution and the session usage at
the start of the statement. So between statements the peak and offset
are set to the current usage, and the peak can only go up during the
statement execution (see adjust_by()). */
m_stmt_tmp_table_disk_usage_peak = status_var.tmp_table_disk_usage;
m_stmt_tmp_table_disk_usage_offset = status_var.tmp_table_disk_usage;
m_stmt_filesort_disk_usage_peak = status_var.filesort_disk_usage;
m_stmt_filesort_disk_usage_offset = status_var.filesort_disk_usage;
m_binlog_bytes_written = 0; /* binlog bytes written */
m_stmt_total_write_time = 0;
m_stmt_start_write_time_is_set = false;
}
/**
Reset the table-filled indicators before the next statement.
*/
void THD::reset_all_mt_table_filled()
{
for (int i = 0; i < MT_TABLE_NAME_MAX; i++)
{
mt_table_filled[i] = false;
}
}
/*
Do what's needed when one invokes change user
SYNOPSIS
cleanup_connection()
IMPLEMENTATION
Reset all resources that are connection specific
*/
void THD::cleanup_connection(void)
{
cleanup();
killed= NOT_KILLED;
cleanup_done= 0;
/* Aggregate to global status now that cleanup is done. */
mysql_mutex_lock(&LOCK_status);
add_to_status(&global_status_var, &status_var);
set_status_var_init();
mysql_mutex_unlock(&LOCK_status);
propagate_pending_global_disk_usage();
if (variables.sql_stats_snapshot)
toggle_sql_stats_snapshot(this);
init();
stmt_map.reset();
my_hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
(my_hash_get_key) get_var_key,
(my_hash_free_key) free_user_var, 0);
sp_cache_clear(&sp_proc_cache);
sp_cache_clear(&sp_func_cache);
#if defined(HAVE_OPENSSL)
reset_connection_certificate();
#endif
clear_error();
#ifndef DBUG_OFF
/* DEBUG code only (begin) */
bool check_cleanup= FALSE;
DBUG_EXECUTE_IF("debug_test_cleanup_connection", check_cleanup= TRUE;);
if(check_cleanup)
{
/* isolation level should be default */
DBUG_ASSERT(variables.tx_isolation == ISO_REPEATABLE_READ);
/* check autocommit is ON by default */
DBUG_ASSERT(server_status == SERVER_STATUS_AUTOCOMMIT);
/* check prepared stmts are cleaned up */
DBUG_ASSERT(prepared_stmt_count == 0);
/* check diagnostic area is cleaned up */
DBUG_ASSERT(get_stmt_da()->status() == Diagnostics_area::DA_EMPTY);
/* check if temp tables are deleted */
DBUG_ASSERT(temporary_tables == NULL);
/* check if tables are unlocked */
DBUG_ASSERT(locked_tables_list.locked_tables() == NULL);
}
/* DEBUG code only (end) */
#endif
}
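/**
Assign this THD a new thread id from the global total_thread_ids counter.
*/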
my_thread_id THD::set_new_thread_id()
{
/*
DBUG_EXECUTE_IF("skip_to_largest_thread_id", {
total_thread_ids.store(std::numeric_limits<uint32_t>::max());
}
);
DBUG_EXECUTE_IF("skip_to_second_largest_thread_id", {
total_thread_ids.store(std::numeric_limits<uint32_t>::max() - 1);
}
);
*/
m_thread_id = my_thread_id(++total_thread_ids);
return m_thread_id;
}
/*
Do what's needed when one invokes change user.
Also used during THD::release_resources, i.e. prior to THD destruction.
*/
void THD::cleanup(void)
{
DBUG_ENTER("THD::cleanup");
DBUG_ASSERT(cleanup_done == 0);
DEBUG_SYNC(this, "thd_cleanup_start");
killed= KILL_CONNECTION;
if (lex)
{
lex->sql_command= SQLCOM_END;
}
#ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE
if (transaction.xid_state.xa_state == XA_PREPARED)
{
#error xid_state in the cache should be replaced by the allocated value
}
#endif
{
transaction.xid_state.xa_state= XA_NOTR;
trans_rollback(this);
xid_cache_delete(&transaction.xid_state);
}
locked_tables_list.unlock_locked_tables(this);
mysql_ha_cleanup(this);
DBUG_ASSERT(open_tables == NULL);
/*
If the thread was in the middle of an ongoing transaction (rolled
back a few lines above) or under LOCK TABLES (unlocked the tables
and left the mode a few lines above), there will be outstanding
metadata locks. Release them.
*/
mdl_context.release_transactional_locks();
/* Release the global read lock, if acquired. */
if (global_read_lock.is_acquired())
global_read_lock.unlock_global_read_lock(this);
/* All metadata locks must have been released by now. */
DBUG_ASSERT(!mdl_context.has_locks());
delete_dynamic(&user_var_events);
my_hash_free(&user_vars);
close_temporary_tables(this);
sp_cache_clear(&sp_proc_cache);
sp_cache_clear(&sp_func_cache);
if (ull)
{
mysql_mutex_lock(&LOCK_user_locks);
item_user_lock_release(ull);
mysql_mutex_unlock(&LOCK_user_locks);
ull= NULL;
}
/*
Actions above might generate events for the binary log, so we
commit the current transaction coordinator after executing cleanup
actions.
*/
if (tc_log)
tc_log->commit(this, true, false);
/*
Debug sync system must be closed after tc_log->commit(), because
DEBUG_SYNC is used in commit code.
*/
#if defined(ENABLED_DEBUG_SYNC)
/* End the Debug Sync Facility. See debug_sync.cc. */
debug_sync_end_thread(this);
#endif /* defined(ENABLED_DEBUG_SYNC) */
session_tracker.deinit();
cleanup_done=1;
DBUG_VOID_RETURN;
}
/**
Release most resources, prior to THD destruction.
*/
void THD::release_resources()
{
mutex_assert_not_owner_shard(SHARDED(&LOCK_thread_count), this);
DBUG_ASSERT(m_release_resources_done == false);
if (variables.sql_stats_snapshot)
toggle_sql_stats_snapshot(this);
/* Ensure that no one is using THD */
mysql_mutex_lock(&LOCK_thd_data);
m_release_resources_started = 1;
if (m_explicit_snapshot)
set_explicit_snapshot(nullptr);
/* if we are still in admission control, release it */
if (is_in_ac)
{
multi_tenancy_exit_query(this);
}
/* Close connection */
#ifndef EMBEDDED_LIBRARY
NET* net = get_net_nullable();
if (net != nullptr && net->vio) {
vio_delete(net->vio);
net_end(net);
net->vio= NULL;
}
#if defined(HAVE_OPENSSL)
reset_connection_certificate();
#endif
#endif
mysql_mutex_unlock(&LOCK_thd_data);
stmt_map.reset(); /* close all prepared statements */
if (!cleanup_done)
cleanup();
mdl_context.destroy();
ha_close_connection(this);
mysql_audit_release(this);
if (m_enable_plugins)
plugin_thdvar_cleanup(this);
#ifdef HAVE_MY_TIMER
DBUG_ASSERT(timer == NULL);
if (timer_cache)
thd_timer_destroy(timer_cache);
#endif
/* Aggregate to global status now that operations above are done. */
mysql_mutex_lock(&LOCK_status);
add_to_status(&global_status_var, &status_var);
set_status_var_init();
mysql_mutex_unlock(&LOCK_status);
propagate_pending_global_disk_usage();
m_release_resources_done= true;
}
THD::~THD()
{
mutex_assert_not_owner_shard(SHARDED(&LOCK_thread_count), this);
THD_CHECK_SENTRY(this);
DBUG_ENTER("~THD()");
DBUG_PRINT("info", ("THD dtor, this %p", this));
if (!m_release_resources_done)
release_resources();
clear_next_event_pos();
/* Ensure that no one is using THD */
mysql_mutex_lock(&LOCK_thd_data);
mysql_mutex_unlock(&LOCK_thd_data);
DBUG_PRINT("info", ("freeing security context"));
main_security_ctx.destroy();
my_free(db);
db= NULL;
free_root(&transaction.mem_root,MYF(0));
mysql_mutex_destroy(&LOCK_thd_data);
mysql_mutex_lock(&LOCK_thd_db_read_only_hash);
my_hash_free(&db_read_only_hash);
mysql_mutex_unlock(&LOCK_thd_db_read_only_hash);
mysql_mutex_destroy(&LOCK_thd_db_read_only_hash);
mysql_mutex_destroy(&LOCK_db_metadata);
#ifndef DBUG_OFF
dbug_sentry= THD_SENTRY_GONE;
#endif
#ifndef EMBEDDED_LIBRARY
if (rli_fake)
{
rli_fake->end_info();
delete rli_fake;
rli_fake= NULL;
}
if (variables.gtid_next_list.gtid_set != NULL)
{
#ifdef HAVE_GTID_NEXT_LIST
delete variables.gtid_next_list.gtid_set;
variables.gtid_next_list.gtid_set= NULL;
variables.gtid_next_list.is_non_null= false;
#else
DBUG_ASSERT(0);
#endif
}
mysql_audit_free_thd(this);
if (rli_slave)
rli_slave->cleanup_after_session();
#endif
if (prepared_engine)
delete prepared_engine;
delete ec;
free_root(&main_mem_root, MYF(0));
if (m_token_array != NULL)
{
my_free(m_token_array);
}
if (killed_reason != NULL)
{
my_free(killed_reason);
}
DBUG_VOID_RETURN;
}
/*
Add all status variables to another status variable array
SYNOPSIS
add_to_status()
to_var add to this array
from_var from this array
NOTES
This function assumes that all variables are longlong/ulonglong.
If this assumption changes, the other variables will have to be added
explicitly after the while loop
*/
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
{
int c;
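/* Treat both STATUS_VAR structs as flat arrays of ulonglong counters and
sum every counter up to and including last_system_status_var. */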
ulonglong *end= (ulonglong*) ((uchar*) to_var +
offsetof(STATUS_VAR, last_system_status_var) +
sizeof(ulonglong));
ulonglong *to= (ulonglong*) to_var, *from= (ulonglong*) from_var;
while (to != end)
*(to++)+= *(from++);
to_var->com_other+= from_var->com_other;
for (c= 0; c< SQLCOM_END; c++)
to_var->com_stat[(uint) c] += from_var->com_stat[(uint) c];
}
/*
Add the difference between two status variable arrays to another one.
SYNOPSIS
add_diff_to_status
to_var add to this array
from_var from this array
dec_var minus this array
NOTE
This function assumes that all variables are longlong/ulonglong.
*/
void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
STATUS_VAR *dec_var)
{
int c;
ulonglong *end= (ulonglong*) ((uchar*) to_var + offsetof(STATUS_VAR,
last_system_status_var) +
sizeof(ulonglong));
ulonglong *to= (ulonglong*) to_var,
*from= (ulonglong*) from_var,
*dec= (ulonglong*) dec_var;
while (to != end)
*(to++)+= *(from++) - *(dec++);
to_var->com_other+= from_var->com_other - dec_var->com_other;
for (c= 0; c< SQLCOM_END; c++)
to_var->com_stat[(uint) c] += from_var->com_stat[(uint) c] -dec_var->com_stat[(uint) c];
}
/**
Awake a thread.
@param[in] state_to_set value for THD::killed
This is normally called from another thread's THD object.
@note Always call this while holding LOCK_thd_data.
*/
void THD::awake(THD::killed_state state_to_set, const char *reason)
{
DBUG_ENTER("THD::awake");
DBUG_PRINT("enter", ("this: %p current_thd: %p", this, current_thd));
THD_CHECK_SENTRY(this);
mysql_mutex_assert_owner(&LOCK_thd_data);
/* Set the 'killed' flag of 'this', which is the target THD object. */
killed= state_to_set;
if (reason) {
static constexpr int len = KILLED_REASON_MAX_LEN;
if (!killed_reason) {
killed_reason = (char *)my_malloc(len, MYF(0));
}
if (killed_reason) {
strncpy(killed_reason, reason, len - 1);
killed_reason[len - 1] = '\0';
}
} else {
if (killed_reason && killed_reason[0] != '\0') {
// no reason is given, let's clean up previous killed_reason
killed_reason[0] = '\0';
}
}
if (state_to_set != THD::KILL_QUERY && state_to_set != THD::KILL_TIMEOUT)
{
#ifdef SIGNAL_WITH_VIO_SHUTDOWN
if (this != current_thd)
{
/*
Before sending a signal, let's close the socket of the thread
that is being killed ("this", which is not the current thread).
This is to make sure it does not block if the signal is lost.
This needs to be done only on platforms where signals are not
a reliable interruption mechanism.
Note that the downside of this mechanism is that we could close
the connection while "this" target thread is in the middle of
sending a result to the application, thus violating the client-
server protocol.
On the other hand, without closing the socket we have a race
condition. If "this" target thread passes the check of
thd->killed, and then the current thread runs through
THD::awake(), sets the 'killed' flag and completes the
signaling, and then the target thread runs into read(), it will
block on the socket. As a result of the discussions around
Bug#37780, it has been decided that we accept the race
condition. A second KILL awakes the target from read().
If we are killing ourselves, we know that we are not blocked.
We also know that we will check thd->killed before we go for
reading the next statement.
*/
shutdown_active_vio();
}
#endif
/* Mark the target thread's alarm request expired, and signal alarm. */
thr_alarm_kill(m_thread_id);
/* Send an event to the scheduler that a thread should be killed. */
if (!slave_thread)
MYSQL_CALLBACK(thread_scheduler, post_kill_notification, (this));
}
/* Interrupt target waiting inside a storage engine. */
if (state_to_set != THD::NOT_KILLED)
ha_kill_connection(this);
if (state_to_set == THD::KILL_TIMEOUT)
status_var.max_statement_time_exceeded++;
/* Broadcast a condition to kick the target if it is waiting on it. */
if (mysys_var)
{
mysql_mutex_lock(&mysys_var->mutex);
if (!system_thread) // Don't abort locks
mysys_var->abort=1;
/*
This broadcast could be up in the air if the victim thread
exits the cond in the time between read and broadcast, but that is
ok since all we want to do is to make the victim thread get out
of waiting on current_cond.
If we see a non-zero current_cond: it cannot be an old value (because
then exit_cond() should have run and it can't because we have mutex); so
it is the true value but maybe current_mutex is not yet non-zero (we're
in the middle of enter_cond() and there is a "memory order
inversion"). So we test the mutex too to not lock 0.
Note that there is a small chance we fail to kill. If victim has locked
current_mutex, but hasn't yet entered enter_cond() (which means that
current_cond and current_mutex are 0), then the victim will not get
a signal and it may wait "forever" on the cond (until
we issue a second KILL or the status it's waiting for happens).
It's true that we have set its thd->killed but it may not
see it immediately and so may have time to reach the cond_wait().
However, where possible, we test for killed once again after
enter_cond(). This should make the signaling as safe as possible.
However, there is still a small chance of failure on platforms with
instruction or memory write reordering.
*/
if (mysys_var->current_cond && mysys_var->current_mutex)
{
DBUG_EXECUTE_IF("before_dump_thread_acquires_current_mutex",
{
const char act[]=
"now signal dump_thread_signal wait_for go_dump_thread";
DBUG_ASSERT(!debug_sync_set_action(current_thd,
STRING_WITH_LEN(act)));
};);
mysql_mutex_lock(mysys_var->current_mutex);
mysql_cond_broadcast(mysys_var->current_cond);
mysql_mutex_unlock(mysys_var->current_mutex);
}
mysql_mutex_unlock(&mysys_var->mutex);
}
DBUG_VOID_RETURN;
}
/**
Close the Vio associated with this session.
@remark LOCK_thd_data is taken due to the fact that
the Vio might be disassociated concurrently.
*/
void THD::disconnect()
{
Vio *vio= NULL;
mysql_mutex_lock(&LOCK_thd_data);
killed= THD::KILL_CONNECTION;
#ifdef SIGNAL_WITH_VIO_SHUTDOWN
/*
Since an active vio might not have been set yet, in
any case save a reference to avoid closing a nonexistent
one or closing the vio twice if there is an active one.
*/
vio= active_vio;
shutdown_active_vio();
#endif
/* Disconnect even if an active vio is not associated. */
Vio* net_vio = get_net()->vio;
if (net_vio != vio && net_vio != NULL)
{
vio_shutdown(net_vio);
}
mysql_mutex_unlock(&LOCK_thd_data);
}
bool THD::notify_shared_lock(MDL_context_owner *ctx_in_use,
bool needs_thr_lock_abort)
{
THD *in_use= ctx_in_use->get_thd();
bool signalled= FALSE;
if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
!in_use->killed)
{
in_use->killed= THD::KILL_CONNECTION;
mysql_mutex_lock(&in_use->mysys_var->mutex);
if (in_use->mysys_var->current_cond)
mysql_cond_broadcast(in_use->mysys_var->current_cond);
mysql_mutex_unlock(&in_use->mysys_var->mutex);
signalled= TRUE;
}
if (needs_thr_lock_abort)
{
mysql_mutex_lock(&in_use->LOCK_thd_data);
for (TABLE *thd_table= in_use->open_tables;
thd_table ;
thd_table= thd_table->next)
{
/*
Check for TABLE::needs_reopen() is needed since in some places we call
handler::close() for table instance (and set TABLE::db_stat to 0)
and do not remove such instances from the THD::open_tables
for some time, during which other thread can see those instances
(e.g. see partitioning code).
*/
if (!thd_table->needs_reopen())
signalled|= mysql_lock_abort_for_thread(this, thd_table);
}
mysql_mutex_unlock(&in_use->LOCK_thd_data);
}
return signalled;
}
#ifndef EMBEDDED_LIBRARY
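/**
Helper for THD::kill_one_thread() when the target is a Srv_session: for a
connection kill it clears the session THD's db_read_only_hash and arranges
for the session to be killed when it next wakes up, then downgrades the
request to a query kill since the actual cleanup happens in the session's
own thread.
*/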
static void kill_one_srv_session(
Srv_session& srv_session,
bool& only_kill_query)
{
if (!only_kill_query)
{
// For safety, clear the db_read_only_hash data structure. After a THD is
// removed from the srv_session map (which will happen when the thread is
// woken up later), it's possible for entries in that hash to be freed
// without this THD being updated to reflect that, because this THD then
// exists outside of both global_thread_list and the srv_session map and
// cannot be updated by other threads.
//
// It's unlikely that the kill code paths will need to read from
// db_read_only_hash, but it's hard to verify all codepaths.
THD* thd = srv_session.get_thd();
mysql_mutex_lock(&thd->LOCK_thd_db_read_only_hash);
my_hash_free(&thd->db_read_only_hash);
mysql_mutex_unlock(&thd->LOCK_thd_db_read_only_hash);
// Set up the session to wake up immediately and cleanup. We don't want
// to do any of the work in this thread as it may be holding locks that
// would conflict with cleaning up the srv_session.
DBUG_PRINT("info", ("Kill CONN for srv_session, use background kill."));
srv_session.enableImmediateKill();
}
// We can't exit early if the session is not attached, because it might
// have already been removed from the map. Continue to kill the query and
// set only_kill_query=true, as the connection should still remain active.
only_kill_query = true;
}
#endif
/**
Kill one thread.
@param thd Thread class
@param id Thread id
@param only_kill_query Should it kill the query or the connection
@param reason The reason why this is killed
@note
This is written such that we have a short lock on LOCK_thread_count
*/
uint THD::kill_one_thread(my_thread_id id, bool only_kill_query,
const char *reason)
{
uint error=ER_NO_SUCH_THREAD;
DBUG_ENTER("kill_one_thread");
DBUG_PRINT("enter", ("id=%u only_kill=%d", id, only_kill_query));
THD* other = NULL;
// maybe it's a srv_session id
std::shared_ptr<Srv_session> srv_session;
#ifndef EMBEDDED_LIBRARY
srv_session = Srv_session::access_session(id);
if (srv_session)
{
other = srv_session->get_thd();
mysql_mutex_lock(&other->LOCK_thd_data);
}
#endif
if (!srv_session)
{
/* If successful we'll have LOCK_thd_data on return. */
other= find_thd_from_id(id);
#ifndef EMBEDDED_LIBRARY
if (other != nullptr) {
/* If the conn has an attached srv_session, redirect the kill to it */
srv_session = other->get_attached_srv_session_safe();
if (srv_session != nullptr) {
mysql_mutex_unlock(&other->LOCK_thd_data);
other = srv_session->get_thd();
mysql_mutex_lock(&other->LOCK_thd_data);
}
}
#endif
}
if (other)
{
/*
If we're SUPER, we can KILL anything, including system-threads.
No further checks.
KILLer: thd->security_ctx->user could in theory be NULL while
we're still in "unauthenticated" state. This is a theoretical
case (the code suggests this could happen, so we play it safe).
KILLee: other->security_ctx->user will be NULL for system threads.
We need to check so Jane Random User doesn't crash the server
when trying to kill a) system threads or b) unauthenticated users'
threads (Bug#43748).
If user of both killer and killee are non-NULL, proceed with
slayage if both are string-equal.
*/
if ((security_ctx->master_access & SUPER_ACL) ||
security_ctx->user_matches(other->security_ctx))
{
/* Process the kill only if the thread is not already undergoing a
connection kill.
*/
if (other->killed != THD::KILL_CONNECTION)
{
#ifndef EMBEDDED_LIBRARY
if (srv_session) {
kill_one_srv_session(*srv_session, only_kill_query);
}
#endif
other->awake(only_kill_query ? THD::KILL_QUERY : THD::KILL_CONNECTION,
reason);
}
error= 0;
}
else
error=ER_KILL_DENIED_ERROR;
mysql_mutex_unlock(&other->LOCK_thd_data);
}
DBUG_PRINT("exit", ("%d", error));
DBUG_RETURN(error);
}
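/**
Kill the connection holding shared MDL locks that block this thread.
Only a SUPER user running a high-priority DDL statement (or with
kill_conflicting_connections enabled) is allowed to do this.
@param ctx_in_use MDL context owner of the blocking connection
@retval true the blocking connection was killed
@retval false no kill was attempted, or it failed
*/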
bool THD::kill_shared_locks(MDL_context_owner *ctx_in_use)
{
THD *in_use= ctx_in_use->get_thd();
// Only allow super user with ddl command to kill blocking threads
if (this->security_ctx->master_access & SUPER_ACL) {
bool is_high_priority_ddl =
(this->variables.high_priority_ddl || lex->high_priority_ddl ||
(slave_thread && slave_high_priority_ddl)) &&
support_high_priority(lex->sql_command);
if (is_high_priority_ddl || variables.kill_conflicting_connections)
return kill_one_thread(in_use, false) == 0;
}
return false;
}
/*
Remember the location of thread info, the structure needed for
sql_alloc() and the structure for the net buffer
*/
bool THD::store_globals()
{
/*
Assert that thread_stack is initialized: it's necessary to be able
to track stack overrun.
*/
DBUG_ASSERT(thread_stack);
if (my_pthread_setspecific_ptr(THR_THD, this) ||
my_pthread_setspecific_ptr(THR_MALLOC, &mem_root))
return 1;
/*
mysys_var is concurrently readable by a killer thread.
It is protected by LOCK_thd_data; there is no need to lock while the
pointer is changing from NULL to non-NULL. If the kill thread reads
NULL it doesn't refer to anything, but if it is non-NULL we need to
ensure that the thread doesn't proceed to assign another thread to
have the mysys_var reference (which in fact refers to the worker
thread's local storage with key THR_KEY_mysys).
*/
mysys_var=my_thread_var;
DBUG_PRINT("debug", ("mysys_var: 0x%llx", (ulonglong) mysys_var));
/*
Let mysqld define the thread id (not mysys)
This allows us to move THD to different threads if needed.
*/
mysys_var->id= m_thread_id;
real_id= pthread_self(); // For debugging
#ifdef TARGET_OS_LINUX
capture_system_thread_id();
#endif // TARGET_OS_LINUX
/*
We have to call thr_lock_info_init() again here as THD may have been
created in another thread
*/
thr_lock_info_init(&lock_info);
return 0;
}
/*
Remove the thread specific info (THD and mem_root pointer) stored during
store_globals() call for this thread.
*/
bool THD::restore_globals()
{
/*
Assert that thread_stack is initialized: it's necessary to be able
to track stack overrun.
*/
DBUG_ASSERT(thread_stack);
/* Undocking the thread specific data. */
my_pthread_setspecific_ptr(THR_THD, NULL);
my_pthread_setspecific_ptr(THR_MALLOC, NULL);
return 0;
}
/*
Cleanup after query.
SYNOPSIS
THD::cleanup_after_query()
DESCRIPTION
This function is used to reset thread data to its default state.
NOTE
This function is not suitable for setting thread data to some
non-default values, as there is only one replication thread, so
different master threads may overwrite each other's data on the
slave.
*/
void THD::cleanup_after_query()
{
/*
Reset rand_used so that detection of calls to rand() will save random
seeds if needed by the slave.
Do not reset rand_used if inside a stored function or trigger because
only the call to these operations is logged. Thus only the calling
statement needs to detect rand() calls made by its substatements. These
substatements must not set rand_used to 0 because it would remove the
detection of rand() by the calling statement.
*/
if (!in_sub_stmt) /* stored functions and triggers are a special case */
{
/* Forget those values, for next binlogger: */
stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
auto_inc_intervals_in_cur_stmt_for_binlog.empty();
rand_used= 0;
binlog_accessed_db_names= NULL;
if (gtid_mode > 0)
gtid_post_statement_checks(this);
#ifndef EMBEDDED_LIBRARY
/*
Clean up possibly unused INSERT_ID events left by the current statement.
is_update_query() is needed to ignore SET statements:
statements that don't update anything directly and don't
use stored functions. This is mostly necessary to ignore
statements in the binlog between SET INSERT_ID and the DML statement
which is intended to consume its event (there can be other
SET statements between them).
*/
if ((rli_slave || rli_fake) && is_update_query(lex->sql_command))
auto_inc_intervals_forced.empty();
#endif
}
/*
In case of stored procedures, stored functions, triggers and events
m_trans_fixed_log_file will not be set to NULL. The memory will be reused.
*/
if (!sp_runtime_ctx)
m_trans_fixed_log_file= NULL;
/*
Forget the binlog stmt filter for the next query.
There are some code paths that:
- do not call THD::decide_logging_format()
- do call THD::binlog_query(),
making this reset necessary.
*/
reset_binlog_local_stmt_filter();
if (first_successful_insert_id_in_cur_stmt > 0)
{
/* set what LAST_INSERT_ID() will return */
first_successful_insert_id_in_prev_stmt=
first_successful_insert_id_in_cur_stmt;
first_successful_insert_id_in_cur_stmt= 0;
substitute_null_with_insert_id= TRUE;
}
arg_of_last_insert_id_function= 0;
/* Free Items that were created during this execution */
free_items();
/* Reset where. */
where= THD::DEFAULT_WHERE;
/* reset table map for multi-table update */
table_map_for_update= 0;
m_binlog_invoker= FALSE;
/* reset replication info structure */
if (lex && lex->mi.repl_ignore_server_ids.buffer)
{
delete_dynamic(&lex->mi.repl_ignore_server_ids);
}
#ifndef EMBEDDED_LIBRARY
if (rli_slave)
rli_slave->cleanup_after_query();
#endif
}
LEX_STRING *
make_lex_string_root(MEM_ROOT *mem_root,
LEX_STRING *lex_str, const char* str, uint length,
bool allocate_lex_string)
{
if (allocate_lex_string)
if (!(lex_str= (LEX_STRING *)alloc_root(mem_root, sizeof(LEX_STRING))))
return 0;
if (!(lex_str->str= strmake_root(mem_root, str, length)))
return 0;
lex_str->length= length;
return lex_str;
}
/**
Create a LEX_STRING in this connection.
@param lex_str pointer to LEX_STRING object to be initialized
@param str initializer to be copied into lex_str
@param length length of str, in bytes
@param allocate_lex_string if TRUE, allocate new LEX_STRING object,
instead of using lex_str value
@return NULL on failure, or pointer to the LEX_STRING object
*/
LEX_STRING *THD::make_lex_string(LEX_STRING *lex_str,
const char* str, uint length,
bool allocate_lex_string)
{
return make_lex_string_root (mem_root, lex_str, str,
length, allocate_lex_string);
}
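/*
Typical use (sketch): copy a C string into connection memory, e.g.
  LEX_STRING dst;
  if (!thd->make_lex_string(&dst, "mydb", 4, false))
    ; // out of memory, dst was not initialized
*/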
/*
Convert a string to another character set
@param to Store new allocated string here
@param to_cs New character set for allocated string
@param from String to convert
@param from_length Length of string to convert
@param from_cs Original character set
@note to will be 0-terminated to make it easy to pass to system funcs
@retval false ok
@retval true End of memory.
In this case to->str will point to 0 and to->length will be 0.
*/
bool THD::convert_string(LEX_STRING *to, const CHARSET_INFO *to_cs,
const char *from, uint from_length,
const CHARSET_INFO *from_cs)
{
DBUG_ENTER("convert_string");
size_t new_length= to_cs->mbmaxlen * from_length;
uint errors= 0;
if (!(to->str= (char*) alloc(new_length+1)))
{
to->length= 0; // Safety fix
DBUG_RETURN(1); // EOM
}
to->length= copy_and_convert((char*) to->str, new_length, to_cs,
from, from_length, from_cs, &errors);
to->str[to->length]=0; // Safety
if (errors != 0)
{
char printable_buff[32];
convert_to_printable(printable_buff, sizeof(printable_buff),
from, from_length, from_cs, 6);
push_warning_printf(this, Sql_condition::WARN_LEVEL_WARN,
ER_INVALID_CHARACTER_STRING,
ER_THD(this, ER_INVALID_CHARACTER_STRING),
from_cs->csname, printable_buff);
}
DBUG_RETURN(0);
}
/*
Convert string from source character set to target character set inplace.
SYNOPSIS
THD::convert_string
DESCRIPTION
Convert string using convert_buffer - buffer for character set
conversion shared between all protocols.
RETURN
0 ok
!0 out of memory
*/
bool THD::convert_string(String *s, const CHARSET_INFO *from_cs,
const CHARSET_INFO *to_cs)
{
uint dummy_errors;
if (convert_buffer.copy(s->ptr(), s->length(), from_cs, to_cs, &dummy_errors))
return TRUE;
/* If convert_buffer is much larger than s, copying is more efficient in the long run */
if (convert_buffer.alloced_length() >= convert_buffer.length() * 2 ||
!s->is_alloced())
{
return s->copy(convert_buffer);
}
s->swap(convert_buffer);
return FALSE;
}
/*
Update some cache variables when character set changes
*/
void THD::update_charset()
{
uint32 not_used;
charset_is_system_charset=
!String::needs_conversion(0,
variables.character_set_client,
system_charset_info,
&not_used);
charset_is_collation_connection=
!String::needs_conversion(0,
variables.character_set_client,
variables.collation_connection,
&not_used);
charset_is_character_set_filesystem=
!String::needs_conversion(0,
variables.character_set_client,
variables.character_set_filesystem,
&not_used);
}
/* Routines for adding tables to the list of tables changed in the transaction */
inline static void list_include(CHANGED_TABLE_LIST** prev,
CHANGED_TABLE_LIST* curr,
CHANGED_TABLE_LIST* new_table)
{
if (new_table)
{
*prev = new_table;
(*prev)->next = curr;
}
}
/* Add a table to the list of tables changed in the transaction */
void THD::add_changed_table(TABLE *table)
{
DBUG_ENTER("THD::add_changed_table(table)");
DBUG_ASSERT(in_multi_stmt_transaction_mode() && table->file->has_transactions());
add_changed_table(table->s->table_cache_key.str,
(long) table->s->table_cache_key.length);
DBUG_VOID_RETURN;
}
void THD::add_changed_table(const char *key, long key_length)
{
DBUG_ENTER("THD::add_changed_table(key)");
CHANGED_TABLE_LIST **prev_changed = &transaction.changed_tables;
CHANGED_TABLE_LIST *curr = transaction.changed_tables;
for (; curr; prev_changed = &(curr->next), curr = curr->next)
{
int cmp = (long)curr->key_length - (long)key_length;
if (cmp < 0)
{
list_include(prev_changed, curr, changed_table_dup(key, key_length));
DBUG_PRINT("info",
("key_length: %ld %u", key_length,
(*prev_changed)->key_length));
DBUG_VOID_RETURN;
}
else if (cmp == 0)
{
cmp = memcmp(curr->key, key, curr->key_length);
if (cmp < 0)
{
list_include(prev_changed, curr, changed_table_dup(key, key_length));
DBUG_PRINT("info",
("key_length: %ld %u", key_length,
(*prev_changed)->key_length));
DBUG_VOID_RETURN;
}
else if (cmp == 0)
{
DBUG_PRINT("info", ("already in list"));
DBUG_VOID_RETURN;
}
}
}
*prev_changed = changed_table_dup(key, key_length);
DBUG_PRINT("info", ("key_length: %ld %u", key_length,
(*prev_changed)->key_length));
DBUG_VOID_RETURN;
}
CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length)
{
CHANGED_TABLE_LIST* new_table =
(CHANGED_TABLE_LIST*) trans_alloc(ALIGN_SIZE(sizeof(CHANGED_TABLE_LIST))+
key_length + 1);
if (!new_table)
{
my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_FATALERROR),
ALIGN_SIZE(sizeof(TABLE_LIST)) + key_length + 1);
killed= KILL_CONNECTION;
return 0;
}
new_table->key= ((char*)new_table)+ ALIGN_SIZE(sizeof(CHANGED_TABLE_LIST));
new_table->next = 0;
new_table->key_length = key_length;
::memcpy(new_table->key, key, key_length);
return new_table;
}
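/**
Send the field list (metadata) for EXPLAIN output to the client.
The set of columns depends on DESCRIBE PARTITIONS and DESCRIBE EXTENDED.
@param result select_result used to send the metadata
@return return value of send_result_set_metadata()
*/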
int THD::send_explain_fields(select_result *result)
{
List<Item> field_list;
Item *item;
CHARSET_INFO *cs= system_charset_info;
field_list.push_back(new Item_return_int("id",3, MYSQL_TYPE_LONGLONG));
field_list.push_back(new Item_empty_string("select_type", 19, cs));
field_list.push_back(item= new Item_empty_string("table", NAME_CHAR_LEN, cs));
item->maybe_null= 1;
if (lex->describe & DESCRIBE_PARTITIONS)
{
/* Maximum length of string that make_used_partitions_str() can produce */
item= new Item_empty_string("partitions", MAX_PARTITIONS * (1 + FN_LEN),
cs);
field_list.push_back(item);
item->maybe_null= 1;
}
field_list.push_back(item= new Item_empty_string("type", 10, cs));
item->maybe_null= 1;
field_list.push_back(item=new Item_empty_string("possible_keys",
NAME_CHAR_LEN*MAX_KEY, cs));
item->maybe_null=1;
field_list.push_back(item=new Item_empty_string("key", NAME_CHAR_LEN, cs));
item->maybe_null=1;
field_list.push_back(item=new Item_empty_string("key_len",
NAME_CHAR_LEN*MAX_KEY));
item->maybe_null=1;
field_list.push_back(item=new Item_empty_string("ref",
NAME_CHAR_LEN*MAX_REF_PARTS,
cs));
item->maybe_null=1;
field_list.push_back(item= new Item_return_int("rows", 10,
MYSQL_TYPE_LONGLONG));
item->maybe_null= 1;
if (lex->describe & DESCRIBE_EXTENDED)
{
field_list.push_back(item= new Item_float(NAME_STRING("filtered"),
0.1234, 2, 4));
item->maybe_null=1;
}
field_list.push_back(item= new Item_empty_string("Extra", 255, cs));
item->maybe_null= 1;
return (result->send_result_set_metadata(field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF));
}
#ifdef SIGNAL_WITH_VIO_SHUTDOWN
void THD::shutdown_active_vio()
{
DBUG_ENTER("shutdown_active_vio");
mysql_mutex_assert_owner(&LOCK_thd_data);
#ifndef EMBEDDED_LIBRARY
if (active_vio)
{
vio_shutdown(active_vio);
active_vio = 0;
}
#endif
DBUG_VOID_RETURN;
}
#endif
/*
Register an item tree transformation, performed by the query
optimizer. We need a pointer to runtime_memroot because it may be !=
thd->mem_root (due to possible set_n_backup_active_arena called for thd).
*/
void THD::nocheck_register_item_tree_change(Item **place, Item *old_value,
MEM_ROOT *runtime_memroot)
{
Item_change_record *change;
/*
Now we use one node per change, which adds some memory overhead,
but still is rather fast as we use alloc_root for allocations.
A list of item tree changes of an average query should be short.
*/
void *change_mem= alloc_root(runtime_memroot, sizeof(*change));
if (change_mem == 0)
{
/*
OOM, thd->fatal_error() is called by the error handler of the
memroot. Just return.
*/
return;
}
change= new (change_mem) Item_change_record;
change->place= place;
change->old_value= old_value;
change_list.push_front(change);
}
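/**
Relocate a registered item tree change to a new address.
Used when the pointer recorded by nocheck_register_item_tree_change()
is itself moved, so that rollback_item_tree_changes() later restores
the old value at the new location.
*/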
void THD::change_item_tree_place(Item **old_ref, Item **new_ref)
{
I_List_iterator<Item_change_record> it(change_list);
Item_change_record *change;
while ((change= it++))
{
if (change->place == old_ref)
{
DBUG_PRINT("info", ("change_item_tree_place old_ref %p new_ref %p",
old_ref, new_ref));
change->place= new_ref;
break;
}
}
}
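/**
Restore all registered item tree changes to their recorded old values
and forget the change list (its memory lives in the runtime mem_root).
*/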
void THD::rollback_item_tree_changes()
{
I_List_iterator<Item_change_record> it(change_list);
Item_change_record *change;
DBUG_ENTER("rollback_item_tree_changes");
while ((change= it++))
{
DBUG_PRINT("info",
("rollback_item_tree_changes "
"place %p curr_value %p old_value %p",
change->place, *change->place, change->old_value));
*change->place= change->old_value;
}
/* We can forget about changes memory: it's allocated in runtime memroot */
change_list.empty();
DBUG_VOID_RETURN;
}
/*****************************************************************************
** Functions to provide an interface to select results
*****************************************************************************/
select_result::select_result():
estimated_rowcount(0)
{
thd=current_thd;
}
void select_result::send_error(uint errcode,const char *err)
{
my_message(errcode, err, MYF(0));
}
void select_result::cleanup()
{
/* do nothing */
}
bool select_result::check_simple_select() const
{
my_error(ER_SP_BAD_CURSOR_QUERY, MYF(0));
return TRUE;
}
static const String default_line_term("\n",default_charset_info);
static const String default_escaped("\\",default_charset_info);
static const String default_field_term("\t",default_charset_info);
static const String default_xml_row_term("<row>", default_charset_info);
static const String my_empty_string("",default_charset_info);
sql_exchange::sql_exchange(char *name, bool flag,
enum enum_filetype filetype_arg)
:file_name(name), opt_enclosed(0), dumpfile(flag),
compressed_chunk_expr(nullptr), load_compressed(0), skip_lines(0)
{
filetype= filetype_arg;
field_term= &default_field_term;
enclosed= line_start= &my_empty_string;
line_term= filetype == FILETYPE_CSV ?
&default_line_term : &default_xml_row_term;
escaped= &default_escaped;
cs= NULL;
}
bool sql_exchange::escaped_given(void)
{
return escaped != &default_escaped;
}
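/**
Send the result set metadata to the client and remember that a result
set has been started, so abort_result_set() can end it properly.
*/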
bool select_send::send_result_set_metadata(List<Item> &list, uint flags)
{
bool res;
if (!(res= thd->protocol->send_result_set_metadata(&list, flags)))
is_result_set_started= 1;
return res;
}
void select_send::abort_result_set()
{
DBUG_ENTER("select_send::abort_result_set");
if (is_result_set_started && thd->sp_runtime_ctx)
{
/*
We're executing a stored procedure, have an open result
set and an SQL exception condition. In this situation we
must abort the current statement, silence the error and
start executing the continue/exit handler if one is found.
Before aborting the statement, let's end the open result set, as
otherwise the client will hang due to the violation of the
client/server protocol.
*/
thd->sp_runtime_ctx->end_partial_result_set= TRUE;
}
DBUG_VOID_RETURN;
}
/**
Cleanup an instance of this class for re-use
at next execution of a prepared statement/
stored procedure statement.
*/
void select_send::cleanup()
{
is_result_set_started= FALSE;
}
/* Send data to client. Returns 0 if ok */
bool select_send::send_data(List<Item> &items)
{
Protocol *protocol= thd->protocol;
DBUG_ENTER("select_send::send_data");
if (unit->offset_limit_cnt)
{ // using limit offset,count
unit->offset_limit_cnt--;
DBUG_RETURN(FALSE);
}
if (thd->killed == THD::ABORT_QUERY)
DBUG_RETURN(FALSE);
/*
We may be passing the control from mysqld to the client: release the
InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
by thd
*/
ha_release_temporary_latches(thd);
protocol->prepare_for_resend();
if (protocol->send_result_set_row(&items))
{
protocol->remove_last_row();
DBUG_RETURN(TRUE);
}
thd->inc_sent_row_count(1);
thd->status_var.rows_sent++;
protocol->update_checksum();
if (thd->vio_ok())
DBUG_RETURN(protocol->write());
DBUG_RETURN(0);
}
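/**
Finish the result set: send EOF/OK to the client unless an error has
already been (or is being) sent.
*/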
bool select_send::send_eof()
{
/*
We may be passing the control from mysqld to the client: release the
InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
by thd
*/
ha_release_temporary_latches(thd);
/*
Don't send EOF if we're in error condition (which implies we've already
sent or are sending an error)
*/
if (thd->is_error())
return TRUE;
::my_eof(thd);
is_result_set_started= 0;
return FALSE;
}
/************************************************************************
Handling writing to file
************************************************************************/
void select_to_file::send_error(uint errcode,const char *err)
{
my_message(errcode, err, MYF(0));
if (file > 0)
{
(void) end_io_cache(&cache);
mysql_file_close(file, MYF(0));
/* Delete file on error */
mysql_file_delete(key_select_to_file, path, MYF(0));
file= -1;
}
}
bool select_to_file::send_eof()
{
int error= MY_TEST(end_io_cache(&cache));
if (mysql_file_close(file, MYF(MY_WME)) || thd->is_error())
error= true;
if (!error)
{
::my_ok(thd,row_count);
}
file= -1;
return error;
}
void select_to_file::close_file_handle()
{
if (file >= 0)
{
(void) end_io_cache(&cache);
mysql_file_close(file, MYF(0));
file= -1;
}
}
void select_to_file::cleanup()
{
/* In case of error send_eof() may be not called: close the file here. */
close_file_handle();
path[0]= '\0';
row_count= 0;
}
select_to_file::~select_to_file()
{
close_file_handle(); // This only happens in case of error
}
/***************************************************************************
** Export of select to textfile
***************************************************************************/
select_export::~select_export()
{
thd->set_sent_row_count(row_count);
DBUG_EXECUTE_IF("print_select_file_fsync_stats", {
// NO_LINT_DEBUG
fprintf(stderr, "[select_to_file][fsync_count] %u\n", n_fsyncs);
}
);
}
/*
Create file with IO cache
SYNOPSIS
create_file()
thd Thread handle
path File name
file_name File name in input
compressed Whether the file should be compressed
cache IO cache
RETURN
>= 0 File handle
-1 Error
*/
static File create_file(THD *thd, char *path, const char *file_name,
my_bool compressed, IO_CACHE *cache)
{
File file= -1;
bool new_file_created= false;
MY_STAT stat_arg;
uint option= MY_UNPACK_FILENAME | MY_RELATIVE_PATH;
#ifdef DONT_ALLOW_FULL_LOAD_DATA_PATHS
option|= MY_REPLACE_DIR; // Force use of db directory
#endif
if (!dirname_length(file_name))
{
strxnmov(path, FN_REFLEN-1, mysql_real_data_home, thd->db ? thd->db : "",
NullS);
(void) fn_format(path, file_name, path, "", option);
}
else
(void) fn_format(path, file_name, mysql_real_data_home, "", option);
if (!is_secure_file_path(path))
{
/* Write only allowed to dir or subdir specified by secure_file_priv */
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv", "");
return -1;
}
if (my_stat(path, &stat_arg, MYF(0)))
{
/* Check if file is named pipe */
if (MY_S_ISFIFO(stat_arg.st_mode))
{
if ((file = mysql_file_open(key_select_to_file,
path, O_WRONLY, MYF(MY_WME))) < 0)
{
return -1;
}
}
else
{
my_error(ER_FILE_EXISTS_ERROR, MYF(0), file_name);
return -1;
}
}
else
{
/* Create the file world readable */
if ((file= mysql_file_create(key_select_to_file,
path, 0666, O_WRONLY|O_EXCL, MYF(MY_WME))) < 0)
return file;
new_file_created= true;
#ifdef HAVE_FCHMOD
(void) fchmod(file, 0666); // Because of umask()
#else
(void) chmod(path, 0666);
#endif
}
if (init_io_cache_ex(cache, file, 0L, WRITE_CACHE, 0L, 1, MYF(MY_WME),
compressed))
{
mysql_file_close(file, MYF(0));
/* Delete file on error, if it was just created */
if (new_file_created)
mysql_file_delete(key_select_to_file, path, MYF(0));
return -1;
}
return file;
}
/*
Create a new compressed file of the form <filename>.<monotonic number>.zst.
This is used to assign unique names for files created by chunking.
*/
int select_export::open_new_compressed_file()
{
char new_file_buff[FN_REFLEN];
int wr = snprintf(new_file_buff, sizeof(new_file_buff), "%s.%llu.zst",
exchange->file_name, current_chunk_idx++);
if (wr >= FN_REFLEN || wr < 0)
return 1;
if ((file = create_file(thd, path, new_file_buff,
true /* compressed */ , &cache)) < 0)
return 1;
return 0;
}
int
select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
{
bool blob_flag=0;
bool string_results= FALSE, non_string_results= FALSE;
uncompressed_chunk_size_limit = 0;
Item *chunk_expr = exchange->compressed_chunk_expr;
if (chunk_expr)
{
longlong chunk_size_mb;
if (chunk_expr->type() != Item::INT_ITEM ||
(chunk_size_mb = chunk_expr->val_int()) < 0 ||
chunk_size_mb > max_chunk_limit_mb)
{
my_error(ER_WRONG_ARGUMENTS, MYF(0), "COMPRESSED");
return 1;
}
uncompressed_chunk_size_limit = chunk_size_mb * 1024 * 1024;
}
unit= u;
if ((uint) strlen(exchange->file_name) + NAME_LEN >= FN_REFLEN)
strmake(path,exchange->file_name,FN_REFLEN-1);
write_cs= exchange->cs ? exchange->cs : &my_charset_bin;
if (chunk_expr)
{
/*
If compression was specified, create a new file with
name <filename>.0.zst.
*/
if (open_new_compressed_file())
return 1;
}
else
{
/*
If compression was not specified, just create a single
file and dump data in it. There will be no chunking.
*/
if ((file = create_file(thd, path, exchange->file_name,
false /* compressed */,
&cache)) < 0)
return 1;
}
/* Check if there is any blobs in data */
{
List_iterator_fast<Item> li(list);
Item *item;
while ((item=li++))
{
if (item->max_length >= MAX_BLOB_WIDTH)
{
blob_flag=1;
break;
}
if (item->result_type() == STRING_RESULT)
string_results= TRUE;
else
non_string_results= TRUE;
}
}
if (exchange->escaped->numchars() > 1 || exchange->enclosed->numchars() > 1)
{
my_error(ER_WRONG_FIELD_TERMINATORS, MYF(0));
return TRUE;
}
if (exchange->escaped->length() > 1 || exchange->enclosed->length() > 1 ||
!my_isascii(exchange->escaped->ptr()[0]) ||
!my_isascii(exchange->enclosed->ptr()[0]) ||
!exchange->field_term->is_ascii() || !exchange->line_term->is_ascii() ||
!exchange->line_start->is_ascii())
{
/*
Current LOAD DATA INFILE recognizes field/line separators "as is" without
converting from client charset to data file charset. So, it is supposed,
that input file of LOAD DATA INFILE consists of data in one charset and
separators in other charset. For the compatibility with that [buggy]
behaviour SELECT INTO OUTFILE implementation has been saved "as is" too,
but the new warning message has been added:
Non-ASCII separator arguments are not fully supported
*/
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED,
ER(WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED));
}
field_term_length=exchange->field_term->length();
field_term_char= field_term_length ?
(int) (uchar) (*exchange->field_term)[0] : INT_MAX;
if (!exchange->line_term->length())
exchange->line_term=exchange->field_term; // Use this if it exists
field_sep_char= (exchange->enclosed->length() ?
(int) (uchar) (*exchange->enclosed)[0] : field_term_char);
if (exchange->escaped->length() && (exchange->escaped_given() ||
!(thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)))
escape_char= (int) (uchar) (*exchange->escaped)[0];
else
escape_char= -1;
is_ambiguous_field_sep= MY_TEST(strchr(ESCAPE_CHARS, field_sep_char));
is_unsafe_field_sep= MY_TEST(strchr(NUMERIC_CHARS, field_sep_char));
line_sep_char= (exchange->line_term->length() ?
(int) (uchar) (*exchange->line_term)[0] : INT_MAX);
if (!field_term_length)
exchange->opt_enclosed=0;
if (!exchange->enclosed->length())
exchange->opt_enclosed=1; // A little quicker loop
fixed_row_size= (!field_term_length && !exchange->enclosed->length() &&
!blob_flag);
if ((is_ambiguous_field_sep && exchange->enclosed->is_empty() &&
(string_results || is_unsafe_field_sep)) ||
(exchange->opt_enclosed && non_string_results &&
field_term_length && strchr(NUMERIC_CHARS, field_term_char)))
{
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_AMBIGUOUS_FIELD_TERM, ER(ER_AMBIGUOUS_FIELD_TERM));
is_ambiguous_field_term= TRUE;
}
else
is_ambiguous_field_term= FALSE;
return 0;
}
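/*
Write a buffer to the output IO cache and, on success, count the bytes
towards the current uncompressed chunk size (used for chunked compressed
SELECT ... INTO OUTFILE).
*/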
int select_export::write_io_cache(const uchar *buf, size_t length)
{
int write_err = my_b_write(&cache, buf, length);
uncompressed_chunk_size_current += (write_err == 0 ? length : 0);
return write_err;
}
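/*
A character needs escaping if it is the escape character itself, the
field separator (or the field terminator when not enclosed), the line
separator, or the NUL byte.
*/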
#define NEED_ESCAPING(x) ((int) (uchar) (x) == escape_char || \
(enclosed ? (int) (uchar) (x) == field_sep_char \
: (int) (uchar) (x) == field_term_char) || \
(int) (uchar) (x) == line_sep_char || \
!(x))
bool select_export::send_data(List<Item> &items)
{
DBUG_ENTER("select_export::send_data");
char buff[MAX_FIELD_WIDTH],null_buff[2],space[MAX_FIELD_WIDTH];
char cvt_buff[MAX_FIELD_WIDTH];
String cvt_str(cvt_buff, sizeof(cvt_buff), write_cs);
bool space_inited=0;
String tmp(buff,sizeof(buff),&my_charset_bin),*res;
tmp.length(0);
if (unit->offset_limit_cnt)
{ // using limit offset,count
unit->offset_limit_cnt--;
DBUG_RETURN(0);
}
if (thd->killed == THD::ABORT_QUERY)
DBUG_RETURN(0);
row_count++;
Item *item;
uint used_length=0,items_left=items.elements;
List_iterator_fast<Item> li(items);
/*
Check if we have crossed the chunk size limit. Open a new file
if that is the case.
If the special value '0' was given to COMPRESSED, we do not do
chunking.
*/
if (uncompressed_chunk_size_limit > 0 &&
uncompressed_chunk_size_current >= uncompressed_chunk_size_limit)
{
/* close file handle of previous file */
close_file_handle();
uncompressed_chunk_size_current = 0;
if (open_new_compressed_file())
goto err;
}
if (write_io_cache((uchar*) exchange->line_start->ptr(),
exchange->line_start->length()))
goto err;
while ((item=li++))
{
Item_result result_type=item->result_type();
bool enclosed = (exchange->enclosed->length() &&
(!exchange->opt_enclosed || result_type == STRING_RESULT));
res=item->str_result(&tmp);
if (res && !my_charset_same(write_cs, res->charset()) &&
!my_charset_same(write_cs, &my_charset_bin))
{
const char *well_formed_error_pos;
const char *cannot_convert_error_pos;
const char *from_end_pos;
const char *error_pos;
uint32 bytes;
uint64 estimated_bytes=
((uint64) res->length() / res->charset()->mbminlen + 1) *
write_cs->mbmaxlen + 1;
set_if_smaller(estimated_bytes, UINT_MAX32);
if (cvt_str.realloc((uint32) estimated_bytes))
{
my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), (uint32) estimated_bytes);
goto err;
}
bytes= well_formed_copy_nchars(write_cs, (char *) cvt_str.ptr(),
cvt_str.alloced_length(),
res->charset(), res->ptr(), res->length(),
UINT_MAX32, // copy all input chars,
// i.e. ignore nchars parameter
&well_formed_error_pos,
&cannot_convert_error_pos,
&from_end_pos);
error_pos= well_formed_error_pos ? well_formed_error_pos
: cannot_convert_error_pos;
if (error_pos)
{
char printable_buff[32];
convert_to_printable(printable_buff, sizeof(printable_buff),
error_pos, res->ptr() + res->length() - error_pos,
res->charset(), 6);
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"string", printable_buff,
item->item_name.ptr(), static_cast<long>(row_count));
}
else if (from_end_pos < res->ptr() + res->length())
{
/*
result is longer than UINT_MAX32 and doesn't fit into String
*/
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, ER(WARN_DATA_TRUNCATED),
item->full_name(), static_cast<long>(row_count));
}
cvt_str.length(bytes);
res= &cvt_str;
}
if (res && enclosed)
{
if (write_io_cache((uchar*) exchange->enclosed->ptr(),
exchange->enclosed->length()))
goto err;
}
if (!res)
{ // NULL
if (!fixed_row_size)
{
if (escape_char != -1) // Use \N syntax
{
null_buff[0]=escape_char;
null_buff[1]='N';
if (write_io_cache((uchar*) null_buff,2))
goto err;
}
else if (write_io_cache((uchar*) "NULL",4))
goto err;
}
else
{
used_length=0; // Fill with space
}
}
else
{
if (fixed_row_size)
used_length=min(res->length(),item->max_length);
else
used_length=res->length();
if ((result_type == STRING_RESULT || is_unsafe_field_sep) &&
escape_char != -1)
{
char *pos, *start, *end;
const CHARSET_INFO *res_charset= res->charset();
const CHARSET_INFO *character_set_client=
thd->variables.character_set_client;
bool check_second_byte= (res_charset == &my_charset_bin) &&
character_set_client->
escape_with_backslash_is_dangerous;
DBUG_ASSERT(character_set_client->mbmaxlen == 2 ||
!character_set_client->escape_with_backslash_is_dangerous);
for (start=pos=(char*) res->ptr(),end=pos+used_length ;
pos != end ;
pos++)
{
#ifdef USE_MB
if (use_mb(res_charset))
{
int l;
if ((l=my_ismbchar(res_charset, pos, end)))
{
pos += l-1;
continue;
}
}
#endif
/*
Special case when dumping BINARY/VARBINARY/BLOB values
for the clients with character sets big5, cp932, gbk and sjis,
which can have the escape character (0x5C "\" by default)
as the second byte of a multi-byte sequence.
If
- pos[0] is a valid multi-byte head (e.g 0xEE) and
- pos[1] is 0x00, which will be escaped as "\0",
then we'll get "0xEE + 0x5C + 0x30" in the output file.
If this file is later loaded using this sequence of commands:
mysql> create table t1 (a varchar(128)) character set big5;
mysql> LOAD DATA INFILE 'dump.txt' INTO TABLE t1;
then 0x5C will be misinterpreted as the second byte
of a multi-byte character "0xEE + 0x5C", instead of
escape character for 0x00.
To avoid this confusion, we'll escape the multi-byte
head character too, so the sequence "0xEE + 0x00" will be
dumped as "0x5C + 0xEE + 0x5C + 0x30".
Note, in the condition below we only check if
mbcharlen is equal to 2, because there are no
character sets with mbmaxlen longer than 2
and with escape_with_backslash_is_dangerous set.
DBUG_ASSERT before the loop makes that sure.
*/
if ((NEED_ESCAPING(*pos) ||
(check_second_byte &&
my_mbcharlen(character_set_client, (uchar) *pos) == 2 &&
pos + 1 < end &&
NEED_ESCAPING(pos[1]))) &&
/*
Don't escape field_term_char by doubling - doubling is only
valid for ENCLOSED BY characters:
*/
(enclosed || !is_ambiguous_field_term ||
(int) (uchar) *pos != field_term_char))
{
char tmp_buff[2];
tmp_buff[0]= ((int) (uchar) *pos == field_sep_char &&
is_ambiguous_field_sep) ?
field_sep_char : escape_char;
tmp_buff[1]= *pos ? *pos : '0';
if (write_io_cache((uchar*) start,(uint) (pos-start)) ||
write_io_cache((uchar*) tmp_buff,2))
goto err;
start=pos+1;
}
}
if (write_io_cache((uchar*) start,(uint) (pos-start)))
goto err;
}
else if (write_io_cache((uchar*) res->ptr(),used_length))
goto err;
}
if (fixed_row_size)
{ // Fill with space
if (item->max_length > used_length)
{
/* QQ: Fix by adding a my_b_fill() function */
if (!space_inited)
{
space_inited=1;
memset(space, ' ', sizeof(space));
}
uint length=item->max_length-used_length;
for (; length > sizeof(space) ; length-=sizeof(space))
{
if (write_io_cache((uchar*) space,sizeof(space)))
goto err;
}
if (write_io_cache((uchar*) space,length))
goto err;
}
}
if (res && enclosed)
{
if (write_io_cache((uchar*) exchange->enclosed->ptr(),
exchange->enclosed->length()))
goto err;
}
if (--items_left)
{
if (write_io_cache((uchar*) exchange->field_term->ptr(),
field_term_length))
goto err;
}
}
if (write_io_cache((uchar*) exchange->line_term->ptr(),
exchange->line_term->length()))
goto err;
/* fsync the file after every select_into_file_fsync_size bytes and
optionally sleep afterwards */
if (thd->variables.select_into_file_fsync_size) {
my_off_t cur_fsize = my_b_tell(&cache);
if (cur_fsize - last_fsync_off >=
thd->variables.select_into_file_fsync_size) {
if (flush_io_cache(&cache) || mysql_file_sync(cache.file, MYF(MY_WME)))
goto err;
else {
#ifndef DBUG_OFF
n_fsyncs++;
#endif
last_fsync_off = cur_fsize;
if (thd->variables.select_into_file_fsync_timeout)
my_sleep(thd->variables.select_into_file_fsync_timeout * 1000);
}
}
}
DBUG_RETURN(0);
err:
DBUG_RETURN(1);
}
/***************************************************************************
** Dump of select to a binary file
***************************************************************************/
int
select_dump::prepare(List<Item> &list MY_ATTRIBUTE((unused)),
SELECT_LEX_UNIT *u)
{
unit= u;
return (int) ((file= create_file(thd, path, exchange->file_name,
0 /* compressed */, &cache)) < 0);
}
bool select_dump::send_data(List<Item> &items)
{
List_iterator_fast<Item> li(items);
char buff[MAX_FIELD_WIDTH];
String tmp(buff,sizeof(buff),&my_charset_bin),*res;
tmp.length(0);
Item *item;
DBUG_ENTER("select_dump::send_data");
if (unit->offset_limit_cnt)
{ // using limit offset,count
unit->offset_limit_cnt--;
DBUG_RETURN(0);
}
if (thd->killed == THD::ABORT_QUERY)
DBUG_RETURN(0);
if (row_count++ > 1)
{
my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0));
goto err;
}
while ((item=li++))
{
res=item->str_result(&tmp);
if (!res) // If NULL
{
if (my_b_write(&cache,(uchar*) "",1))
goto err;
}
else if (my_b_write(&cache,(uchar*) res->ptr(),res->length()))
{
char errbuf[MYSYS_STRERROR_SIZE];
my_error(ER_ERROR_ON_WRITE, MYF(0), path, my_errno,
my_strerror(errbuf, sizeof(errbuf), my_errno));
goto err;
}
}
DBUG_RETURN(0);
err:
DBUG_RETURN(1);
}
select_subselect::select_subselect(Item_subselect *item_arg)
{
item= item_arg;
}
bool select_singlerow_subselect::send_data(List<Item> &items)
{
DBUG_ENTER("select_singlerow_subselect::send_data");
Item_singlerow_subselect *it= (Item_singlerow_subselect *)item;
if (it->assigned())
{
my_message(ER_SUBQUERY_NO_1_ROW, ER(ER_SUBQUERY_NO_1_ROW), MYF(0));
DBUG_RETURN(1);
}
if (unit->offset_limit_cnt)
{ // Using limit offset,count
unit->offset_limit_cnt--;
DBUG_RETURN(0);
}
if (thd->killed == THD::ABORT_QUERY)
DBUG_RETURN(0);
List_iterator_fast<Item> li(items);
Item *val_item;
for (uint i= 0; (val_item= li++); i++)
it->store(i, val_item);
it->assigned(1);
DBUG_RETURN(0);
}
void select_max_min_finder_subselect::cleanup()
{
DBUG_ENTER("select_max_min_finder_subselect::cleanup");
cache= 0;
DBUG_VOID_RETURN;
}
bool select_max_min_finder_subselect::send_data(List<Item> &items)
{
DBUG_ENTER("select_max_min_finder_subselect::send_data");
Item_maxmin_subselect *it= (Item_maxmin_subselect *)item;
List_iterator_fast<Item> li(items);
Item *val_item= li++;
it->register_value();
if (it->assigned())
{
cache->store(val_item);
if ((this->*op)())
it->store(0, cache);
}
else
{
if (!cache)
{
cache= Item_cache::get_cache(val_item);
switch (val_item->result_type())
{
case REAL_RESULT:
op= &select_max_min_finder_subselect::cmp_real;
break;
case INT_RESULT:
op= &select_max_min_finder_subselect::cmp_int;
break;
case STRING_RESULT:
op= &select_max_min_finder_subselect::cmp_str;
break;
case DECIMAL_RESULT:
op= &select_max_min_finder_subselect::cmp_decimal;
break;
case ROW_RESULT:
// This case should never be chosen
DBUG_ASSERT(0);
op= 0;
}
}
cache->store(val_item);
it->store(0, cache);
}
it->assigned(1);
DBUG_RETURN(0);
}
/**
Compare two floating point numbers for MAX or MIN.
Compare two numbers and decide if the number should be cached as the
maximum/minimum number seen this far. If fmax==true, this is a
comparison for MAX, otherwise it is a comparison for MIN.
val1 is the new number to compare against the current
maximum/minimum. val2 is the current maximum/minimum.
ignore_nulls is used to control behavior when comparing with a NULL
value. If ignore_nulls==false, the behavior is to store the first
NULL value discovered (i.e, return true, that it is larger than the
current maximum) and never replace it. If ignore_nulls==true, NULL
values are not stored. ANY subqueries use ignore_nulls==true, ALL
subqueries use ignore_nulls==false.
@retval true if the new number should be the new maximum/minimum.
@retval false if the maximum/minimum should stay unchanged.
*/
bool select_max_min_finder_subselect::cmp_real()
{
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
double val1= cache->val_real(), val2= maxmin->val_real();
/*
If we're ignoring NULLs and the current maximum/minimum is NULL
(must have been placed there as the first value iterated over) and
the new value is not NULL, return true so that a new, non-NULL
maximum/minimum is set. Otherwise, return false to keep the
current non-NULL maximum/minimum.
If we're not ignoring NULLs and the current maximum/minimum is not
NULL, return true to store NULL. Otherwise, return false to keep
the NULL we've already got.
*/
if (cache->null_value || maxmin->null_value)
return (ignore_nulls) ? !(cache->null_value) : !(maxmin->null_value);
return (fmax) ? (val1 > val2) : (val1 < val2);
}
/**
Compare two integer numbers for MAX or MIN.
@see select_max_min_finder_subselect::cmp_real()
*/
bool select_max_min_finder_subselect::cmp_int()
{
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
longlong val1= cache->val_int(), val2= maxmin->val_int();
if (cache->null_value || maxmin->null_value)
return (ignore_nulls) ? !(cache->null_value) : !(maxmin->null_value);
return (fmax) ? (val1 > val2) : (val1 < val2);
}
/**
Compare two decimal numbers for MAX or MIN.
@see select_max_min_finder_subselect::cmp_real()
*/
bool select_max_min_finder_subselect::cmp_decimal()
{
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
my_decimal cval, *cvalue= cache->val_decimal(&cval);
my_decimal mval, *mvalue= maxmin->val_decimal(&mval);
if (cache->null_value || maxmin->null_value)
return (ignore_nulls) ? !(cache->null_value) : !(maxmin->null_value);
return (fmax)
? (my_decimal_cmp(cvalue,mvalue) > 0)
: (my_decimal_cmp(cvalue,mvalue) < 0);
}
/**
Compare two strings for MAX or MIN.
@see select_max_min_finder_subselect::cmp_real()
*/
bool select_max_min_finder_subselect::cmp_str()
{
String *val1, *val2, buf1, buf2;
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
/*
As both operands are Item_cache objects, buf1 & buf2 will not be used,
but they are passed for safety.
*/
val1= cache->val_str(&buf1);
val2= maxmin->val_str(&buf2);
if (cache->null_value || maxmin->null_value)
return (ignore_nulls) ? !(cache->null_value) : !(maxmin->null_value);
return (fmax)
? (sortcmp(val1, val2, cache->collation.collation) > 0)
: (sortcmp(val1, val2, cache->collation.collation) < 0);
}
bool select_exists_subselect::send_data(List<Item> &items)
{
DBUG_ENTER("select_exists_subselect::send_data");
Item_exists_subselect *it= (Item_exists_subselect *)item;
if (unit->offset_limit_cnt)
{ // Using limit offset,count
unit->offset_limit_cnt--;
DBUG_RETURN(0);
}
if (thd->killed == THD::ABORT_QUERY)
DBUG_RETURN(0);
/*
A subquery may be evaluated 1) by executing the JOIN 2) by optimized
functions (index_subquery, subquery materialization).
It's only in (1) that we get here when we find a row. In (2) "value" is
set elsewhere.
*/
it->value= 1;
it->assigned(1);
DBUG_RETURN(0);
}
/***************************************************************************
Dump of select to variables
***************************************************************************/
int select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
{
unit= u;
if (var_list.elements != list.elements)
{
my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT,
ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT), MYF(0));
return 1;
}
return 0;
}
bool select_dumpvar::check_simple_select() const
{
my_error(ER_SP_BAD_CURSOR_SELECT, MYF(0));
return TRUE;
}
void select_dumpvar::cleanup()
{
row_count= 0;
}
Query_arena::Type Query_arena::type() const
{
DBUG_ASSERT(0); /* Should never be called */
return STATEMENT;
}
void Query_arena::free_items()
{
Item *next;
DBUG_ENTER("Query_arena::free_items");
/* This works because items are allocated with sql_alloc() */
for (; free_list; free_list= next)
{
next= free_list->next;
free_list->delete_self();
}
/* Postcondition: free_list is 0 */
DBUG_VOID_RETURN;
}
void Query_arena::set_query_arena(Query_arena *set)
{
mem_root= set->mem_root;
free_list= set->free_list;
state= set->state;
}
void Query_arena::cleanup_stmt()
{
DBUG_ASSERT(! "Query_arena::cleanup_stmt() not implemented");
}
/*
Statement functions
*/
Statement::Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg,
enum_state state_arg, ulong id_arg)
:Query_arena(mem_root_arg, state_arg),
id(id_arg),
mark_used_columns(MARK_COLUMNS_READ),
lex(lex_arg),
db(NULL),
db_length(0)
{
name.str= NULL;
}
Query_arena::Type Statement::type() const
{
return STATEMENT;
}
void Statement::set_statement(Statement *stmt)
{
id= stmt->id;
mark_used_columns= stmt->mark_used_columns;
lex= stmt->lex;
query_string= stmt->query_string;
}
void
Statement::set_n_backup_statement(Statement *stmt, Statement *backup)
{
DBUG_ENTER("Statement::set_n_backup_statement");
backup->set_statement(this);
set_statement(stmt);
DBUG_VOID_RETURN;
}
void Statement::restore_backup_statement(Statement *stmt, Statement *backup)
{
DBUG_ENTER("Statement::restore_backup_statement");
stmt->set_statement(this);
set_statement(backup);
DBUG_VOID_RETURN;
}
void THD::end_statement()
{
/* Cleanup SQL processing state to reuse this statement in next query. */
lex_end(lex);
delete lex->result;
lex->result= 0;
/* Note that free_list is freed in cleanup_after_query() */
/*
Don't free mem_root, as mem_root is freed in the end of dispatch_command
(once for any command).
*/
}
void THD::set_n_backup_active_arena(Query_arena *set, Query_arena *backup)
{
DBUG_ENTER("THD::set_n_backup_active_arena");
DBUG_ASSERT(backup->is_backup_arena == FALSE);
backup->set_query_arena(this);
set_query_arena(set);
#ifndef DBUG_OFF
backup->is_backup_arena= TRUE;
#endif
DBUG_VOID_RETURN;
}
void THD::restore_active_arena(Query_arena *set, Query_arena *backup)
{
DBUG_ENTER("THD::restore_active_arena");
DBUG_ASSERT(backup->is_backup_arena);
set->set_query_arena(this);
set_query_arena(backup);
#ifndef DBUG_OFF
backup->is_backup_arena= FALSE;
#endif
DBUG_VOID_RETURN;
}
Statement::~Statement()
{
}
C_MODE_START
static uchar *
get_statement_id_as_hash_key(const uchar *record, size_t *key_length,
my_bool not_used MY_ATTRIBUTE((unused)))
{
const Statement *statement= (const Statement *) record;
*key_length= sizeof(statement->id);
return (uchar *) &((const Statement *) statement)->id;
}
static void delete_statement_as_hash_key(void *key)
{
delete (Statement *) key;
}
static uchar *get_stmt_name_hash_key(Statement *entry, size_t *length,
my_bool not_used MY_ATTRIBUTE((unused)))
{
*length= entry->name.length;
return (uchar*) entry->name.str;
}
C_MODE_END
Statement_map::Statement_map() :
last_found_statement(0)
{
enum
{
START_STMT_HASH_SIZE = 16,
START_NAME_HASH_SIZE = 16
};
my_hash_init(&st_hash, &my_charset_bin, START_STMT_HASH_SIZE, 0, 0,
get_statement_id_as_hash_key,
delete_statement_as_hash_key, MYF(0));
my_hash_init(&names_hash, system_charset_info, START_NAME_HASH_SIZE, 0, 0,
(my_hash_get_key) get_stmt_name_hash_key,
NULL,MYF(0));
}
/*
Insert a new statement to the thread-local statement map.
DESCRIPTION
If there was an old statement with the same name, replace it with the
new one. Otherwise, check if max_prepared_stmt_count is not reached yet,
increase prepared_stmt_count, and insert the new statement. It's okay
to delete an old statement and fail to insert the new one.
POSTCONDITIONS
All named prepared statements are also present in names_hash.
Statement names in names_hash are unique.
The statement is added only if prepared_stmt_count < max_prepared_stmt_count
last_found_statement always points to a valid statement or is 0
RETURN VALUE
0 success
1 error: out of resources or max_prepared_stmt_count limit has been
reached. An error is sent to the client, the statement is deleted.
*/
int Statement_map::insert(THD *thd, Statement *statement)
{
if (my_hash_insert(&st_hash, (uchar*) statement))
{
/*
Delete is needed only in case of an insert failure. In all other
cases hash_delete will also delete the statement.
*/
delete statement;
my_error(ER_OUT_OF_RESOURCES, MYF(0));
goto err_st_hash;
}
if (statement->name.str && my_hash_insert(&names_hash, (uchar*) statement))
{
my_error(ER_OUT_OF_RESOURCES, MYF(0));
goto err_names_hash;
}
mysql_mutex_lock(&LOCK_prepared_stmt_count);
/*
We don't check that prepared_stmt_count is <= max_prepared_stmt_count
because we would like to allow lowering the total limit
of prepared statements below the current count. In that case
no new statements can be added until prepared_stmt_count drops below
the limit.
*/
if (prepared_stmt_count >= max_prepared_stmt_count)
{
mysql_mutex_unlock(&LOCK_prepared_stmt_count);
my_error(ER_MAX_PREPARED_STMT_COUNT_REACHED, MYF(0),
max_prepared_stmt_count);
goto err_max;
}
prepared_stmt_count++;
mysql_mutex_unlock(&LOCK_prepared_stmt_count);
last_found_statement= statement;
return 0;
err_max:
if (statement->name.str)
my_hash_delete(&names_hash, (uchar*) statement);
err_names_hash:
my_hash_delete(&st_hash, (uchar*) statement);
err_st_hash:
return 1;
}
void Statement_map::close_transient_cursors()
{
#ifdef TO_BE_IMPLEMENTED
Statement *stmt;
while ((stmt= transient_cursor_list.head()))
stmt->close_cursor(); /* deletes itself from the list */
#endif
}
void Statement_map::erase(Statement *statement)
{
if (statement == last_found_statement)
last_found_statement= 0;
if (statement->name.str)
my_hash_delete(&names_hash, (uchar *) statement);
my_hash_delete(&st_hash, (uchar *) statement);
mysql_mutex_lock(&LOCK_prepared_stmt_count);
DBUG_ASSERT(prepared_stmt_count > 0);
prepared_stmt_count--;
mysql_mutex_unlock(&LOCK_prepared_stmt_count);
}
void Statement_map::reset()
{
/* Must be first, hash_free will reset st_hash.records */
mysql_mutex_lock(&LOCK_prepared_stmt_count);
DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
prepared_stmt_count-= st_hash.records;
mysql_mutex_unlock(&LOCK_prepared_stmt_count);
my_hash_reset(&names_hash);
my_hash_reset(&st_hash);
last_found_statement= 0;
}
Statement_map::~Statement_map()
{
/*
We do not want to grab the global LOCK_prepared_stmt_count mutex here.
reset() should already have been called to maintain prepared_stmt_count.
*/
DBUG_ASSERT(st_hash.records == 0);
my_hash_free(&names_hash);
my_hash_free(&st_hash);
}
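/**
Store one row of a SELECT ... INTO var_list result into the target
variables (SP variables or user variables). At most one row is allowed;
a second row raises ER_TOO_MANY_ROWS.
*/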
bool select_dumpvar::send_data(List<Item> &items)
{
List_iterator_fast<my_var> var_li(var_list);
List_iterator<Item> it(items);
Item *item;
my_var *mv;
DBUG_ENTER("select_dumpvar::send_data");
if (unit->offset_limit_cnt)
{ // using limit offset,count
unit->offset_limit_cnt--;
DBUG_RETURN(false);
}
if (row_count++)
{
my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0));
DBUG_RETURN(true);
}
while ((mv= var_li++) && (item= it++))
{
if (mv->local)
{
if (thd->sp_runtime_ctx->set_variable(thd, mv->offset, &item))
DBUG_RETURN(true);
}
else
{
/*
Create an Item_func_set_user_var with delayed non-constness. We
do this so that Item_get_user_var::const_item() will return
the same result during
Item_func_set_user_var::save_item_result() as it did during
optimization and execution.
*/
Item_func_set_user_var *suv=
new Item_func_set_user_var(mv->s, item, true);
if (suv->fix_fields(thd, 0))
DBUG_RETURN(true);
suv->save_item_result(item);
if (suv->update())
DBUG_RETURN(true);
}
}
DBUG_RETURN(thd->is_error());
}
bool select_dumpvar::send_eof()
{
if (! row_count)
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_SP_FETCH_NO_DATA, ER(ER_SP_FETCH_NO_DATA));
/*
Don't send EOF if we're in error condition (which implies we've already
sent or are sending an error)
*/
if (thd->is_error())
return true;
::my_ok(thd,row_count);
return 0;
}
/****************************************************************************
TMP_TABLE_PARAM
****************************************************************************/
void TMP_TABLE_PARAM::init()
{
DBUG_ENTER("TMP_TABLE_PARAM::init");
DBUG_PRINT("enter", ("this: 0x%lx", (ulong)this));
field_count= sum_func_count= func_count= hidden_field_count= 0;
group_parts= group_length= group_null_parts= 0;
quick_group= 1;
table_charset= 0;
precomputed_group_by= 0;
skip_create_table= 0;
bit_fields_as_long= 0;
recinfo= 0;
start_recinfo= 0;
keyinfo= 0;
DBUG_VOID_RETURN;
}
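/*
Account network bytes sent for the current session and its user stats.
current_thd may be NULL when close_connection() calls net_send_error().
*/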
void thd_increment_bytes_sent(ulong length)
{
THD *thd=current_thd;
if (likely(thd != 0))
{ /* current_thd==0 when close_connection() calls net_send_error() */
thd->status_var.bytes_sent+= length;
USER_STATS *us= thd_get_user_stats(thd);
us->bytes_sent.inc(length);
}
}
void thd_increment_bytes_received(ulong length)
{
current_thd->status_var.bytes_received+= length;
USER_STATS *us= thd_get_user_stats(current_thd);
us->bytes_received.inc(length);
}
/**
Reinitialize session status variables for a new or finished session.
*/
void THD::set_status_var_init()
{
memset(&status_var, 0, sizeof(status_var));
}
/**
Set session status vars.
*/
void THD::set_status_var(system_status_var &src)
{
memcpy(&status_var, &src, offsetof(STATUS_VAR, first_norefresh_status_var));
}
/**
Reset session status vars for FLUSH STATISTICS.
*/
void THD::refresh_status_vars()
{
memset(&status_var, 0, offsetof(STATUS_VAR, first_norefresh_status_var));
/* Handle special vars here. */
status_var.tmp_table_disk_usage_peak = status_var.tmp_table_disk_usage;
status_var.filesort_disk_usage_peak = status_var.filesort_disk_usage;
}
void Security_context::init()
{
user= 0;
ip.set("", 0, system_charset_info);
host.set("", 0, system_charset_info);
external_user.set("", 0, system_charset_info);
host_or_ip= "connecting host";
priv_user[0]= priv_host[0]= proxy_user[0]= '\0';
master_access= 0;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
db_access= NO_ACCESS;
#endif
password_expired= false;
}
void Security_context::destroy()
{
if (host.ptr() != my_localhost && host.length())
{
char *c= (char *) host.ptr();
host.set("", 0, system_charset_info);
my_free(c);
}
if (user && user != delayed_user)
{
my_free(user);
user= NULL;
}
if (external_user.length())
{
char *c= (char *) external_user.ptr();
external_user.set("", 0, system_charset_info);
my_free(c);
}
if (ip.length())
{
char *c= (char *) ip.ptr();
ip.set("", 0, system_charset_info);
my_free(c);
}
}
void Security_context::skip_grants()
{
/* Privileges for the user are unknown; everything is allowed */
host_or_ip= (char *)"";
master_access= ~NO_ACCESS;
*priv_user= *priv_host= '\0';
}
bool Security_context::set_user(char *user_arg)
{
my_free(user);
user= my_strdup(user_arg, MYF(0));
return user == 0;
}
String *Security_context::get_host()
{
return (&host);
}
String *Security_context::get_ip()
{
return (&ip);
}
String *Security_context::get_external_user()
{
return (&external_user);
}
void Security_context::set_host(const char *str)
{
uint len= str ? strlen(str) : 0;
host.set(str, len, system_charset_info);
}
void Security_context::set_ip(const char *str)
{
uint len= str ? strlen(str) : 0;
ip.set(str, len, system_charset_info);
}
void Security_context::set_external_user(const char *str)
{
uint len= str ? strlen(str) : 0;
external_user.set(str, len, system_charset_info);
}
void Security_context::set_host(const char * str, size_t len)
{
host.set(str, len, system_charset_info);
host.c_ptr_quick();
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/**
Initialize this security context from the passed in credentials
and activate it in the current thread.
@param thd
@param definer_user
@param definer_host
@param db
@param[out] backup Save a pointer to the current security context
in the thread. In case of success it points to the
saved old context, otherwise it points to NULL.
During execution of a statement, multiple security contexts may
be needed:
- the security context of the authenticated user, used as the
default security context for all top-level statements
- in case of a view or a stored program, possibly the security
context of the definer of the routine, if the object is
defined with SQL SECURITY DEFINER option.
The currently "active" security context is parameterized in THD
member security_ctx. By default, after a connection is
established, this member points at the "main" security context
- the credentials of the authenticated user.
Later, if we would like to execute some sub-statement or a part
of a statement under credentials of a different user, e.g.
definer of a procedure, we authenticate this user in a local
instance of Security_context by means of this method (and
ultimately by means of acl_getroot), and make the
local instance active in the thread by re-setting
thd->security_ctx pointer.
Note, that the life cycle and memory management of the "main" and
temporary security contexts are different.
For the main security context, the memory for user/host/ip is
allocated on system heap, and the THD class frees this memory in
its destructor. The only case when contents of the main security
context may change during its life time is when someone issued
CHANGE USER command.
Memory management of a "temporary" security context is
responsibility of the module that creates it.
@retval TRUE there is no user with the given credentials. The error
is reported in the thread.
@retval FALSE success
*/
bool
Security_context::
change_security_context(THD *thd,
LEX_STRING *definer_user,
LEX_STRING *definer_host,
LEX_STRING *db,
Security_context **backup)
{
bool needs_change;
DBUG_ENTER("Security_context::change_security_context");
DBUG_ASSERT(definer_user->str && definer_host->str);
*backup= NULL;
needs_change= (strcmp(definer_user->str, thd->security_ctx->priv_user) ||
my_strcasecmp(system_charset_info, definer_host->str,
thd->security_ctx->priv_host));
if (needs_change)
{
if (acl_getroot(this, definer_user->str, definer_host->str,
definer_host->str, db->str))
{
my_error(ER_NO_SUCH_USER, MYF(0), definer_user->str,
definer_host->str);
DBUG_RETURN(TRUE);
}
*backup= thd->security_ctx;
thd->security_ctx= this;
}
DBUG_RETURN(FALSE);
}
void
Security_context::restore_security_context(THD *thd,
Security_context *backup)
{
if (backup)
thd->security_ctx= backup;
}
#endif
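/**
Check whether two security contexts belong to the same user:
both user names must be non-NULL and string-equal.
*/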
bool Security_context::user_matches(Security_context *them)
{
return ((user != NULL) && (them->user != NULL) &&
!strcmp(user, them->user));
}
void Log_throttle::new_window(ulonglong now)
{
count= 0;
window_end= now + window_size;
}
void Slow_log_throttle::new_window(ulonglong now)
{
Log_throttle::new_window(now);
total_exec_time= 0;
total_lock_time= 0;
}
Slow_log_throttle::Slow_log_throttle(ulong *threshold, mysql_mutex_t *lock,
ulong window_usecs,
bool (*logger)(THD *, const char *, uint,
struct system_status_var *),
const char *msg)
: Log_throttle(window_usecs, msg), total_exec_time(0), total_lock_time(0),
rate(threshold), log_summary(logger), LOCK_log_throttle(lock)
{
aggregate_sctx.init();
}
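/**
Compute how many log lines were suppressed in the window that just ended
and reset the counter so the summary is only written once.
@param rate the permitted number of lines per window
@return number of suppressed lines (0 if nothing was throttled)
*/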
ulong Log_throttle::prepare_summary(ulong rate)
{
ulong ret= 0;
/*
Previous throttling window is over or rate changed.
Return the number of lines we throttled.
*/
if (count > rate)
{
ret= count - rate;
count= 0; // prevent writing it again.
}
return ret;
}
void Slow_log_throttle::print_summary(THD *thd, ulong suppressed,
ulonglong print_lock_time,
ulonglong print_exec_time)
{
/*
We synthesize these values so the totals in the log will be
correct (just in case somebody analyses them), even if the
start/stop times won't be (as they're an aggregate which will
usually mostly lie within [ window_end - window_size ; window_end ]).
*/
ulonglong save_start_utime= thd->start_utime;
ulonglong save_utime_after_lock= thd->utime_after_lock;
Security_context *save_sctx= thd->security_ctx;
char buf[128];
snprintf(buf, sizeof(buf), summary_template, suppressed);
mysql_mutex_lock(&thd->LOCK_thd_data);
thd->start_utime= thd->current_utime() - print_exec_time;
thd->utime_after_lock= thd->start_utime + print_lock_time;
thd->security_ctx= (Security_context *) &aggregate_sctx;
mysql_mutex_unlock(&thd->LOCK_thd_data);
(*log_summary)(thd, buf, strlen(buf), NULL);
mysql_mutex_lock(&thd->LOCK_thd_data);
thd->security_ctx = save_sctx;
thd->start_utime = save_start_utime;
thd->utime_after_lock= save_utime_after_lock;
mysql_mutex_unlock(&thd->LOCK_thd_data);
}
bool Slow_log_throttle::flush(THD *thd)
{
// Write summary if we throttled.
mysql_mutex_lock(LOCK_log_throttle);
ulonglong print_lock_time= total_lock_time;
ulonglong print_exec_time= total_exec_time;
ulong suppressed_count= prepare_summary(*rate);
mysql_mutex_unlock(LOCK_log_throttle);
if (suppressed_count > 0)
{
print_summary(thd, suppressed_count, print_lock_time, print_exec_time);
return true;
}
return false;
}
bool Slow_log_throttle::log(THD *thd, bool eligible)
{
bool suppress_current= false;
/*
If throttling is enabled, we might have to write a summary even if
the current query is not of the type we handle.
*/
if (*rate > 0)
{
mysql_mutex_lock(LOCK_log_throttle);
ulong suppressed_count= 0;
ulonglong print_lock_time= total_lock_time;
ulonglong print_exec_time= total_exec_time;
ulonglong end_utime_of_query= thd->current_utime();
/*
If the window has expired, we'll try to write a summary line.
The subroutine will know whether we actually need to.
*/
if (!in_window(end_utime_of_query))
{
suppressed_count= prepare_summary(*rate);
// start new window only if this is the statement type we handle
if (eligible)
new_window(end_utime_of_query);
}
if (eligible && inc_log_count(*rate))
{
/*
Current query's logging should be suppressed.
Add its execution time and lock time to totals for the current window.
*/
total_exec_time += (end_utime_of_query - thd->start_utime);
total_lock_time += (thd->utime_after_lock - thd->start_utime);
suppress_current= true;
}
mysql_mutex_unlock(LOCK_log_throttle);
/*
print_summary() is deferred until after we release the locks to
avoid congestion. All variables we hand in are local to the caller,
so things would even be safe if print_summary() hadn't finished by the
time the next one comes around (60s later at the earliest for now).
The current design will produce correct data, but does not guarantee
order (there is a theoretical race condition here where the above
new_window()/unlock() may enable a different thread to print a warning
for the new window before the current thread gets to print_summary()).
If the requirements ever change, add a print_lock to the object that
is held during print_summary(), AND that is briefly locked before
returning from this function if (eligible && !suppress_current).
This should ensure correct ordering of summaries with regard to any
follow-up summaries as well as to any (non-suppressed) warnings (of
the type we handle) from the next window.
*/
if (suppressed_count > 0)
print_summary(thd, suppressed_count, print_lock_time, print_exec_time);
}
return suppress_current;
}
bool Error_log_throttle::log(THD *thd)
{
ulonglong end_utime_of_query= thd->current_utime();
DBUG_EXECUTE_IF("simulate_error_throttle_expiry",
end_utime_of_query+=Log_throttle::LOG_THROTTLE_WINDOW_SIZE;);
/*
If the window has expired, we'll try to write a summary line.
The subroutine will know whether we actually need to.
*/
if (!in_window(end_utime_of_query))
{
ulong suppressed_count= prepare_summary(1);
new_window(end_utime_of_query);
if (suppressed_count > 0)
print_summary(suppressed_count);
}
/*
If this is the first error in the current window then do not suppress it.
*/
return inc_log_count(1);
}
bool Error_log_throttle::flush(THD *thd)
{
// Write summary if we throttled.
ulong suppressed_count= prepare_summary(1);
if (suppressed_count > 0)
{
print_summary(suppressed_count);
return true;
}
return false;
}
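/*
  Illustrative sketch (not part of the server code) of how a throttle of
  this family is meant to be driven by its caller. The name log_throttle_qni
  mirrors the slow-log use case but is an assumption here, and
  write_slow_log_entry() is a hypothetical writer; the point is the
  log()/flush() contract:

    // per statement, in the slow-log path:
    bool suppress= log_throttle_qni.log(thd, query_is_eligible);
    if (!suppress)
      write_slow_log_entry(thd);

    // when throttling is switched off, emit a final summary:
    log_throttle_qni.flush(thd);
*/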
/****************************************************************************
Handling of open and locked tables states.
This is used when we want to open/lock (and then close) some tables when
we already have a set of tables open and locked. We use these methods for
access to mysql.proc table to find definitions of stored routines.
****************************************************************************/
void THD::reset_n_backup_open_tables_state(Open_tables_backup *backup)
{
DBUG_ENTER("reset_n_backup_open_tables_state");
backup->set_open_tables_state(this);
backup->mdl_system_tables_svp= mdl_context.mdl_savepoint();
reset_open_tables_state();
state_flags|= Open_tables_state::BACKUPS_AVAIL;
DBUG_VOID_RETURN;
}
void THD::restore_backup_open_tables_state(Open_tables_backup *backup)
{
DBUG_ENTER("restore_backup_open_tables_state");
mdl_context.rollback_to_savepoint(backup->mdl_system_tables_svp);
/*
Before we will throw away current open tables state we want
to be sure that it was properly cleaned up.
*/
DBUG_ASSERT(open_tables == 0 && temporary_tables == 0 &&
derived_tables == 0 &&
lock == 0 &&
locked_tables_mode == LTM_NONE &&
get_reprepare_observer() == NULL);
set_open_tables_state(backup);
DBUG_VOID_RETURN;
}
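/*
  Illustrative sketch (not part of the server code): the intended bracket
  around temporarily opening a system table such as mysql.proc while other
  tables are already open and locked. open_proc_table() and
  close_proc_table() stand in for the real helpers and are assumptions here.

    Open_tables_backup open_tables_state_backup;
    thd->reset_n_backup_open_tables_state(&open_tables_state_backup);
    TABLE *proc_table= open_proc_table(thd);      // hypothetical
    read_routine_definition(thd, proc_table);     // hypothetical
    close_proc_table(thd, proc_table);            // hypothetical
    thd->restore_backup_open_tables_state(&open_tables_state_backup);
*/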
/**
Check the killed state of a user thread
@param thd user thread
@retval 0 the user thread is active
@retval 1 the user thread has been killed
*/
extern "C" int thd_killed(const MYSQL_THD thd)
{
return(thd->killed);
}
/**
Set the killed status of the current statement.
@param thd user thread connection handle
*/
extern "C" void thd_set_kill_status(const MYSQL_THD thd)
{
thd->send_kill_message();
}
/**
Return the thread id of a user thread
@param thd user thread
@return thread id
*/
extern "C" unsigned long thd_get_thread_id(const MYSQL_THD thd)
{
return((unsigned long)thd->thread_id());
}
/**
Check if batching is allowed for the thread
@param thd user thread
@retval 1 batching allowed
@retval 0 batching not allowed
*/
extern "C" int thd_allow_batch(MYSQL_THD thd)
{
if ((thd->variables.option_bits & OPTION_ALLOW_BATCH) ||
(thd->slave_thread && opt_slave_allow_batching))
return 1;
return 0;
}
enum_tx_isolation thd_get_trx_isolation(const MYSQL_THD thd)
{
return thd->tx_isolation;
}
#ifdef INNODB_COMPATIBILITY_HOOKS
extern "C" const struct charset_info_st *thd_charset(MYSQL_THD thd)
{
return(thd->charset());
}
/**
Return the MySQL username of a user thread
@param thd user thread
@return MySQL username
*/
extern "C" const char *thd_user(MYSQL_THD thd)
{
const char *user = "unknown";
if (thd)
{
if (thd->get_user_connect())
{
user = (const_cast<USER_CONN*>(thd->get_user_connect()))->user;
}
}
return(user);
}
/**
Return the source host of a user thread
@param thd user thread
@return MySQL host
*/
extern "C" const char *thd_host(MYSQL_THD thd)
{
const char *host = "unknown";
if (thd)
{
if (thd->get_user_connect())
{
host = (const_cast<USER_CONN*>(thd->get_user_connect()))->host;
}
}
return(host);
}
/**
OBSOLETE : there's no way to ensure the string is null terminated.
Use thd_query_string() instead.
*/
extern "C" char **thd_query(MYSQL_THD thd)
{
return (&thd->query_string.string.str);
}
/**
Get the current query string for the thread.
@param thd The MySQL internal thread pointer
@return query string and length. May be non-null-terminated.
*/
extern "C" LEX_STRING * thd_query_string (MYSQL_THD thd)
{
return(&thd->query_string.string);
}
/**
Get the current thread id.
@param thd The MySQL internal thread pointer
@return thread identifier
*/
extern "C" ulong thd_thread_id(MYSQL_THD thd)
{
return(thd->thread_id());
}
extern "C" int thd_slave_thread(const MYSQL_THD thd)
{
return(thd->slave_thread);
}
extern "C" int thd_non_transactional_update(const MYSQL_THD thd)
{
return thd->transaction.all.has_modified_non_trans_table();
}
extern "C" int thd_binlog_format(const MYSQL_THD thd)
{
if (mysql_bin_log.is_open() && (thd->variables.option_bits & OPTION_BIN_LOG))
return (int) thd->variables.binlog_format;
else
return BINLOG_FORMAT_UNSPEC;
}
extern "C" void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all)
{
DBUG_ASSERT(thd);
thd->mark_transaction_to_rollback(all);
}
extern "C" bool thd_binlog_filter_ok(const MYSQL_THD thd)
{
return binlog_filter->db_ok(thd->db);
}
extern "C" bool thd_sqlcom_can_generate_row_events(const MYSQL_THD thd)
{
return sqlcom_can_generate_row_events(thd->lex->sql_command);
}
extern "C" enum durability_properties thd_get_durability_property(const MYSQL_THD thd)
{
enum durability_properties ret= HA_REGULAR_DURABILITY;
if (thd != NULL)
ret= thd->durability_property;
return ret;
}
/** Get the auto_increment_offset and auto_increment_increment.
Needed by InnoDB.
@param thd Thread object
@param off auto_increment_offset
@param inc auto_increment_increment */
extern "C" void thd_get_autoinc(const MYSQL_THD thd, ulong* off, ulong* inc)
{
*off = thd->variables.auto_increment_offset;
*inc = thd->variables.auto_increment_increment;
}
/**
Is strict sql_mode set.
Needed by InnoDB.
@param thd Thread object
@retval true sql_mode has strict mode (all or trans) enabled.
@retval false sql_mode does not have strict mode (all or trans) enabled.
*/
extern "C" bool thd_is_strict_mode(const MYSQL_THD thd)
{
return thd->is_strict_mode();
}
#ifndef EMBEDDED_LIBRARY
extern "C" void thd_pool_wait_begin(MYSQL_THD thd, int wait_type);
extern "C" void thd_pool_wait_end(MYSQL_THD thd);
static bool filter_wait_type(int wait_type) {
switch (wait_type) {
case THD_WAIT_SLEEP:
return admission_control_wait_events & ADMISSION_CONTROL_THD_WAIT_SLEEP;
case THD_WAIT_ROW_LOCK:
return admission_control_wait_events & ADMISSION_CONTROL_THD_WAIT_ROW_LOCK;
case THD_WAIT_USER_LOCK:
return admission_control_wait_events & ADMISSION_CONTROL_THD_WAIT_USER_LOCK;
case THD_WAIT_NET_IO:
return admission_control_wait_events & ADMISSION_CONTROL_THD_WAIT_NET_IO;
case THD_WAIT_YIELD:
return admission_control_wait_events & ADMISSION_CONTROL_THD_WAIT_YIELD;
case THD_WAIT_META_DATA_LOCK:
return admission_control_wait_events & ADMISSION_CONTROL_THD_WAIT_META_DATA_LOCK;
case THD_WAIT_COMMIT:
return admission_control_wait_events & ADMISSION_CONTROL_THD_WAIT_COMMIT;
default:
return false;
}
}
/**
Some wait types cannot tolerate a readmission timeout error.
@return true if the wait type needs readmission when done, false otherwise.
*/
static bool readmit_wait_type(int wait_type) {
return wait_type != THD_WAIT_COMMIT;
}
/*
Interface for MySQL Server, plugins and storage engines to report
when they are going to sleep/stall.
SYNOPSIS
thd_wait_begin()
thd Thread object
wait_type Type of wait
1 -- short wait (e.g. for mutex)
2 -- medium wait (e.g. for disk io)
3 -- large wait (e.g. for locked row/table)
NOTES
This is used by the threadpool to have better knowledge of which
threads are currently actively running on CPUs. When a thread
reports that it's going to sleep/stall, the threadpool scheduler is
most likely free to start another thread in the pool. The expected wait
time is simply an indication of how long the wait is expected to
be; the real wait time could be very different.
thd_wait_end MUST be called immediately after waking up again.
*/
extern "C" void thd_wait_begin(MYSQL_THD thd, int wait_type)
{
if (thd) {
if (thd->is_in_ac) {
if (filter_wait_type(wait_type)) {
multi_tenancy_exit_query(thd);
if (!readmit_wait_type(wait_type)) {
thd->readmission_mode = AC_REQUEST_NONE;
} else {
// For explicit yields, we want to send the query to the back of the
// queue to allow for other queries to run. For other yields, it's
// likely we want to finish the query as soon as possible.
thd->readmission_mode = (wait_type == THD_WAIT_YIELD)
? AC_REQUEST_QUERY_READMIT_LOPRI : AC_REQUEST_QUERY_READMIT_HIPRI;
}
// Assert that thd_wait_begin/thd_wait_end calls should match.
// In case they do not, reset the nesting level in release.
DBUG_ASSERT(thd->readmission_nest_level == 0);
thd->readmission_nest_level = 0;
}
} else if (thd->readmission_mode > AC_REQUEST_NONE) {
// Nested thd_wait_begin so need to skip thd_wait_end.
++thd->readmission_nest_level;
}
}
MYSQL_CALLBACK(thread_scheduler, thd_wait_begin, (thd, wait_type));
}
/**
Interface for MySQL Server, plugins and storage engines to report
when they are waking up from a sleep/stall.
@param thd Thread handle
*/
extern "C" void thd_wait_end(MYSQL_THD thd)
{
MYSQL_CALLBACK(thread_scheduler, thd_wait_end, (thd));
if (thd && thd->readmission_mode > AC_REQUEST_NONE) {
if (thd->readmission_nest_level > 0) {
// Skip this nested thd_wait_end call.
--thd->readmission_nest_level;
} else {
if (++thd->readmission_count % 1000 == 0) {
thd->readmission_mode = AC_REQUEST_QUERY_READMIT_LOPRI;
}
multi_tenancy_admit_query(thd, thd->readmission_mode);
thd->readmission_mode = AC_REQUEST_NONE;
}
}
}
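/*
  Illustrative sketch (not part of the server code) of the expected
  bracketing from a storage engine or plugin around a blocking operation;
  wait_for_row_lock() is a hypothetical stand-in for the blocking call:

    thd_wait_begin(thd, THD_WAIT_ROW_LOCK);
    wait_for_row_lock();          // the actual blocking call
    thd_wait_end(thd);            // MUST follow immediately on wakeup
*/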
#else
extern "C" void thd_wait_begin(MYSQL_THD thd, int wait_type)
{
/* do NOTHING for the embedded library */
return;
}
extern "C" void thd_wait_end(MYSQL_THD thd)
{
/* do NOTHING for the embedded library */
return;
}
#endif
#endif // INNODB_COMPATIBILITY_HOOKS
/**
Functions to set and get transaction position.
These functions are used to set the transaction position for the
transaction written when committing this transaction. They also
extract the max GTID of all executed transactions.
*/
/**@{*/
void THD::set_trans_pos(const char *file, my_off_t pos,
const Cached_group *gtid_group)
{
DBUG_ENTER("THD::set_trans_pos");
DBUG_ASSERT(((file == 0) && (pos == 0)) || ((file != 0) && (pos != 0)));
if (file)
{
DBUG_PRINT("enter", ("file: %s, pos: %llu", file, pos));
// Only the file name should be used, not the full path
m_trans_log_file= file + dirname_length(file);
if (!m_trans_fixed_log_file)
m_trans_fixed_log_file= (char*) alloc_root(&main_mem_root, FN_REFLEN+1);
DBUG_ASSERT(strlen(m_trans_log_file) <= FN_REFLEN);
strcpy(m_trans_fixed_log_file, m_trans_log_file);
}
else
{
m_trans_log_file= NULL;
m_trans_fixed_log_file= NULL;
}
m_trans_end_pos= pos;
if (gtid_group)
{
global_sid_lock->rdlock();
gtid_group->spec.to_string(global_sid_map, trans_gtid);
global_sid_lock->unlock();
m_trans_gtid= trans_gtid;
if (gtid_group->
spec.gtid.greater_than(mysql_bin_log.engine_binlog_max_gtid) ||
mysql_bin_log.engine_binlog_max_gtid.sidno !=
gtid_group->spec.gtid.sidno)
{
mysql_bin_log.engine_binlog_max_gtid= gtid_group->spec.gtid;
}
global_sid_lock->rdlock();
mysql_bin_log.engine_binlog_max_gtid.
to_string(global_sid_map, trans_max_gtid);
global_sid_lock->unlock();
m_trans_max_gtid= trans_max_gtid;
}
else
m_trans_gtid= NULL;
DBUG_PRINT("return", ("m_trans_log_file: %s, m_trans_fixed_log_file: %s, "
"m_trans_end_pos: %llu, m_trans_gtid: %s, m_trans_max_gtid: %s",
m_trans_log_file, m_trans_fixed_log_file,
m_trans_end_pos, m_trans_gtid, m_trans_max_gtid));
DBUG_VOID_RETURN;
}
void THD::get_trans_pos(const char **file_var, my_off_t *pos_var,
const char **gtid_var, const char **max_gtid_var) const
{
DBUG_ENTER("THD::get_trans_pos");
if (file_var)
*file_var = m_trans_log_file;
if (pos_var)
*pos_var= m_trans_end_pos;
if (gtid_var)
*gtid_var = m_trans_gtid;
if (max_gtid_var)
*max_gtid_var = m_trans_max_gtid;
DBUG_PRINT("return", ("file: %s, pos: %llu",
file_var ? *file_var : "<none>",
pos_var ? *pos_var : 0));
DBUG_VOID_RETURN;
}
void THD::get_trans_fixed_pos(const char **file_var, my_off_t *pos_var) const
{
DBUG_ENTER("THD::get_trans_fixed_pos");
if (file_var)
*file_var = m_trans_fixed_log_file;
if (pos_var)
*pos_var= m_trans_end_pos;
DBUG_PRINT("return", ("file: %s, pos: %llu",
file_var ? *file_var : "<none>",
pos_var ? *pos_var : 0));
DBUG_VOID_RETURN;
}
void THD::append_slave_gtid_info(uint id, const char* db, const char* gtid)
{
slave_gtid_infos.push_back(st_slave_gtid_info{id, db, gtid});
}
std::vector<st_slave_gtid_info> THD::get_slave_gtid_info() const
{
return slave_gtid_infos;
}
void THD::clear_slave_gtid_info()
{
slave_gtid_infos.clear();
}
/**@}*/
void THD::get_trans_marker(int64_t *term, int64_t *index) const
{
/* The get and set (below) are currently called serially during different
* stages of ordered commit, hence they are mutually exclusive. If
* this changes later, then we may need to protect them with locks */
*term= term_;
*index= index_;
}
void THD::set_trans_marker(int64_t term, int64_t index)
{
term_= term;
index_= index;
}
void THD::clear_safe_purge_file()
{
safe_purge_file.clear();
}
void THD::set_safe_purge_file(std::string purge_file)
{
safe_purge_file= std::move(purge_file);
}
std::string THD::get_safe_purge_file() const
{
return safe_purge_file;
}
/****************************************************************************
Handling of statement states in functions and triggers.
This is used to ensure that the function/trigger gets a clean state
to work with and does not cause any side effects on the calling statement.
It also allows most stored functions and triggers to replicate even
if they use items that would normally be stored in the binary
log (like last_insert_id() etc...).
The following things are done:
- Disable binary logging for the duration of the statement
- Disable multi-result-sets for the duration of the statement
- Value of last_insert_id() is saved and restored
- Value set by 'SET INSERT_ID=#' is reset and restored
- Value for found_rows() is reset and restored
- examined_row_count is added to the total
- cuted_fields is added to the total
- new savepoint level is created and destroyed
NOTES:
Seed for random() is saved only for the first usage of RAND().
We reset examined_row_count and cuted_fields and add these to the
result to ensure that if we have a bug that would reset these within
a function, we are not losing any rows from the main statement.
We do not reset value of last_insert_id().
****************************************************************************/
void THD::reset_sub_statement_state(Sub_statement_state *backup,
uint new_state)
{
#ifndef EMBEDDED_LIBRARY
/* BUG#33029, if we are replicating from a buggy master, reset
auto_inc_intervals_forced to prevent substatement
(triggers/functions) from using erroneous INSERT_ID value
*/
if (rpl_master_erroneous_autoinc(this))
{
DBUG_ASSERT(backup->auto_inc_intervals_forced.nb_elements() == 0);
auto_inc_intervals_forced.swap(&backup->auto_inc_intervals_forced);
}
#endif
backup->option_bits= variables.option_bits;
backup->count_cuted_fields= count_cuted_fields;
backup->in_sub_stmt= in_sub_stmt;
backup->enable_slow_log= enable_slow_log;
backup->limit_found_rows= limit_found_rows;
backup->examined_row_count= m_examined_row_count;
backup->sent_row_count= m_sent_row_count;
backup->cuted_fields= cuted_fields;
backup->client_capabilities= client_capabilities;
backup->savepoints= transaction.savepoints;
backup->first_successful_insert_id_in_prev_stmt=
first_successful_insert_id_in_prev_stmt;
backup->first_successful_insert_id_in_cur_stmt=
first_successful_insert_id_in_cur_stmt;
if ((!lex->requires_prelocking() || is_update_query(lex->sql_command)) &&
!is_current_stmt_binlog_format_row())
{
variables.option_bits&= ~OPTION_BIN_LOG;
}
if ((backup->option_bits & OPTION_BIN_LOG) &&
is_update_query(lex->sql_command) &&
!is_current_stmt_binlog_format_row())
mysql_bin_log.start_union_events(this, this->query_id);
/* Disable result sets */
client_capabilities &= ~CLIENT_MULTI_RESULTS;
in_sub_stmt|= new_state;
m_examined_row_count= 0;
m_sent_row_count= 0;
cuted_fields= 0;
transaction.savepoints= 0;
first_successful_insert_id_in_cur_stmt= 0;
}
void THD::restore_sub_statement_state(Sub_statement_state *backup)
{
DBUG_ENTER("THD::restore_sub_statement_state");
#ifndef EMBEDDED_LIBRARY
/* BUG#33029, if we are replicating from a buggy master, restore
auto_inc_intervals_forced so that the top statement can use the
INSERT_ID value set before this statement.
*/
if (rpl_master_erroneous_autoinc(this))
{
backup->auto_inc_intervals_forced.swap(&auto_inc_intervals_forced);
DBUG_ASSERT(backup->auto_inc_intervals_forced.nb_elements() == 0);
}
#endif
/*
To save resources we want to release savepoints which were created
during execution of function or trigger before leaving their savepoint
level. It is enough to release first savepoint set on this level since
all later savepoints will be released automatically.
*/
if (transaction.savepoints)
{
SAVEPOINT *sv;
for (sv= transaction.savepoints; sv->prev; sv= sv->prev)
{}
/* ha_release_savepoint() never returns error. */
(void)ha_release_savepoint(this, sv);
}
count_cuted_fields= backup->count_cuted_fields;
transaction.savepoints= backup->savepoints;
variables.option_bits= backup->option_bits;
in_sub_stmt= backup->in_sub_stmt;
enable_slow_log= backup->enable_slow_log;
first_successful_insert_id_in_prev_stmt=
backup->first_successful_insert_id_in_prev_stmt;
first_successful_insert_id_in_cur_stmt=
backup->first_successful_insert_id_in_cur_stmt;
limit_found_rows= backup->limit_found_rows;
set_sent_row_count(backup->sent_row_count);
client_capabilities= backup->client_capabilities;
/*
If we've left sub-statement mode, reset the fatal error flag.
Otherwise keep the current value, to propagate it up the sub-statement
stack.
NOTE: is_fatal_sub_stmt_error can be set only if we've been in the
sub-statement mode.
*/
if (!in_sub_stmt)
is_fatal_sub_stmt_error= false;
if ((variables.option_bits & OPTION_BIN_LOG) && is_update_query(lex->sql_command) &&
!is_current_stmt_binlog_format_row())
mysql_bin_log.stop_union_events(this);
/*
The following is added to the old values as we are interested in the
total complexity of the query
*/
inc_examined_row_count(backup->examined_row_count);
cuted_fields+= backup->cuted_fields;
DBUG_VOID_RETURN;
}
void THD::set_statement(Statement *stmt)
{
mysql_mutex_lock(&LOCK_thd_data);
Statement::set_statement(stmt);
mysql_mutex_unlock(&LOCK_thd_data);
}
void THD::set_sent_row_count(ha_rows count)
{
m_sent_row_count= count;
MYSQL_SET_STATEMENT_ROWS_SENT(m_statement_psi, m_sent_row_count);
}
void THD::set_examined_row_count(ha_rows count)
{
m_examined_row_count= count;
MYSQL_SET_STATEMENT_ROWS_EXAMINED(m_statement_psi, m_examined_row_count);
}
void THD::set_accessed_rows_and_keys(ulonglong count)
{
m_accessed_rows_and_keys= count;
}
void THD::check_limit_rows_examined()
{
if (++m_accessed_rows_and_keys > lex->limit_rows_examined_cnt)
killed= ABORT_QUERY;
}
ulonglong THD::get_rows_examined()
{
return m_accessed_rows_and_keys;
}
void THD::check_yield()
{
DBUG_ASSERT(last_yield_counter <= yield_counter);
yield_counter++;
if (last_yield_counter + admission_control_yield_freq < yield_counter) {
thd_wait_begin(this, THD_WAIT_YIELD);
thd_wait_end(this);
last_yield_counter = yield_counter;
}
}
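/*
  Illustrative sketch (not part of the server code): check_yield() is meant
  to be called from long row-processing loops so that, once more than
  admission_control_yield_freq iterations have passed since the last yield,
  the query briefly exits and re-enters admission control via
  thd_wait_begin()/thd_wait_end(). read_next_row() and process_row() are
  hypothetical:

    while (read_next_row(&row))
    {
      process_row(row);
      thd->check_yield();        // may yield the admission slot
    }
*/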
void THD::inc_sent_row_count(ha_rows count)
{
m_sent_row_count+= count;
MYSQL_SET_STATEMENT_ROWS_SENT(m_statement_psi, m_sent_row_count);
}
void THD::inc_examined_row_count(ha_rows count)
{
m_examined_row_count+= count;
MYSQL_SET_STATEMENT_ROWS_EXAMINED(m_statement_psi, m_examined_row_count);
}
void THD::inc_status_created_tmp_disk_tables()
{
status_var_increment(status_var.created_tmp_disk_tables);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(inc_statement_created_tmp_disk_tables)(m_statement_psi, 1);
#endif
}
void THD::inc_status_created_tmp_tables()
{
status_var_increment(status_var.created_tmp_tables);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(inc_statement_created_tmp_tables)(m_statement_psi, 1);
#endif
}
void THD::inc_status_select_full_join()
{
status_var_increment(status_var.select_full_join_count);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(inc_statement_select_full_join)(m_statement_psi, 1);
#endif
}
void THD::inc_status_select_full_range_join()
{
status_var_increment(status_var.select_full_range_join_count);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(inc_statement_select_full_range_join)(m_statement_psi, 1);
#endif
}
void THD::inc_status_select_range()
{
status_var_increment(status_var.select_range_count);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(inc_statement_select_range)(m_statement_psi, 1);
#endif
}
void THD::inc_status_select_range_check()
{
status_var_increment(status_var.select_range_check_count);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(inc_statement_select_range_check)(m_statement_psi, 1);
#endif
}
void THD::inc_status_select_scan()
{
status_var_increment(status_var.select_scan_count);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(inc_statement_select_scan)(m_statement_psi, 1);
#endif
}
void THD::inc_status_sort_merge_passes()
{
status_var_increment(status_var.filesort_merge_passes);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(inc_statement_sort_merge_passes)(m_statement_psi, 1);
#endif
}
void THD::inc_status_sort_range()
{
status_var_increment(status_var.filesort_range_count);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(inc_statement_sort_range)(m_statement_psi, 1);
#endif
}
void THD::inc_status_sort_rows(ha_rows count)
{
statistic_add(status_var.filesort_rows, count, &LOCK_status);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(inc_statement_sort_rows)(m_statement_psi, count);
#endif
}
void THD::inc_status_sort_scan()
{
status_var_increment(status_var.filesort_scan_count);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(inc_statement_sort_scan)(m_statement_psi, 1);
#endif
}
void THD::set_status_no_index_used()
{
server_status|= SERVER_QUERY_NO_INDEX_USED;
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(set_statement_no_index_used)(m_statement_psi);
#endif
}
void THD::set_status_no_good_index_used()
{
server_status|= SERVER_QUERY_NO_GOOD_INDEX_USED;
#ifdef HAVE_PSI_STATEMENT_INTERFACE
PSI_STATEMENT_CALL(set_statement_no_good_index_used)(m_statement_psi);
#endif
}
void THD::capture_system_thread_id()
{
#ifdef TARGET_OS_LINUX
system_thread_id = syscall(SYS_gettid);
#else // TARGET_OS_LINUX
system_thread_id = 0;
#endif // TARGET_OS_LINUX
}
void THD::set_command(enum enum_server_command command)
{
m_command= command;
#ifdef HAVE_PSI_THREAD_INTERFACE
PSI_STATEMENT_CALL(set_thread_command)(m_command);
#endif
}
/** Assign a new value to thd->query. */
void THD::set_query(const CSET_STRING &string_arg, bool need_lock)
{
if (need_lock)
mysql_mutex_lock(&LOCK_thd_data);
set_query_inner(string_arg);
if (need_lock)
mysql_mutex_unlock(&LOCK_thd_data);
#ifdef HAVE_PSI_THREAD_INTERFACE
PSI_THREAD_CALL(set_thread_info)(query(), query_length());
#endif
}
/** Assign a new value to thd->query and thd->query_id. */
void THD::set_query_and_id(char *query_arg, uint32 query_length_arg,
const CHARSET_INFO *cs,
query_id_t new_query_id)
{
mysql_mutex_lock(&LOCK_thd_data);
set_query_inner(query_arg, query_length_arg, cs);
query_id= new_query_id;
mysql_mutex_unlock(&LOCK_thd_data);
}
/** Assign a new value to thd->query_id. */
void THD::set_query_id(query_id_t new_query_id)
{
mysql_mutex_lock(&LOCK_thd_data);
query_id= new_query_id;
mysql_mutex_unlock(&LOCK_thd_data);
}
/** Assign a new value to thd->mysys_var. */
void THD::set_mysys_var(struct st_my_thread_var *new_mysys_var)
{
mysql_mutex_lock(&LOCK_thd_data);
mysys_var= new_mysys_var;
mysql_mutex_unlock(&LOCK_thd_data);
}
/**
Leave explicit LOCK TABLES or prelocked mode and restore value of
transaction sentinel in MDL subsystem.
*/
void THD::leave_locked_tables_mode()
{
if (locked_tables_mode == LTM_LOCK_TABLES)
{
/*
When leaving LOCK TABLES mode we have to change the duration of most
of the metadata locks being held, except for HANDLER and GRL locks,
to transactional for them to be properly released at UNLOCK TABLES.
*/
mdl_context.set_transaction_duration_for_all_locks();
/*
Make sure we don't release the global read lock and commit blocker
when leaving LTM.
*/
global_read_lock.set_explicit_lock_duration(this);
/* Also ensure that we don't release metadata locks for open HANDLERs. */
if (handler_tables_hash.records)
mysql_ha_set_explicit_lock_duration(this);
}
locked_tables_mode= LTM_NONE;
}
void THD::get_definer(LEX_USER *definer)
{
binlog_invoker();
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
if (slave_thread && has_invoker())
{
definer->user = invoker_user;
definer->host= invoker_host;
definer->password.str= NULL;
definer->password.length= 0;
definer->plugin.str= (char *) "";
definer->plugin.length= 0;
definer->auth.str= (char *) "";
definer->auth.length= 0;
}
else
#endif
get_default_definer(this, definer);
}
#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
void THD::set_connection_certificate() {
DBUG_ASSERT(connection_certificate_buf == nullptr);
connection_certificate_buf = get_peer_cert_info(
false, &connection_certificate_buf_len);
}
void THD::reset_connection_certificate() {
if (connection_certificate_buf) {
my_free(connection_certificate_buf);
connection_certificate_buf = nullptr;
connection_certificate_buf_len = 0;
}
}
const char *THD::connection_certificate() const {
return connection_certificate_buf;
}
uint32 THD::connection_certificate_length() const {
return connection_certificate_buf ? connection_certificate_buf_len : 0;
}
char *THD::get_peer_cert_info(bool display, int *cert_len)
{
Vio* vio = get_net()->vio;
if (!vio_ok() || !vio->ssl_arg) {
return NULL;
}
SSL *ssl= (SSL*) vio->ssl_arg;
// extract user cert ref from the thread
X509 *cert= SSL_get_peer_certificate(ssl);
if (!cert) {
return NULL;
}
// Create new X509 buffer abstraction
BIO *bio = BIO_new(BIO_s_mem());
if (!bio) {
X509_free(cert);
return NULL;
}
// Print the certificate to the buffer
int status;
if (display) {
status = X509_print(bio, cert);
} else {
status = PEM_write_bio_X509(bio, cert);
}
if (status != 1) {
BIO_free(bio);
X509_free(cert);
return NULL;
}
int buflen = BIO_pending(bio);
char *cert_buf = (char *)my_malloc(buflen, MYF(MY_WME));
if (!cert_buf) {
BIO_free(bio);
X509_free(cert);
return NULL;
}
*cert_len = BIO_read(bio, cert_buf, buflen);
if (*cert_len == -1) {
*cert_len = 0;
my_free(cert_buf);
BIO_free(bio);
X509_free(cert);
return NULL;
}
DBUG_ASSERT(*cert_len <= buflen);
BIO_free(bio);
X509_free(cert);
return cert_buf;
}
#endif
/**
Mark transaction to rollback and mark error as fatal to a sub-statement.
@param all TRUE <=> rollback main transaction.
*/
void THD::mark_transaction_to_rollback(bool all)
{
/*
There is no point in setting is_fatal_sub_stmt_error unless
we are actually in_sub_stmt.
*/
if (in_sub_stmt)
is_fatal_sub_stmt_error= true;
transaction_rollback_request= all;
/*
Aborted transactions can not be IGNOREd.
Switch off the IGNORE flag for the current
SELECT_LEX. This should allow my_error()
to report the error and abort the execution
flow, even in presence
of IGNORE clause.
*/
if (lex->current_select)
lex->current_select->no_error= false;
}
/***************************************************************************
Handling of XA id caching
***************************************************************************/
mysql_mutex_t LOCK_xid_cache;
HASH xid_cache;
extern "C" uchar *xid_get_hash_key(const uchar *, size_t *, my_bool);
extern "C" void xid_free_hash(void *);
uchar *xid_get_hash_key(const uchar *ptr, size_t *length,
my_bool not_used MY_ATTRIBUTE((unused)))
{
*length=((XID_STATE*)ptr)->xid.key_length();
return ((XID_STATE*)ptr)->xid.key();
}
void xid_free_hash(void *ptr)
{
if (!((XID_STATE*)ptr)->in_thd)
my_free(ptr);
}
#ifdef HAVE_PSI_INTERFACE
static PSI_mutex_key key_LOCK_xid_cache;
static PSI_mutex_info all_xid_mutexes[]=
{
{ &key_LOCK_xid_cache, "LOCK_xid_cache", PSI_FLAG_GLOBAL}
};
static void init_xid_psi_keys(void)
{
const char* category= "sql";
int count;
count= array_elements(all_xid_mutexes);
mysql_mutex_register(category, all_xid_mutexes, count);
}
#endif /* HAVE_PSI_INTERFACE */
bool xid_cache_init()
{
#ifdef HAVE_PSI_INTERFACE
init_xid_psi_keys();
#endif
mysql_mutex_init(key_LOCK_xid_cache, &LOCK_xid_cache, MY_MUTEX_INIT_FAST);
return my_hash_init(&xid_cache, &my_charset_bin, 100, 0, 0,
xid_get_hash_key, xid_free_hash, 0) != 0;
}
void xid_cache_free()
{
if (my_hash_inited(&xid_cache))
{
my_hash_free(&xid_cache);
mysql_mutex_destroy(&LOCK_xid_cache);
}
}
XID_STATE *xid_cache_search(XID *xid)
{
mysql_mutex_lock(&LOCK_xid_cache);
XID_STATE *res=(XID_STATE *)my_hash_search(&xid_cache, xid->key(),
xid->key_length());
mysql_mutex_unlock(&LOCK_xid_cache);
return res;
}
bool xid_cache_insert(XID *xid, enum xa_states xa_state)
{
XID_STATE *xs;
my_bool res;
mysql_mutex_lock(&LOCK_xid_cache);
if (my_hash_search(&xid_cache, xid->key(), xid->key_length()))
res=0;
else if (!(xs=(XID_STATE *)my_malloc(sizeof(*xs), MYF(MY_WME))))
res=1;
else
{
xs->xa_state=xa_state;
xs->xid.set(xid);
xs->in_thd=0;
xs->rm_error=0;
res=my_hash_insert(&xid_cache, (uchar*)xs);
}
mysql_mutex_unlock(&LOCK_xid_cache);
return res;
}
bool xid_cache_insert(XID_STATE *xid_state)
{
mysql_mutex_lock(&LOCK_xid_cache);
if (my_hash_search(&xid_cache, xid_state->xid.key(),
xid_state->xid.key_length()))
{
mysql_mutex_unlock(&LOCK_xid_cache);
my_error(ER_XAER_DUPID, MYF(0));
return true;
}
bool res= my_hash_insert(&xid_cache, (uchar*)xid_state);
mysql_mutex_unlock(&LOCK_xid_cache);
return res;
}
void xid_cache_delete(XID_STATE *xid_state)
{
mysql_mutex_lock(&LOCK_xid_cache);
my_hash_delete(&xid_cache, (uchar *)xid_state);
mysql_mutex_unlock(&LOCK_xid_cache);
}
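/*
  Illustrative sketch (not part of the server code) of the XID cache
  lifecycle as used by XA handling; xid stands in for a prepared XID:

    xid_cache_init();                          // once at server startup
    xid_cache_insert(&xid, XA_PREPARED);       // register a recovered XID
    XID_STATE *xs= xid_cache_search(&xid);     // look it up on XA COMMIT
    if (xs)
      xid_cache_delete(xs);                    // drop it once resolved
    xid_cache_free();                          // at server shutdown
*/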
void THD::set_next_event_pos(const char* _filename, ulonglong _pos)
{
char*& filename= binlog_next_event_pos.file_name;
if (filename == NULL)
{
/* First time, allocate maximal buffer */
filename= (char*) my_malloc(FN_REFLEN+1, MYF(MY_WME));
if (filename == NULL) return;
}
assert(strlen(_filename) <= FN_REFLEN);
strcpy(filename, _filename);
filename[ FN_REFLEN ]= 0;
binlog_next_event_pos.pos= _pos;
};
void THD::clear_next_event_pos()
{
if (binlog_next_event_pos.file_name != NULL)
{
my_free(binlog_next_event_pos.file_name);
}
binlog_next_event_pos.file_name= NULL;
binlog_next_event_pos.pos= 0;
};
void THD::set_user_connect(USER_CONN *uc)
{
DBUG_ENTER("THD::set_user_connect");
m_user_connect= uc;
DBUG_VOID_RETURN;
}
void THD::increment_user_connections_counter()
{
DBUG_ENTER("THD::increment_user_connections_counter");
m_user_connect->connections++;
DBUG_VOID_RETURN;
}
void THD::decrement_user_connections_counter()
{
DBUG_ENTER("THD::decrement_user_connections_counter");
DBUG_ASSERT(m_user_connect->connections > 0);
m_user_connect->connections--;
DBUG_VOID_RETURN;
}
void THD::increment_con_per_hour_counter()
{
DBUG_ENTER("THD::decrement_conn_per_hour_counter");
m_user_connect->conn_per_hour++;
DBUG_VOID_RETURN;
}
void THD::increment_updates_counter()
{
DBUG_ENTER("THD::increment_updates_counter");
m_user_connect->updates++;
DBUG_VOID_RETURN;
}
void THD::increment_questions_counter()
{
DBUG_ENTER("THD::increment_updates_counter");
m_user_connect->questions++;
DBUG_VOID_RETURN;
}
/*
Reset per-hour user resource limits when it has been more than
an hour since they were last checked
SYNOPSIS:
time_out_user_resource_limits()
NOTE:
This assumes that the LOCK_user_conn mutex has been acquired, so it is
safe to test and modify members of the USER_CONN structure.
*/
void THD::time_out_user_resource_limits()
{
mysql_mutex_assert_owner(&LOCK_user_conn);
ulonglong check_time= start_utime;
DBUG_ENTER("time_out_user_resource_limits");
/* If more than an hour since last check, reset resource checking */
if (check_time - m_user_connect->reset_utime >= LL(3600000000))
{
m_user_connect->questions=1;
m_user_connect->updates=0;
m_user_connect->conn_per_hour=0;
m_user_connect->reset_utime= check_time;
}
DBUG_VOID_RETURN;
}
/**
Determine if binlogging is disabled for this session
@retval true binlogging is disabled for the current statement
(could be because the binlog is closed or the binlog option
is set to false).
@retval false the current statement will be binlogged
*/
bool THD::is_current_stmt_binlog_disabled() const
{
return (!(variables.option_bits & OPTION_BIN_LOG) ||
!mysql_bin_log.is_open());
}
bool THD::skip_unique_check()
{
return rli_slave && rli_slave->get_skip_unique_check();
}
/**
This function selects which session tracker to use. If a Srv_session
is currently attached to this connection we want to redirect all session
tracking information to the Srv_session's tracking structures.
*/
Session_tracker* THD::get_tracker() {
return attached_srv_session
? &attached_srv_session->get_thd()->session_tracker
: &session_tracker;
}
static std::string net_read_str(const char **ptr)
{
size_t len = net_field_length((uchar**)ptr);
const char *str = *ptr;
*ptr += len;
return std::string(str, len);
}
static void set_attrs_map(const char *ptr, size_t length,
std::unordered_map<std::string, std::string> &attrs_map)
{
const char *end = ptr + length;
attrs_map.clear();
while (ptr < end)
{
std::string key = net_read_str(&ptr);
std::string value = net_read_str(&ptr);
attrs_map[key] = value;
}
}
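/*
  The attribute blob parsed above uses the same length-prefixed layout as
  connection attributes on the wire: a repeated sequence of
  <length><key bytes><length><value bytes>, where each length is a
  net_field_length()-encoded integer. An illustrative (hypothetical) blob,
  written with split string literals to keep the hex escapes unambiguous:

    "\x06" "caller" "\x0a" "my_service"
    "\x08" "async_id" "\x04" "1234"

  would populate attrs_map with {"caller" -> "my_service",
  "async_id" -> "1234"}.
*/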
void THD::set_connection_attrs(const char *attrs, size_t length)
{
mysql_mutex_lock(&LOCK_thd_data);
set_attrs_map(attrs, length, connection_attrs_map);
mysql_mutex_unlock(&LOCK_thd_data);
}
void THD::set_query_attrs(const char *attrs, size_t length)
{
mysql_mutex_lock(&LOCK_thd_data);
set_attrs_map(
attrs,
length,
query_attrs_map);
mysql_mutex_unlock(&LOCK_thd_data);
}
void THD::set_query_attrs(
const std::unordered_map<std::string, std::string>& attrs) {
mysql_mutex_lock(&LOCK_thd_data);
query_attrs_map = attrs;
mysql_mutex_unlock(&LOCK_thd_data);
}
int THD::parse_query_info_attr()
{
static const std::string query_info_key = "query_info";
auto it = this->query_attrs_map.find(query_info_key);
if (it == this->query_attrs_map.end())
return 0;
ptree root;
try
{
std::istringstream query_info_attr(it->second);
boost::property_tree::read_json(query_info_attr, root);
}
catch(const boost::property_tree::json_parser::json_parser_error& e)
{
return -1; // invalid json
}
try
{
boost::optional<std::string> trace_id =
root.get_optional<std::string>("traceid");
if (trace_id)
this->trace_id = *trace_id;
this->query_type = root.get<std::string>("query_type");
this->num_queries = root.get<uint64_t>("num_queries");
}
catch(const boost::property_tree::ptree_error& e)
{
return -1; // invalid key or value
}
return 0;
}
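/*
  An illustrative "query_info" query attribute value accepted by the parser
  above (keys other than these three are ignored; "traceid" is optional):

    {"traceid":"abc123", "query_type":"select", "num_queries":3}

  would set trace_id to "abc123", query_type to "select" and num_queries
  to 3; a value that is not valid JSON, or one missing "query_type" or
  "num_queries", makes the function return -1.
*/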
static std::string get_shard_id(const std::string& db_metadata)
{
try {
#ifdef HAVE_RAPIDJSON
rapidjson::Document db_metadata_root;
// The local_db_metadata format should be:
// {"shard":"<shard_name>", "replicaset":"<replicaset_id>"}
if (db_metadata_root.Parse(db_metadata.c_str()).HasParseError() ||
!db_metadata_root.IsObject()) {
return {};
}
const auto iter= db_metadata_root.FindMember("shard");
std::string shard_id;
if (iter != db_metadata_root.MemberEnd()) {
shard_id= iter->value.GetString();
}
#else
boost::property_tree::ptree db_metadata_root;
std::istringstream is(db_metadata);
boost::property_tree::read_json(is, db_metadata_root);
std::string shard_id = db_metadata_root.get<std::string>("shard");
#endif
return shard_id;
}
catch (std::exception)
{
return {};
}
}
void THD::set_shard_id()
{
this->shard_id = get_shard_id(this->db_metadata);
}
static const String async_token("async-",default_charset_info);
static const String mtcaller_token("mtcaller-",default_charset_info);
static const String mtthrottle_token("mtthrottle-",default_charset_info);
/*
serialize_client_attrs
Extracts and serializes client attributes into the buffer
THD::client_attrs_string.
This is only calculated once per command (as opposed to per statement),
and cleared at the end of the command. This is because attributes are
attached to commands, not statements.
*/
void THD::serialize_client_attrs()
{
DBUG_ASSERT(!in_capture_sql_plan());
if (sql_stats_control != SQL_INFO_CONTROL_ON)
return;
if (client_attrs_string.is_empty()) {
std::vector<std::pair<String, String>> client_attrs;
bool found_async_id = false;
bool found_caller = false;
mysql_mutex_lock(&LOCK_global_sql_stats);
// Populate caller, original_caller, async_id, etc
for(const std::string& name_iter : client_attribute_names) {
bool found = false;
auto it = query_attrs_map.find(name_iter);
if (it != query_attrs_map.end()) {
found = true;
} else if ((it = connection_attrs_map.find(name_iter))
!= connection_attrs_map.end()) {
found = true;
}
if (found) {
if (name_iter == "async_id") {
found_async_id = true;
}
if (name_iter == "caller") {
found_caller = true;
}
client_attrs.emplace_back(String(it->first.data(), it->first.size(),
&my_charset_bin),
String(it->second.data(), it->second.size(),
&my_charset_bin));
}
}
mysql_mutex_unlock(&LOCK_global_sql_stats);
// Populate async id (inspired from find_async_tag)
//
// Search only in first 100 characters to avoid scanning the whole query.
// The async id is usually near the beginning.
//
// Only look if async_id was not passed down.
String query100(query(), MY_MIN(100, query_length()), &my_charset_bin);
if (!found_async_id) {
int pos = query100.strstr(async_token);
if (pos != -1) {
pos += async_token.length();
int epos = pos;
while(epos < (int)query100.length() && std::isdigit(query100[epos])) {
epos++;
}
client_attrs.emplace_back(String("async_id", &my_charset_bin),
String(&query100[pos], epos - pos, &my_charset_bin));
}
}
// Populate mt caller from query comments, if it wasn't passed as query or
// connection attribute already. We only search the first 100 characters to
// avoid scanning the whole query and only consider the first match. The
// tag must be terminated with a colon(:).
// Example - mtcaller-tsp_global/myservice:
if (write_throttle_parse_query_comments) {
if (!found_caller) {
int pos = query100.strstr(mtcaller_token);
if (pos != -1) {
pos += mtcaller_token.length();
int epos = pos;
while(epos < (int)query100.length() && query100[epos] != ':') {
epos++;
}
client_attrs.emplace_back(String("caller", &my_charset_bin),
String(&query100[pos], epos - pos, &my_charset_bin));
}
}
// Populate mt_throttle_okay attribute from query comments, if it wasn't
// passed as query or connection attribute already. We only search the
// first 100 characters to avoid scanning the whole query and only
// consider the first match. There are two possible tags mtthrottle-E
// for error and mtthrottle-W for warning mode.
if (variables.write_throttle_tag_only &&
query_attrs_map.find("mt_throttle_okay") == query_attrs_map.end()) {
int pos = query100.strstr(mtthrottle_token);
if (pos != -1) {
pos += mtthrottle_token.length();
mysql_mutex_lock(&LOCK_thd_data);
if (query100[pos] == 'E') {
query_attrs_map.emplace("mt_throttle_okay", "ERROR");
}
if (query100[pos] == 'W') {
query_attrs_map.emplace("mt_throttle_okay", "WARN");
}
mysql_mutex_unlock(&LOCK_thd_data);
}
}
}
// Serialize into JSON
auto& buf = client_attrs_string;
buf.q_append('{');
for (size_t i = 0; i < client_attrs.size(); i++) {
const auto& p = client_attrs[i];
if (i > 0) {
buf.q_append(C_STRING_WITH_LEN(", "));
}
buf.q_append('\'');
buf.q_append(p.first.ptr(), MY_MIN(100, p.first.length()));
buf.q_append(C_STRING_WITH_LEN("' : '"));
buf.q_append(p.second.ptr(), MY_MIN(100, p.second.length()));
buf.q_append('\'');
}
buf.q_append('}');
mysql_mutex_lock(&LOCK_thd_data);
compute_md5_hash((char *)mt_key_val[CLIENT_ID].data(),
client_attrs_string.ptr(),
client_attrs_string.length());
mt_key_val_set[CLIENT_ID] = true;
mysql_mutex_unlock(&LOCK_thd_data);
}
}
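/*
  Illustrative example of the buffer produced above (attribute values are
  hypothetical): with query attributes caller=my_service and async_id=42,
  client_attrs_string would contain

    {'caller' : 'my_service', 'async_id' : '42'}

  and its MD5 hash is stored as the CLIENT_ID multi-tenancy key. Keys and
  values are each truncated to 100 characters before serialization.
*/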
/**
Helper function to adjust global usage/peak counters by accumulated delta.
@param unreported_delta pending delta for global usage update
@param g_value global usage value
@param g_peak global usage peak
@param g_period_peak global usage peak for some period
*/
static void adjust_global_by(longlong &unreported_delta, ulonglong &g_value,
ulonglong &g_peak, ulonglong &g_period_peak)
{
/* Counters could be disabled at runtime, re-enable requires restart. */
if (max_tmp_disk_usage == TMP_DISK_USAGE_DISABLED)
return;
/*
This is where threads on the secondary could race to find which
one is doing global update. Only one will grab the whole unreported
amount.
*/
longlong delta = my_atomic_fas64(&unreported_delta, 0);
/*
It's possible that delta now is less than the increment but it would
be very rare so just do the update regardless.
*/
if (delta != 0)
{
ulonglong old_value = my_atomic_add64((int64 *)&g_value, delta);
ulonglong new_value = old_value + delta;
/* Check for over and underflow. */
DBUG_ASSERT(delta >= 0 ? new_value >= old_value : new_value < old_value);
/* Now update the global peak. */
ulonglong old_peak = my_atomic_load64((int64 *)&g_peak);
while (old_peak < new_value)
{
/* If Compare-And-Swap is unsuccessful then old_peak is updated. */
if (my_atomic_cas64((int64 *)&g_peak, (int64 *)&old_peak, new_value))
break;
}
/* Update the period peak. */
old_peak = my_atomic_load64((int64 *)&g_period_peak);
while (old_peak < new_value)
{
/* If Compare-And-Swap is unsuccessful then old_peak is updated. */
if (my_atomic_cas64((int64 *)&g_period_peak, (int64 *)&old_peak,
new_value))
break;
}
}
}
/**
Helper function to adjust local session and global usage/peak counters
by specified delta. The global updates are batched to avoid frequent
updates.
@param value session usage value
@param peak session usage peak
@param delta signed usage delta in bytes
@param unreported_delta pending delta for global usage update
@param g_value global usage value
@param g_peak global usage peak
@param stmt_peak session usage peak for the current statement
@param g_period_peak global usage peak for some period
*/
static void adjust_by(ulonglong &value, ulonglong &peak, longlong delta,
longlong &unreported_delta, ulonglong &g_value,
ulonglong &g_peak, ulonglong &stmt_peak,
ulonglong &g_period_peak)
{
/*
Atomic operation is only needed for the secondary threads that steal
tmp tables from one another (see mts_move_temp_tables_to_thd).
*/
ulonglong old_value = my_atomic_add64((int64 *)&value, delta);
ulonglong new_value = old_value + delta;
/* Check for over and underflow. */
DBUG_ASSERT(delta >= 0 ? new_value >= old_value : new_value < old_value);
/* Correct on primary, best effort on secondary. */
if (peak < new_value)
peak = new_value;
if (stmt_peak < new_value)
stmt_peak = new_value;
/* Avoid frequent updates of global usage. */
const ulonglong DISK_USAGE_REPORTING_INCREMENT = 8192;
longlong new_delta = my_atomic_add64(&unreported_delta, delta) + delta;
ulonglong abs_delta = new_delta >= 0 ? new_delta : -new_delta;
if (abs_delta >= DISK_USAGE_REPORTING_INCREMENT)
{
adjust_global_by(unreported_delta, g_value, g_peak, g_period_peak);
}
}
/**
Adjust tmp table disk usage for current session.
@param delta signed delta value in bytes
*/
void THD::adjust_tmp_table_disk_usage(longlong delta)
{
adjust_by(status_var.tmp_table_disk_usage,
status_var.tmp_table_disk_usage_peak, delta,
unreported_global_tmp_table_delta,
global_status_var.tmp_table_disk_usage,
global_status_var.tmp_table_disk_usage_peak,
m_stmt_tmp_table_disk_usage_peak,
tmp_table_disk_usage_period_peak);
}
/**
Adjust filesort disk usage for current session.
@param delta signed delta value in bytes
*/
void THD::adjust_filesort_disk_usage(longlong delta)
{
adjust_by(status_var.filesort_disk_usage,
status_var.filesort_disk_usage_peak, delta,
unreported_global_filesort_delta,
global_status_var.filesort_disk_usage,
global_status_var.filesort_disk_usage_peak,
m_stmt_filesort_disk_usage_peak,
filesort_disk_usage_period_peak);
}
/**
Propagate pending global disk usage at the end of session.
*/
void THD::propagate_pending_global_disk_usage()
{
adjust_global_by(unreported_global_tmp_table_delta,
global_status_var.tmp_table_disk_usage,
global_status_var.tmp_table_disk_usage_peak,
tmp_table_disk_usage_period_peak);
adjust_global_by(unreported_global_filesort_delta,
global_status_var.filesort_disk_usage,
global_status_var.filesort_disk_usage_peak,
filesort_disk_usage_period_peak);
}
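/*
  Illustrative sketch (not part of the server code) of how the adjusters
  above are meant to be driven from tmp-table / filesort IO paths;
  bytes_written and bytes_released are hypothetical values:

    thd->adjust_tmp_table_disk_usage(bytes_written);    // growth: positive
    thd->adjust_tmp_table_disk_usage(-bytes_released);  // shrink: negative

  Session counters are updated immediately; the global counters and peaks
  only move once the accumulated per-session delta reaches the 8KB
  reporting increment, or when the session ends via
  propagate_pending_global_disk_usage().
*/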
/*
Set the priority of the underlying OS thread.
@param pri The priority to set the thread to.
@return true on success, false otherwise.
*/
bool THD::set_thread_priority(int pri)
{
DBUG_ENTER("THD::set_thread_priority");
bool ret= true;
if (get_thread_priority() != pri) {
mysql_mutex_lock(&LOCK_thd_data);
ret= set_system_thread_priority(system_thread_id, pri);
if (ret)
{
thread_priority= pri;
}
mysql_mutex_unlock(&LOCK_thd_data);
}
DBUG_RETURN(ret);
}
/*
Set DML monitoring start time to measure resource usage
*/
void THD::set_dml_start_time()
{
/* if the dml_start_time is already initialized then do nothing */
if (dml_start_time_is_set)
return;
/* if stmt_start_write_time is set, use that value */
if (m_stmt_start_write_time_is_set)
{
dml_start_time = m_stmt_start_write_time;
}
#if HAVE_CLOCK_GETTIME
dml_start_result = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &dml_start_time);
#elif HAVE_GETRUSAGE
dml_start_result = getrusage(RUSAGE_THREAD, &dml_start_time);
#else
#error implement getting current thread CPU time on this platform
#endif
/* remember that we have initialized dml_start_time */
dml_start_time_is_set = true;
}
/*
Get DML CPU time
- Returns the CPU time elapsed from the time DML monitoring
has started
*/
ulonglong THD::get_dml_cpu_time()
{
#if HAVE_CLOCK_GETTIME
timespec time_end;
if (dml_start_result == 0 &&
(clock_gettime(CLOCK_THREAD_CPUTIME_ID, &time_end) == 0))
{
/* diff_timespec returns nanoseconds */
ulonglong dml_cpu_time = diff_timespec(time_end, dml_start_time);
dml_cpu_time /= 1000000; /* convert to milliseconds */
return dml_cpu_time;
}
#elif HAVE_GETRUSAGE
struct rusage rusage_end;
if (dml_start_result == 0 &&
(getrusage(RUSAGE_THREAD, &rusage_end) == 0))
{
ulonglong val_utime =
RUSAGE_DIFF_USEC(rusage_end.ru_utime, dml_start_time.ru_utime);
ulonglong val_stime =
RUSAGE_DIFF_USEC(rusage_end.ru_stime, dml_start_time.ru_stime);
ulonglong dml_cpu_time = val_utime + val_stime; /* Units: microseconds */
dml_cpu_time /= 1000; /* convert to milliseconds */
return dml_cpu_time;
}
#else
#error implement getting current thread CPU time on this platform
#endif
return 0;
}
/**
check if the CPU execution time limit has been exceeded
@param stats statistics obtained from handler
@return true if the CPU execution time limit has been exceeded and the
query needs to be errored out. Returns false otherwise.
Note: The function will register a warning as note if the variable
'write_control_level' is set to 'NOTE'. If the variable is set
to 'WARN' then a regular warning is raised.
The function will return TRUE for an exceeded CPU execution time limit
only if the variable 'write_control_level' is set to ERROR
and if the variables 'write_cpu_limit_milliseconds' and
'write_time_check_batch' are set to non-zero values.
*/
bool THD::dml_execution_cpu_limit_exceeded(ha_statistics* stats)
{
/* enforcing DML execution time limit is disabled if
* - write_control_level is set to 'OFF' or
* - write_cpu_limit_milliseconds is set to 0 or
* - write_time_check_batch is set to 0
* - sql_log_bin is set to 0
*/
if (write_control_level == CONTROL_LEVEL_OFF ||
write_cpu_limit_milliseconds == 0 ||
write_time_check_batch == 0 || variables.sql_log_bin == 0)
return false;
/* if the variable 'write_control_level' is set to 'NOTE' or 'WARN'
* then stop checking for CPU execution time limit any further
* because the warning needs to be raised only once per statement/transaction
*/
if ((write_control_level == CONTROL_LEVEL_NOTE || /* NOTE */
write_control_level == CONTROL_LEVEL_WARN) &&/* WARN */
trx_dml_cpu_time_limit_warning)
return false;
ulonglong dml_rows_processed = trx_dml_row_count + get_dml_row_count(stats);
/* bail out if there are no rows processed for DML */
if (dml_rows_processed == 0)
return false;
/* first row processed */
if (dml_rows_processed == 1)
set_dml_start_time();
else if (dml_rows_processed % write_time_check_batch == 0)
{
ulonglong dml_cpu_time = get_dml_cpu_time();
DBUG_EXECUTE_IF("dbug.force_long_running_query",
dml_cpu_time=write_cpu_limit_milliseconds;);
if (dml_cpu_time >= (ulonglong) write_cpu_limit_milliseconds)
{
/* raise warning if 'write_control_level' is 'NOTE' or 'WARN' */
if (write_control_level == CONTROL_LEVEL_NOTE ||
write_control_level == CONTROL_LEVEL_WARN)
{
/* raise warning */
push_warning_printf(this,
(write_control_level == CONTROL_LEVEL_NOTE) ?
Sql_condition::WARN_LEVEL_NOTE :
Sql_condition::WARN_LEVEL_WARN,
ER_WARN_WRITE_EXCEEDED_CPU_LIMIT_MILLISECONDS,
ER(ER_WARN_WRITE_EXCEEDED_CPU_LIMIT_MILLISECONDS));
/* remember that the warning has been raised so that further
* warnings will not be raised for the same statement/transaction
*/
trx_dml_cpu_time_limit_warning = true;
}
else if (write_control_level == CONTROL_LEVEL_ERROR)
return true;
}
}
return false;
}
/**
get DML row count
- sum of the rows deleted, rows inserted and rows updated
@param stats statistics obtained from handler
@return returns the DML row count
*/
ulonglong THD::get_dml_row_count(ha_statistics* stats)
{
ulonglong dml_rows_processed = 0;
if (!lex)
return dml_rows_processed;
/* delete statement */
if (lex->sql_command == SQLCOM_DELETE ||
lex->sql_command == SQLCOM_DELETE_MULTI)
dml_rows_processed += stats->rows_deleted;
/* update statement */
else if (lex->sql_command == SQLCOM_UPDATE ||
lex->sql_command == SQLCOM_UPDATE_MULTI)
dml_rows_processed += stats->rows_updated;
/* insert statement */
else if (lex->sql_command == SQLCOM_INSERT ||
lex->sql_command == SQLCOM_INSERT_SELECT)
{
dml_rows_processed += stats->rows_inserted;
/* INSERT ... ON DUPLICATE KEY UPDATE */
if (lex->duplicates == DUP_UPDATE)
dml_rows_processed += stats->rows_updated;
}
return dml_rows_processed;
}
/*
Start the timer for CPU write time to be collected for write_statistics
*/
void THD::set_stmt_start_write_time()
{
if (m_stmt_start_write_time_is_set)
return;
int result;
#if HAVE_CLOCK_GETTIME
result = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &m_stmt_start_write_time);
#elif HAVE_GETRUSAGE
result = getrusage(RUSAGE_THREAD, &stmt_start_write_time);
#endif
m_stmt_start_write_time_is_set = result == 0;
}
/*
Capture the total CPU time (in microseconds) spent writing the rows for the statement
*/
void THD::set_stmt_total_write_time()
{
#if HAVE_CLOCK_GETTIME
timespec time_end;
if (m_stmt_start_write_time_is_set &&
(clock_gettime(CLOCK_THREAD_CPUTIME_ID, &time_end) == 0))
{
/* diff_timespec returns nanoseconds */
m_stmt_total_write_time = diff_timespec(time_end, m_stmt_start_write_time);
m_stmt_total_write_time /= 1000; /* convert to microseconds */
}
#elif HAVE_GETRUSAGE
struct rusage rusage_end;
if (m_stmt_start_write_time_is_set &&
(getrusage(RUSAGE_THREAD, &rusage_end) == 0))
{
ulonglong val_utime =
RUSAGE_DIFF_USEC(rusage_end.ru_utime, stmt_start_write_time.ru_utime);
ulonglong val_stime =
RUSAGE_DIFF_USEC(rusage_end.ru_stime, stmt_start_write_time.ru_stime);
m_stmt_total_write_time = val_utime + val_stime; /* Units: microseconds */
}
#else
#error implement getting current thread CPU time on this platform
#endif
}
/*
Returns all MT keys for the current write query
*/
void THD::get_mt_keys_for_write_query(
std::array<std::string, WRITE_STATISTICS_DIMENSION_COUNT> & keys
){
// Get keys for all the target dimensions to update write stats for
char md5_hex_buffer[MD5_BUFF_LENGTH];
// USER
keys[0] = get_user_name();
// CLIENT ID
array_to_hex(md5_hex_buffer, mt_key_value(THD::CLIENT_ID).data(), MD5_HASH_SIZE);
keys[1].assign(md5_hex_buffer, MD5_BUFF_LENGTH);
// SHARD
keys[2] = get_db_name();
// SQL ID
array_to_hex(md5_hex_buffer, mt_key_value(THD::SQL_ID).data(), MD5_HASH_SIZE);
keys[3].assign(md5_hex_buffer, MD5_BUFF_LENGTH);
}
/**
Should the query be throttled (error/warning) to avoid replication lag, based on query tags.
Returns CONTROL_LEVEL_WARN for warning only, CONTROL_LEVEL_ERROR for throwing an error,
and CONTROL_LEVEL_OFF if the query tag isn't present.
*/
enum_control_level THD::get_mt_throttle_tag_level() const {
auto it = query_attrs_map.find("mt_throttle_okay");
// For tag_only traffic(TAO), it should be throttled only if query attribute mt_throttle_okay is present
if (variables.write_throttle_tag_only && it != query_attrs_map.end()) {
if (it->second == "WARN")
return CONTROL_LEVEL_WARN;
if (it->second == "ERROR")
return CONTROL_LEVEL_ERROR;
}
return CONTROL_LEVEL_OFF;
}
/*
Returns the (integer) value of the specified attribute from the
query attribute map or connection attribute map, in this order.
If the attribute is not found, or its value exceeds the max
value passed in, then the specified default value is returned.
*/
ulong THD::get_query_or_connect_attr_value(
const char *attr_name,
ulong default_value,
ulong max_value)
{
for (const auto& map : { &query_attrs_map, &connection_attrs_map }) {
auto it = map->find(attr_name);
if (it != map->end()) {
ulong attr_value = 0;
if (!stoul_noexcept(it->second.c_str(), &attr_value) &&
attr_value < max_value)
return attr_value;
}
}
return default_value;
}
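/*
  Illustrative call (not part of the server code); the attribute name and
  bounds are hypothetical, with 500 as the default and 60000 as the max:

    ulong timeout_ms=
      get_query_or_connect_attr_value("client_timeout_ms", 500, 60000);

  Query attributes take precedence over connection attributes, and
  non-numeric or out-of-range values fall back to the default.
*/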
/**
Create SQL stats snapshot if sql_stats_auto_snapshot is enabled.
*/
void THD::auto_create_sql_stats_snapshot()
{
if (!variables.sql_stats_snapshot &&
variables.sql_stats_auto_snapshot &&
!toggle_sql_stats_snapshot(this))
{
variables.sql_stats_snapshot = TRUE;
m_created_auto_stats_snapshot = true;
}
}
/**
Release auto created SQL stats snapshot.
*/
void THD::release_auto_created_sql_stats_snapshot()
{
if (m_created_auto_stats_snapshot)
{
DBUG_ASSERT(variables.sql_stats_snapshot);
/* Release cannot fail. */
toggle_sql_stats_snapshot(this);
variables.sql_stats_snapshot = FALSE;
m_created_auto_stats_snapshot = false;
}
}
static const char missing_digest_msg[] =
"<digest_missing: sql_stats_control required>";
/**
Get query digest
*/
void THD::get_query_digest(String *digest_buffer, const char **str,
uint32 *length, const CHARSET_INFO **cs) {
if (m_digest != NULL) {
compute_digest_text(&m_digest->m_digest_storage, digest_buffer);
}
if (digest_buffer->is_empty() ||
(digest_buffer->length() == 1 &&
digest_buffer->ptr()[0] == 0)) {
/* We couldn't compute digest - we need sql_stats_control */
*str = missing_digest_msg;
*length = sizeof(missing_digest_msg) / sizeof(char) - 1;
*cs = &my_charset_utf8_bin;
} else {
*str = digest_buffer->c_ptr_safe();
*length = digest_buffer->length();
*cs = digest_buffer->charset();
}
}
/**
Check whether the provided DB and table name combination is unique.
It also inserts the combination into the list if it is unique.
@param unique_full_names list of full names to check for duplicates in
@param db_name database name
@param tab_name table name
@return true if the combination is unique
*/
static bool check_unique(std::list<std::string> &unique_full_names,
const char *db_name, const char *tab_name) {
std::string full_tname = db_name;
full_tname.append(".");
full_tname.append(tab_name);
/* skip duplicate entries */
if (find(unique_full_names.begin(), unique_full_names.end(), full_tname) !=
unique_full_names.end())
return false;
unique_full_names.emplace_back(full_tname);
return true;
}
/**
  Get the read and write tables in the query. Each table is returned as a
  pair where the first value is the DB name and the second value is the
  table name. Read tables and write tables go into separate lists.

  @return pair of lists (read tables, write tables), each holding
          (DB name, table name) pairs
*/
using Table_List = std::list<std::pair<const char *, const char *>>;
std::pair<Table_List, Table_List> THD::get_read_write_tables() {
Table_List read_tables;
Table_List write_tables;
std::list<std::string> unique_read_tables; // unique read table names
std::list<std::string> unique_write_tables; // unique write table names
for (const TABLE_LIST *table_iter = lex->query_tables; table_iter;
table_iter = table_iter->next_global) {
if (table_iter->is_view_or_derived()) continue;
if (table_iter->updating) {
if (check_unique(unique_write_tables, table_iter->get_db_name(),
table_iter->get_table_name()))
write_tables.emplace_back(table_iter->get_db_name(),
table_iter->get_table_name());
} else {
if (check_unique(unique_read_tables, table_iter->get_db_name(),
table_iter->get_table_name()))
read_tables.emplace_back(table_iter->get_db_name(),
table_iter->get_table_name());
}
}
return std::make_pair(read_tables, write_tables);
}
/**
Get tables in the query. The tables are returned as a list of pairs
where the first value is the dbname and the second value is the table name.
@return List of pairs: dbname, table name
*/
Table_List THD::get_query_tables() {
Table_List uniq_tables;
std::list<std::string> uniq_tables_str;
/*
* pointers (to db name, table names etc) are only valid until
* the end of the current query
*/
DBUG_ASSERT(this == current_thd);
/* iterate through the list of tables */
for (const TABLE_LIST *table = lex->query_tables; table != nullptr;
table = table->next_global) {
// not a view/derived table && has not been added already
if (!table->is_view_or_derived() &&
check_unique(uniq_tables_str, table->get_db_name(),
table->get_table_name()))
uniq_tables.emplace_back(table->get_db_name(), table->get_table_name());
}
return uniq_tables;
}
/**
Get tables in the query. The tables are returned as a list of pairs
where the first value is the dbname and the second value is the table name.
@param thd Thread pointer
@return List of pairs: dbname, table name
*/
std::list<std::pair<const char*, const char*> > thd_get_query_tables(
THD *thd) {
return thd->get_query_tables();
}
/**
Get the value of the query attribute
@param qattr_key Name of the query attribute
@return Value of the query attribute 'qattr_key'
*/
const std::string &THD::get_query_attr(const std::string &qattr_key) {
/* find the key in the query attributes */
auto it = query_attrs_map.find(qattr_key);
if (it != query_attrs_map.end()) {
return it->second;
}
/* return empty result */
return emptyStr;
}
/**
Get the value of the connection attribute
@param cattr_key Name of the connection attribute
  @return Value of the connection attribute 'cattr_key'
*/
const std::string &THD::get_connection_attr(const std::string &cattr_key) {
/* find the key in the connection attributes */
auto it = connection_attrs_map.find(cattr_key);
if (it != connection_attrs_map.end()) {
return it->second;
}
/* return empty result */
return emptyStr;
}
/**
Get the value of the query attribute
@param thd The MySQL internal thread pointer
@param qattr_key Name of the query attribute
@return Value of the query attribute 'qattr_key'
*/
const std::string &thd_get_query_attr(THD *thd, const std::string &qattr_key) {
return thd->get_query_attr(qattr_key);
}
/**
Get the value of the connection attribute
@param thd The MySQL internal thread pointer
@param cattr_key Name of the connection attribute
  @return Value of the connection attribute 'cattr_key'
*/
const std::string &thd_get_connection_attr(THD *thd,
const std::string &cattr_key) {
return thd->get_connection_attr(cattr_key);
}
/**
Get the query SQL ID
@param thd The MySQL internal thread pointer
@return the SQL ID of the query
*/
const std::string thd_get_sql_id(THD *thd) {
char sql_id_string[MD5_BUFF_LENGTH+1];
if (thd->mt_key_is_set(THD::SQL_ID)) {
    array_to_hex(sql_id_string, thd->mt_key_value(THD::SQL_ID).data(),
                 MD5_HASH_SIZE);
    sql_id_string[MD5_BUFF_LENGTH] = '\0';
} else {
sql_id_string[0] = '\0';
}
return std::string(sql_id_string);
}
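/**
  Add a key/value pair to the session response attribute tracker, if the
  tracker is enabled for this session.

  @param thd       The MySQL internal thread pointer
  @param rattr_key Name of the response attribute
  @param rattr_val Value of the response attribute
*/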
void thd_add_response_attr(
THD *thd, const std::string &rattr_key, const std::string &rattr_val)
{
auto tracker= thd->session_tracker.get_tracker(SESSION_RESP_ATTR_TRACKER);
if (tracker->is_enabled())
{
LEX_CSTRING key= { rattr_key.c_str(), rattr_key.length() };
LEX_CSTRING value= { rattr_val.c_str(), rattr_val.length() };
tracker->mark_as_changed(thd, &key, &value);
}
}
#ifndef EMBEDDED_LIBRARY
/**
  Interface for a storage engine to report a row lock conflict.
  The caller must guarantee that wait_for is not freed while this function
  is being called.
*/
extern "C"
void thd_report_row_lock_wait(THD* self, THD *wait_for)
{
DBUG_ENTER("thd_report_row_lock_wait");
DBUG_EXECUTE_IF("report_row_lock_wait", {
const char act[]= "now signal signal.reached wait_for signal.done";
DBUG_ASSERT(opt_debug_sync_timeout > 0);
DBUG_ASSERT(!debug_sync_set_action(self, STRING_WITH_LEN(act)));
};);
if (unlikely(self != NULL && wait_for != NULL &&
is_mts_worker(self) && is_mts_worker(wait_for)))
commit_order_manager_check_deadlock(self, wait_for);
DBUG_VOID_RETURN;
}
#else
extern "C"
void thd_report_row_lock_wait(THD* self, THD *thd_wait_for)
{
return;
}
#endif
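/*
  Thd_wait_scope is an RAII helper: constructing it calls thd_wait_begin()
  and destroying it calls thd_wait_end(). A minimal usage sketch (the wait
  type shown is just an example):

    {
      Thd_wait_scope wait_scope(thd, THD_WAIT_ROW_LOCK);
      // ... blocking operation ...
    }  // thd_wait_end() runs automatically here
*/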
/**
Call thd_wait_begin to mark the wait start.
*/
Thd_wait_scope::Thd_wait_scope(THD *thd, int wait_type) : m_thd(thd) {
thd_wait_begin(m_thd, wait_type);
}
/**
Call thd_wait_end to mark the wait end.
*/
Thd_wait_scope::~Thd_wait_scope() {
thd_wait_end(m_thd);
}
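/**
  Apply the session's dscp_on_socket value to the client connection by
  setting the IP TOS (IPv4) or TCLASS (IPv6) option on the socket.

  @return true on success (or for local/unix-domain connections, where the
          option does not apply), false on an invalid DSCP value or a
          socket error.
*/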
bool THD::set_dscp_on_socket() {
int dscp_val = variables.dscp_on_socket;
if (dscp_val < 0 || dscp_val >= 64) {
// NO_LINT_DEBUG
sql_print_warning("Invalid DSCP_QOS value in global var: %d",
dscp_val);
return false;
}
const NET* net = get_net();
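  // DSCP occupies the upper 6 bits of the IP TOS/TCLASS byte, hence the shift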
int tos= dscp_val << 2;
// figure out what domain is the socket in
uint16_t test_family;
socklen_t len= sizeof(test_family);
int res= mysql_socket_getsockopt(net->vio->mysql_socket, SOL_SOCKET,
SO_DOMAIN, (void*)&test_family, &len);
  // Fail if we can't determine whether the socket is IPv6 or IPv4
if (res != 0) {
// NO_LINT_DEBUG
sql_print_warning("Failed to get socket domain "
"while adjusting DSCP_QOS (error: %s)",
strerror(errno));
return false;
}
#ifdef HAVE_IPV6
if (test_family == AF_INET6) {
res= mysql_socket_setsockopt(net->vio->mysql_socket, IPPROTO_IPV6,
IPV6_TCLASS, &tos, sizeof(tos));
}
else
#endif
if (test_family == AF_INET) {
res= mysql_socket_setsockopt(net->vio->mysql_socket, IPPROTO_IP,
IP_TOS, &tos, sizeof(tos));
} else if (test_family == PF_LOCAL) {
    // skip setting TOS/TCLASS for local (Unix domain socket) connections
return true;
} else {
// NO_LINT_DEBUG
sql_print_warning("Failed to get socket family %d", test_family);
return false;
}
if (res != 0) {
// NO_LINT_DEBUG
sql_print_warning("Failed to set TOS/TCLASS "
"with (error: %s) DSCP: %d.",
strerror(errno), tos);
return false;
}
return true;
}
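/**
  Attach the cumulative SQL stats buffer for the current query and enable
  periodic stats updates for this THD.
*/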
void THD::set_query_cumulative_stats(SHARED_SQL_STATS* cumulative_sql_stats) {
this->cumulative_sql_stats = cumulative_sql_stats;
should_update_stats = true;
}
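/**
  Reset per-query counters: restart CPU time capture and remember the
  examined-row count at the start of the query.
*/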
void THD::reset_counters_for_query() {
init_sql_cpu_capture();
set_last_examined_row_count();
}
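/**
  Reset the examined-row baseline so the next periodic stats update measures
  only rows examined since this point.
*/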
void THD::reset_counters_for_next_subquery_stats() {
set_last_examined_row_count();
}
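/**
  Start CPU time capture for the current statement: clear the accumulated
  sql_cpu value and record the thread's current CPU clock (via
  clock_gettime or getrusage, depending on platform support).
*/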
void THD::init_sql_cpu_capture() {
sql_cpu = 0;
#if HAVE_CLOCK_GETTIME
last_cpu_info_result = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &cpu_start_timespec.time_begin_cpu_capture);
#elif HAVE_GETRUSAGE
last_cpu_info_result = getrusage(RUSAGE_THREAD, &cpu_start_timespec.rusage_begin_cpu_capture);
#endif
}
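/** Remember the current accessed rows/keys count as the new baseline. */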
void THD::set_last_examined_row_count() {
last_examined_row_count = get_accessed_rows_and_keys();
}
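/**
  Periodically update SQL statistics while a statement is still running.
  The update is skipped unless stats collection is active for this query and
  at least min_examined_row_limit_for_sql_stats rows have been examined since
  the last update.
*/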
void THD::update_sql_stats_periodic() {
if (!cumulative_sql_stats ||
!should_update_stats ||
!m_digest ||
(stmt_start == 0) ||
(min_examined_row_limit_for_sql_stats == 0)) {
return;
}
if (min_examined_row_limit_for_sql_stats >
(get_accessed_rows_and_keys() - last_examined_row_count)) {
return;
}
update_sql_cpu_for_query(true /*update_stats*/);
}
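/**
  Accumulate the CPU time spent since the last capture point into the
  per-database, per-user and per-statement counters, then reset the capture
  point. When update_stats is true, also flush the in-progress statement's
  SQL stats (statement_completed == false).

  @param update_stats whether to also update the cumulative SQL stats
*/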
void THD::update_sql_cpu_for_query(bool update_stats) {
DB_STATS *dbstats = db_stats;
int cpu_res = -1;
bool can_update_stats = false;
THD *thd = current_thd;
USER_STATS *us = thd_get_user_stats(thd);
#if HAVE_CLOCK_GETTIME
timespec time_end;
if (((cpu_res = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &time_end)) == 0) &&
last_cpu_info_result == 0) {
ulonglong diff = diff_timespec(time_end, cpu_start_timespec.time_begin_cpu_capture);
// convert to microseconds
diff /= 1000;
if (dbstats)
dbstats->update_cpu_stats_tot(diff);
us->microseconds_cpu.inc(diff);
sql_cpu += diff;
cpu_start_timespec.time_begin_cpu_capture = time_end;
can_update_stats = true;
}
#elif HAVE_GETRUSAGE
struct rusage rusage_end;
if (((cpu_res = getrusage(RUSAGE_THREAD, &rusage_end)) == 0) &&
last_cpu_info_result == 0) {
ulonglong diffu =
RUSAGE_DIFF_USEC(rusage_end.ru_utime, cpu_start_timespec.rusage_begin_cpu_capture.ru_utime);
ulonglong diffs =
RUSAGE_DIFF_USEC(rusage_end.ru_stime, cpu_start_timespec.rusage_begin_cpu_capture.ru_stime);
if (dbstats)
dbstats->update_cpu_stats(diffu, diffs);
us->microseconds_cpu.inc(diffu + diffs);
us->microseconds_cpu_user.inc(diffu);
us->microseconds_cpu_sys.inc(diffs);
sql_cpu += diffu + diffs;
cpu_start_timespec.rusage_begin_cpu_capture = rusage_end;
can_update_stats = true;
}
#endif
if (can_update_stats && update_stats && should_update_stats) {
DBUG_ASSERT(stmt_start > 0);
stmt_elapsed_utime = (ulonglong)my_timer_to_microseconds(my_timer_since(stmt_start));
update_sql_stats(
current_thd,
cumulative_sql_stats,
query(),
query_length(),
false /*statement_completed*/);
DEBUG_SYNC(thd, "update_sql_stats_periodic");
reset_counters_for_next_subquery_stats();
}
last_cpu_info_result = cpu_res;
}