in erts/emulator/beam/bif.c [4543:4863]
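/*
 * erlang:system_flag(Flag, Value)
 *
 * Sets a system-wide flag selected by the first argument; most clauses
 * return the previous value of the flag. Any flag or value that no
 * clause accepts falls through to the shared `error` label and raises
 * badarg.
 */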
BIF_RETTYPE system_flag_2(BIF_ALIST_2)
{
Sint n;
if (BIF_ARG_1 == am_multi_scheduling) {
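/* multi_scheduling: block or unblock multi-scheduling, either for all
 * schedulers or only the normal ones. Transitions that cannot complete
 * immediately either re-trap into this BIF or yield before returning
 * the resulting state. */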
if (BIF_ARG_2 == am_block || BIF_ARG_2 == am_unblock
|| BIF_ARG_2 == am_block_normal || BIF_ARG_2 == am_unblock_normal) {
int block = (BIF_ARG_2 == am_block
|| BIF_ARG_2 == am_block_normal);
int normal = (BIF_ARG_2 == am_block_normal
|| BIF_ARG_2 == am_unblock_normal);
switch (erts_block_multi_scheduling(BIF_P,
ERTS_PROC_LOCK_MAIN,
block,
normal,
0)) {
case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
BIF_RET(am_blocked);
case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
BIF_RET(am_blocked_normal);
case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked,
am_multi_scheduling);
case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked_normal,
am_multi_scheduling);
case ERTS_SCHDLR_SSPND_DONE:
BIF_RET(am_enabled);
case ERTS_SCHDLR_SSPND_YIELD_RESTART:
ERTS_VBUMP_ALL_REDS(BIF_P);
BIF_TRAP2(BIF_TRAP_EXPORT(BIF_system_flag_2),
BIF_P, BIF_ARG_1, BIF_ARG_2);
case ERTS_SCHDLR_SSPND_YIELD_DONE:
ERTS_BIF_YIELD_RETURN_X(BIF_P, am_enabled,
am_multi_scheduling);
case ERTS_SCHDLR_SSPND_EINVAL:
goto error;
default:
ASSERT(0);
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
break;
}
}
} else if (BIF_ARG_1 == am_schedulers_online) {
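/* schedulers_online: change the number of online normal schedulers and
 * return the previous count, re-trapping or yielding while the change
 * is carried out. */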
Sint old_no;
if (!is_small(BIF_ARG_2))
goto error;
switch (erts_set_schedulers_online(BIF_P,
ERTS_PROC_LOCK_MAIN,
signed_val(BIF_ARG_2),
&old_no, 0)) {
case ERTS_SCHDLR_SSPND_DONE:
BIF_RET(make_small(old_no));
case ERTS_SCHDLR_SSPND_YIELD_RESTART:
ERTS_VBUMP_ALL_REDS(BIF_P);
BIF_TRAP2(BIF_TRAP_EXPORT(BIF_system_flag_2),
BIF_P, BIF_ARG_1, BIF_ARG_2);
case ERTS_SCHDLR_SSPND_YIELD_DONE:
ERTS_BIF_YIELD_RETURN_X(BIF_P, make_small(old_no),
am_schedulers_online);
case ERTS_SCHDLR_SSPND_EINVAL:
goto error;
default:
ASSERT(0);
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
break;
}
} else if (BIF_ARG_1 == am_fullsweep_after) {
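/* fullsweep_after: default number of generational GCs before a process
 * is fullswept; the value is clamped to 16 bits and swapped in
 * atomically so the old default can be returned. */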
Uint16 nval;
Uint oval;
if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) {
goto error;
}
nval = (n > (Sint) ((Uint16) -1)) ? ((Uint16) -1) : ((Uint16) n);
oval = (Uint) erts_atomic32_xchg_nob(&erts_max_gen_gcs,
(erts_aint32_t) nval);
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_min_heap_size) {
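/* min_heap_size: default minimum heap size (in words) for newly spawned
 * processes, rounded to the next valid heap size and installed while
 * thread progress is blocked. */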
int oval = H_MIN_SIZE;
if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) {
goto error;
}
erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_thr_progress_block();
H_MIN_SIZE = erts_next_heap_size(n, 0);
erts_thr_progress_unblock();
erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_min_bin_vheap_size) {
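/* min_bin_vheap_size: as min_heap_size, but for the default minimum
 * binary virtual heap size. */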
int oval = BIN_VH_MIN_SIZE;
if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) {
goto error;
}
erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_thr_progress_block();
BIN_VH_MIN_SIZE = erts_next_heap_size(n, 0);
erts_thr_progress_unblock();
erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_max_heap_size) {
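/* max_heap_size: default maximum heap size settings. The old setting is
 * built as a term on the caller's heap before the globals are updated
 * under a thread-progress block. */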
Eterm *hp, old_value;
Uint sz = 0, max_heap_size, max_heap_flags;
if (!erts_max_heap_size(BIF_ARG_2, &max_heap_size, &max_heap_flags))
goto error;
if (max_heap_size < H_MIN_SIZE && max_heap_size != 0)
goto error;
erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, NULL, &sz);
hp = HAlloc(BIF_P, sz);
old_value = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL);
erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_thr_progress_block();
H_MAX_SIZE = max_heap_size;
H_MAX_FLAGS = max_heap_flags;
erts_thr_progress_unblock();
erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(old_value);
} else if (BIF_ARG_1 == am_debug_flags) {
BIF_RET(am_true);
} else if (BIF_ARG_1 == am_backtrace_depth) {
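/* backtrace_depth: maximum number of stack frames kept in exception
 * backtraces, capped at MAX_BACKTRACE_SIZE; returns the old depth. */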
int oval = erts_backtrace_depth;
if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) {
goto error;
}
if (n > MAX_BACKTRACE_SIZE) n = MAX_BACKTRACE_SIZE;
erts_backtrace_depth = n;
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_trace_control_word) {
BIF_RET(db_set_trace_control_word(BIF_P, BIF_ARG_2));
} else if (BIF_ARG_1 == am_sequential_tracer) {
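/* sequential_tracer: install a new system sequential tracer ('false'
 * clears it); returns the previous tracer, or 'false' if none was
 * set. */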
ErtsTracer new_seq_tracer, old_seq_tracer;
Eterm ret;
if (BIF_ARG_2 == am_false)
new_seq_tracer = erts_tracer_nil;
else
new_seq_tracer = erts_term_to_tracer(THE_NON_VALUE, BIF_ARG_2);
if (new_seq_tracer == THE_NON_VALUE)
goto error;
old_seq_tracer = erts_set_system_seq_tracer(BIF_P,
ERTS_PROC_LOCK_MAIN,
new_seq_tracer);
ERTS_TRACER_CLEAR(&new_seq_tracer);
if (old_seq_tracer == THE_NON_VALUE)
goto error;
if (ERTS_TRACER_IS_NIL(old_seq_tracer))
BIF_RET(am_false);
ret = erts_tracer_to_term(BIF_P, old_seq_tracer);
ERTS_TRACER_CLEAR(&old_seq_tracer);
BIF_RET(ret);
} else if (BIF_ARG_1 == am_reset_seq_trace) {
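/* reset_seq_trace: clear seq_trace tokens and counters in every live
 * process, including tokens carried by queued signals, under a
 * thread-progress block. */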
int i, max;
erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_thr_progress_block();
max = erts_ptab_max(&erts_proc);
for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
if (p) {
#ifdef USE_VM_PROBES
p->seq_trace_token = (p->dt_utag != NIL) ? am_have_dt_utag : NIL;
#else
p->seq_trace_token = NIL;
#endif
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
erts_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ);
erts_proc_sig_clear_seq_trace_tokens(p);
erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ);
}
}
erts_thr_progress_unblock();
erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
} else if (BIF_ARG_1 == am_scheduler_wall_time) {
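/* scheduler_wall_time: enabling or disabling is handled by a dedicated
 * trap (system_flag_scheduler_wall_time_trap). */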
if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)
BIF_TRAP1(system_flag_scheduler_wall_time_trap,
BIF_P, BIF_ARG_2);
} else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) {
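/* dirty_cpu_schedulers_online: as schedulers_online, but for the dirty
 * CPU schedulers (note the final argument to
 * erts_set_schedulers_online). */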
Sint old_no;
if (!is_small(BIF_ARG_2))
goto error;
switch (erts_set_schedulers_online(BIF_P,
ERTS_PROC_LOCK_MAIN,
signed_val(BIF_ARG_2),
&old_no,
1)) {
case ERTS_SCHDLR_SSPND_DONE:
BIF_RET(make_small(old_no));
case ERTS_SCHDLR_SSPND_YIELD_RESTART:
ERTS_VBUMP_ALL_REDS(BIF_P);
BIF_TRAP2(BIF_TRAP_EXPORT(BIF_system_flag_2),
BIF_P, BIF_ARG_1, BIF_ARG_2);
case ERTS_SCHDLR_SSPND_YIELD_DONE:
ERTS_BIF_YIELD_RETURN_X(BIF_P, make_small(old_no),
am_dirty_cpu_schedulers_online);
case ERTS_SCHDLR_SSPND_EINVAL:
goto error;
default:
ASSERT(0);
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
break;
}
} else if (BIF_ARG_1 == am_time_offset
&& ERTS_IS_ATOM_STR("finalize", BIF_ARG_2)) {
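/* time_offset, finalize: finalize the time offset; the returned atom
 * reflects the state the offset ended up in. */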
ErtsTimeOffsetState res;
erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
res = erts_finalize_time_offset();
erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
switch (res) {
case ERTS_TIME_OFFSET_PRELIMINARY: {
DECL_AM(preliminary);
BIF_RET(AM_preliminary);
}
case ERTS_TIME_OFFSET_FINAL: {
DECL_AM(final);
BIF_RET(AM_final);
}
case ERTS_TIME_OFFSET_VOLATILE: {
DECL_AM(volatile);
BIF_RET(AM_volatile);
}
default:
ERTS_INTERNAL_ERROR("Unknown state");
}
#ifdef ERTS_ENABLE_MSACC
} else if (BIF_ARG_1 == am_microstate_accounting) {
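/* microstate_accounting: enable, disable or reset microstate
 * accounting. The request is asynchronous, so the caller traps until
 * the participating threads have replied. */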
Eterm threads;
if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
erts_aint32_t new = BIF_ARG_2 == am_true ? ERTS_MSACC_ENABLE : ERTS_MSACC_DISABLE;
erts_aint32_t old = erts_atomic32_xchg_nob(&msacc, new);
Eterm ref = erts_msacc_request(BIF_P, new, &threads);
if (is_non_value(ref))
BIF_RET(old ? am_true : am_false);
BIF_TRAP3(await_msacc_mod_trap,
BIF_P,
ref,
old ? am_true : am_false,
threads);
} else if (BIF_ARG_2 == am_reset) {
Eterm ref = erts_msacc_request(BIF_P, ERTS_MSACC_RESET, &threads);
erts_aint32_t old = erts_atomic32_read_nob(&msacc);
ASSERT(is_value(ref));
BIF_TRAP3(await_msacc_mod_trap,
BIF_P,
ref,
old ? am_true : am_false,
threads);
}
#endif
} else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
int what;
if (ERTS_IS_ATOM_STR("disable", BIF_ARG_2))
what = ERTS_SCHED_STAT_MODIFY_DISABLE;
else if (ERTS_IS_ATOM_STR("enable", BIF_ARG_2))
what = ERTS_SCHED_STAT_MODIFY_ENABLE;
else if (ERTS_IS_ATOM_STR("clear", BIF_ARG_2))
what = ERTS_SCHED_STAT_MODIFY_CLEAR;
else
goto error;
erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_sched_stat_modify(what);
erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
} else if (ERTS_IS_ATOM_STR("internal_cpu_topology", BIF_ARG_1)) {
Eterm res = erts_set_cpu_topology(BIF_P, BIF_ARG_2);
if (is_value(res))
BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
erts_send_warning_to_logger_str(
BIF_P->group_leader,
"A call to erlang:system_flag(cpu_topology, _) was made.\n"
"The cpu_topology argument is deprecated and scheduled\n"
"for removal in Erlang/OTP 18. For more information\n"
"see the erlang:system_flag/2 documentation.\n");
BIF_TRAP1(set_cpu_topology_trap, BIF_P, BIF_ARG_2);
} else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
erts_send_warning_to_logger_str(
BIF_P->group_leader,
"A call to erlang:system_flag(scheduler_bind_type, _) was\n"
"made. The scheduler_bind_type argument is deprecated and\n"
"scheduled for removal in Erlang/OTP 18. For more\n"
"information see the erlang:system_flag/2 documentation.\n");
return erts_bind_schedulers(BIF_P, BIF_ARG_2);
} else if (ERTS_IS_ATOM_STR("erts_alloc", BIF_ARG_1)) {
return erts_alloc_set_dyn_param(BIF_P, BIF_ARG_2);
} else if (ERTS_IS_ATOM_STR("system_logger", BIF_ARG_1)) {
Eterm res = erts_set_system_logger(BIF_ARG_2);
if (is_value(res)) BIF_RET(res);
}
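/* Every unrecognized flag or invalid value ends up here. */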
error:
BIF_ERROR(BIF_P, BADARG);
}