in vm/vmcore/src/class_support/Prepare.cpp [1125:1515]
bool Class::prepare(Global_Env* env)
{
ASSERT_RAISE_AREA;
//
// STEP 1 ::: RETURN IMMEDIATELY if the class is already at least prepared or is in an erroneous state.
//
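// Note: the two checks below form a double-checked locking pattern: the
// first test is lock-free so the common "already prepared" case stays
// cheap; the second, made after acquiring m_lock, catches a concurrent
// prepare that completed in between.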
if(is_at_least_prepared() || in_error()) // fast path: check without the lock
return true;
LMAutoUnlock autoUnlocker(m_lock);
if(is_at_least_prepared() || in_error()) // slow path: re-check under the lock
return true;
TRACE2("classloader.prepare", "BEGIN class prepare, class name = " << m_name->bytes);
assert(m_state == ST_BytecodesVerified);
//
// STEP 2 ::: PREPARE SUPER-INTERFACES
//
unsigned i;
for(i = 0; i < m_num_superinterfaces; i++) {
assert(m_superinterfaces[i].clss->is_interface());
if(!m_superinterfaces[i].clss->prepare(env)) {
REPORT_FAILED_CLASS_CLASS(m_class_loader, this,
VM_Global_State::loader_env->JavaLangNoClassDefFoundError_String->bytes,
m_name->bytes << ": error preparing superinterface "
<< m_superinterfaces[i].clss->get_name()->bytes);
return false;
}
}
//
// STEP 3 ::: PREPARE SUPERCLASS if needed
//
if(!is_interface() && has_super_class())
{
// Regular class with super-class
if(!get_super_class()->prepare(env)) {
REPORT_FAILED_CLASS_CLASS(m_class_loader, this,
VM_Global_State::loader_env->JavaLangNoClassDefFoundError_String->bytes,
m_name->bytes << ": error preparing superclass "
<< get_super_class()->get_name()->bytes);
return false;
}
}
//
// STEP 4 ::: SET UP selected class properties
//
if(!is_interface()) {
if(has_super_class()) {
if(get_super_class()->has_finalizer()) {
m_has_finalizer = 1;
}
// Copy over the unpadded instance size, instance ref count,
// and field padding bytes from the super class.
if(m_name == env->JavaLangClass_String) {
// calculate unpadded instance data size
// for java/lang/Class separately
m_unpadded_instance_data_size =
(((unsigned)ManagedObject::get_size() + (GC_OBJECT_ALIGNMENT - 1))
/ GC_OBJECT_ALIGNMENT)
* GC_OBJECT_ALIGNMENT;
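// Note: ((x + a - 1) / a) * a is the usual idiom for rounding x up to
// the next multiple of a; e.g. with GC_OBJECT_ALIGNMENT == 8, a raw
// size of 20 bytes rounds up to 24.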
} else {
m_unpadded_instance_data_size =
get_super_class()->m_unpadded_instance_data_size;
}
m_num_instance_refs = get_super_class()->m_num_instance_refs;
m_num_field_padding_bytes =
get_super_class()->m_num_field_padding_bytes;
// Copy over the number of virtual method entries
// from the super class
m_num_virtual_method_entries =
get_super_class()->m_num_virtual_method_entries;
} else {
// this is java/lang/Object
// FIXME: primitive classes also get here, but this assignment
// really has no effect on them
m_unpadded_instance_data_size = (unsigned)ManagedObject::get_size();
}
}
//
// STEP 5 ::: ASSIGN OFFSETS to the class and instance data FIELDS.
// This SETs class to ST_InstanceSizeComputed state.
//
assign_offsets_to_fields();
assert(m_state == ST_InstanceSizeComputed);
//
// STEP 6 ::: CALCULATE # of INTERFACE METHODS and build interface table DESCRIPTORS
//
std::vector<Class*> intfc_table_entries;
build_interface_table_descriptors(this, intfc_table_entries, 0);
//
// STEP 7 ::: ASSIGN OFFSETS to the class and virtual METHODS
//
assign_offsets_to_methods(env);
if(exn_raised())
return false;
//
// STEP 8 ::: Create the static field block
//
m_static_data_block = (char*)m_class_loader->Alloc(m_static_data_size);
memset(m_static_data_block, 0, m_static_data_size);
#ifdef VM_STATS
// Total number of allocations and total number of bytes for class-related data structures.
// This includes any rounding added to make each item aligned (current alignment is to the next 16 byte boundary).
unsigned num_bytes = (m_static_data_size + 15) & ~15;
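// (x + 15) & ~15 rounds x up to the next multiple of 16; e.g. a 33-byte
// block is accounted as 48 bytes.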
VM_Statistics::get_vm_stats().num_statics_allocations++;
if(m_static_data_size > 0) {
VM_Statistics::get_vm_stats().num_nonempty_statics_allocations++;
}
VM_Statistics::get_vm_stats().total_statics_bytes += num_bytes;
#endif
assert(m_static_data_block);
// block must be on an 8-byte boundary
assert((((POINTER_SIZE_INT)(m_static_data_block)) % 8) == 0);
//
// STEP 9 ::: For INTERFACES initialize static fields and return.
//
if(is_interface()) {
bool init_fields = initialize_static_fields_for_interface();
if(!env->InBootstrap())
{
autoUnlocker.ForceUnlock();
assert(hythread_is_suspend_enabled());
if (init_fields
&& jvmti_should_report_event(JVMTI_EVENT_CLASS_PREPARE))
{
jvmti_send_class_prepare_event(this);
}
}
// DONE for interfaces
TRACE2("classloader.prepare", "END class prepare, class name = "
<< m_name->bytes);
return init_fields;
}
//
// STEP 10 ::: COMPUTE number of interface method entries.
//
for(i = 0; i < intfc_table_entries.size(); i++) {
Class* intfc = intfc_table_entries[i];
m_num_intfc_method_entries += intfc->get_number_of_methods();
if(intfc->m_static_initializer) {
// Don't count static initializers of interfaces.
m_num_intfc_method_entries--;
}
}
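// Each implemented interface thus contributes one vtable slot per method,
// except for <clinit>, which is never dispatched virtually.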
//
// STEP 11 ::: ALLOCATE the Vtable descriptors array
//
unsigned n_vtable_entries =
m_num_virtual_method_entries + m_num_intfc_method_entries;
if(n_vtable_entries != 0) {
m_vtable_descriptors = new Method*[n_vtable_entries];
// ppervov: FIXME: should throw OOME
}
//
// STEP 12 ::: POPULATE with interface descriptors and virtual method descriptors
// Also, OVERRIDE superclass methods with this class's overriding methods
//
populate_vtable_descriptors_table_and_override_methods(intfc_table_entries);
//
// STEP 13 ::: CREATE VTABLE and set the Vtable entries to point to the
// code address (a stub or jitted code)
//
create_vtable(n_vtable_entries);
assert(m_vtable);
for(i = 0; i < n_vtable_entries; i++) {
// need to populate with pointers to stubs or compiled code
m_vtable->methods[i] = NULL; // for now
}
if(vm_is_vtable_compressed())
{
m_allocation_handle =
(Allocation_Handle)((UDATA)m_vtable - (UDATA)vm_get_vtable_base_address());
}
else
{
m_allocation_handle = (Allocation_Handle)m_vtable;
}
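// With compressed vtables the allocation handle is the vtable's offset
// from the vtable base address, so it fits in fewer bits than a raw
// pointer; otherwise the raw vtable pointer serves as the handle.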
m_vtable->clss = this;
// Set the vtable entries to point to the code address (a stub or jitted code)
point_vtable_entries_to_stubs();
//
// STEP 14 ::: CREATE and POPULATE the CLASS INTERFACE TABLE
//
m_vtable->intfc_table = create_and_populate_interface_table(intfc_table_entries);
// cache the first three interface table entries for fast access
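// (Presumably this lets interface dispatch and type checks serve the
// common case of a class implementing at most three interfaces without
// scanning the full intfc_table.)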
if (m_vtable->intfc_table->n_entries >= 1 ) {
m_vtable->intfc_class_0 = m_vtable->intfc_table->entry[0].intfc_class;
m_vtable->intfc_table_0 = m_vtable->intfc_table->entry[0].table;
}
if (m_vtable->intfc_table->n_entries >= 2 ) {
m_vtable->intfc_class_1 = m_vtable->intfc_table->entry[1].intfc_class;
m_vtable->intfc_table_1 = m_vtable->intfc_table->entry[1].table;
}
if (m_vtable->intfc_table->n_entries >= 3 ) {
m_vtable->intfc_class_2 = m_vtable->intfc_table->entry[2].intfc_class;
m_vtable->intfc_table_2 = m_vtable->intfc_table->entry[2].table;
}
//
// STEP 15 ::: COMPUTE the padded INSTANCE SIZE and ALLOCATED SIZE
//
// Make sure no one has prematurely set these fields, since all calculations
// up to this point should be based on m_unpadded_instance_data_size.
assert(m_instance_data_size == 0);
assert(m_allocated_size == 0);
// Add any needed padding, including the OBJECT_HEADER, which is used to hold
// things like GC forwarding pointers, mark bits, hashes and locks.
m_allocated_size =
(((m_unpadded_instance_data_size + (GC_OBJECT_ALIGNMENT - 1))
/ GC_OBJECT_ALIGNMENT) * GC_OBJECT_ALIGNMENT) + OBJECT_HEADER_SIZE;
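// For example, assuming GC_OBJECT_ALIGNMENT == 8 and OBJECT_HEADER_SIZE == 8,
// an unpadded instance size of 20 bytes yields 24 + 8 = 32 bytes.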
// Copy the size into the vtable.
m_vtable->allocated_size = m_allocated_size;
m_instance_data_size = m_allocated_size;
TRACE2("class.size", "class " << this << " allocated size "
<< m_allocated_size);
//
// STEP 16 ::: HANDLE PINNING and Class PROPERTIES if needed.
//
if(has_super_class()
&& (get_super_class()->m_vtable->class_properties & CL_PROP_PINNED_MASK) != 0)
{
// If the super class is pinned then this class is pinned
m_vtable->class_properties |= CL_PROP_PINNED_MASK;
set_instance_data_size_constraint_bit();
}
// Set up the class_properties field.
if(is_array()) {
m_array_element_size = (vm_is_heap_compressed()
? sizeof(COMPRESSED_REFERENCE) : sizeof(RAW_REFERENCE));
m_array_element_shift = m_array_element_size == 8 ? 3 : 2;
m_vtable->class_properties |= CL_PROP_ARRAY_MASK;
if(is_vector_of_primitives(this)) {
m_array_element_shift = shift_of_primitive_array_element(this);
m_array_element_size = 1 << m_array_element_shift;
m_vtable->class_properties |= CL_PROP_NON_REF_ARRAY_MASK;
}
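// The element shift is log2 of the element size, so array indexing can
// use a shift instead of a multiply.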
m_vtable->array_element_size = (unsigned short)m_array_element_size;
switch(m_vtable->array_element_size)
{
case 1:
m_vtable->array_element_shift = 0;
break;
case 2:
m_vtable->array_element_shift = 1;
break;
case 4:
m_vtable->array_element_shift = 2;
break;
case 8:
m_vtable->array_element_shift = 3;
break;
default:
m_vtable->array_element_shift = 65535;
LDIE(66, "Unexpected array element size: {0}" << m_vtable->array_element_size);
break;
}
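// This switch recomputes the shift from the unsigned short copy stored in
// the vtable; 65535 is a deliberate "invalid" sentinel set before LDIE
// reports the fatal error.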
}
#ifndef POINTER64
if(!strcmp("[D", m_name->bytes)) {
// On IA32, arrays of doubles need to be eight-byte aligned to improve
// performance. On IPF, all objects (arrays, class data structures, heap
// objects) are already aligned on eight-byte boundaries, so this special
// case is not needed there.
m_alignment = ((GC_OBJECT_ALIGNMENT < 8) ? 8 : GC_OBJECT_ALIGNMENT);
// Record the 8-byte alignment of doubles in the class properties.
m_vtable->class_properties |= 8;
// Set high bit in size so that gc knows there are constraints
set_instance_data_size_constraint_bit();
}
#endif
//
// STEP 17 ::: HANDLE ALIGNMENT and Class FINALIZER if needed.
//
if(m_alignment) {
if(m_alignment != GC_OBJECT_ALIGNMENT) {
// The GC aligns on 4-byte boundaries by default on IA32.
#ifdef POINTER64
LDIE(67, "Alignment is supposed to be appropriate");
#endif
// Make sure it is a legal mask.
assert((m_alignment & CL_PROP_ALIGNMENT_MASK) <= CL_PROP_ALIGNMENT_MASK);
m_vtable->class_properties |= m_alignment;
set_instance_data_size_constraint_bit();
// make sure the constraint bit was set.
assert(get_instance_data_size() != m_instance_data_size);
}
}
if(has_finalizer()) {
m_vtable->class_properties |= CL_PROP_FINALIZABLE_MASK;
set_instance_data_size_constraint_bit();
}
//
// STEP 18 ::: SET Class ALLOCATED SIZE to INSTANCE SIZE
//
// Finally set the allocated size field.
m_allocated_size = get_instance_data_size();
//
// STEP 18a ::: Determine if class should have special access check treatment.
//
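// Classes under java/lang/reflect/ bypass the normal access checks,
// presumably so the reflection machinery itself can reach members of
// arbitrary classes.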
static const char* reflect = "java/lang/reflect/";
static const size_t reflect_len = strlen(reflect);
if(strncmp(m_name->bytes, reflect, reflect_len) == 0)
m_can_access_all = 1;
//
// STEP 19 ::: SET class to ST_Prepared state.
//
gc_class_prepared(this, m_vtable);
assert(m_state == ST_InstanceSizeComputed);
m_state = ST_Prepared;
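// Array classes have no <clinit> to run, so they can be marked
// initialized immediately.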
if (is_array()) {
m_state = ST_Initialized;
}
TRACE2("classloader.prepare","class " << m_name->bytes << " prepared");
//
// STEP 20 ::: ASSIGN VALUES to static final fields
//
// Generally speaking, a final value is inlined at its use sites, so we
// wouldn't need to worry about initializing these static final fields.
// But when we access them through the reflection mechanisms - Field.getXXX() -
// we would get null values. Considering this, we must initialize those
// static final fields. See also the Binary Compatibility chapter of the
// JLS, section 13.4.8.
//
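// Suspension is disabled around the assignment below, presumably because
// raw heap references (e.g. interned string constants) are written into
// the static data block and a moving GC must not run mid-update.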
tmn_suspend_disable();
if(!assign_values_to_class_static_final_fields(this))
{
// OOME happened
tmn_suspend_enable();
return false;
}
tmn_suspend_enable();
//
// STEP 21 ::: Link java.lang.Class to struct Class
//
// VM adds an extra field, 'vm_class', to all instances of
// java.lang.Class (see an entry in vm_extra_fields).
// This field is set to point to the corresponding struct Class.
//
// The code below stores the offset to that field in the VM environment.
//
if(m_name == env->JavaLangClass_String) {
String* name = env->string_pool.lookup("vm_class");
String* desc = env->string_pool.lookup("J");
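// Descriptor "J" (a Java long) gives the injected field enough width to
// hold a native Class pointer on both 32-bit and 64-bit platforms.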
Field* vm_class_field = lookup_field(name, desc);
assert(vm_class_field != NULL);
env->vm_class_offset = vm_class_field->get_offset();
}
assert(m_allocated_size == m_vtable->allocated_size);
assert(m_array_element_size == m_vtable->array_element_size);
if(!env->InBootstrap())
{
autoUnlocker.ForceUnlock();
assert(hythread_is_suspend_enabled());
if(jvmti_should_report_event(JVMTI_EVENT_CLASS_PREPARE)) {
jvmti_send_class_prepare_event(this);
}
}
TRACE2("classloader.prepare", "END class prepare, class name = " << m_name->bytes);
return true;
} // Class::prepare