diff --git a/make/hotspot/lib/JvmFeatures.gmk b/make/hotspot/lib/JvmFeatures.gmk index 0a897230f835b..3771a950645b2 100644 --- a/make/hotspot/lib/JvmFeatures.gmk +++ b/make/hotspot/lib/JvmFeatures.gmk @@ -127,7 +127,8 @@ ifneq ($(call check-jvm-feature, cds), true) JVM_EXCLUDE_FILES += \ classLoaderDataShared.cpp \ classLoaderExt.cpp \ - systemDictionaryShared.cpp + systemDictionaryShared.cpp \ + trainingData.cpp JVM_EXCLUDE_PATTERNS += cds/ endif diff --git a/src/hotspot/share/cds/aotArtifactFinder.cpp b/src/hotspot/share/cds/aotArtifactFinder.cpp index 65eb06ca7f093..2e30a1d121dbb 100644 --- a/src/hotspot/share/cds/aotArtifactFinder.cpp +++ b/src/hotspot/share/cds/aotArtifactFinder.cpp @@ -33,6 +33,7 @@ #include "memory/metaspaceClosure.hpp" #include "oops/instanceKlass.hpp" #include "oops/objArrayKlass.hpp" +#include "oops/trainingData.hpp" #include "utilities/resourceHash.hpp" // All the classes that should be included in the AOT cache (in at least the "allocated" state) @@ -162,6 +163,8 @@ void AOTArtifactFinder::find_artifacts() { }); end_scanning_for_oops(); + + TrainingData::cleanup_training_data(); } void AOTArtifactFinder::start_scanning_for_oops() { diff --git a/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp b/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp index 31d95024e3bfd..8d739e344f284 100644 --- a/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp +++ b/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp @@ -32,10 +32,12 @@ #include "classfile/systemDictionary.hpp" #include "classfile/systemDictionaryShared.hpp" #include "classfile/vmClasses.hpp" +#include "compiler/compilationPolicy.hpp" #include "gc/shared/gcVMOperations.hpp" #include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" #include "oops/klass.inline.hpp" +#include "oops/trainingData.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" @@ -48,6 +50,17 @@ void AOTLinkedClassBulkLoader::serialize(SerializeClosure* soc, bool is_static_a AOTLinkedClassTable::get(is_static_archive)->serialize(soc); } +bool AOTLinkedClassBulkLoader::class_preloading_finished() { + if (!CDSConfig::is_using_aot_linked_classes()) { + return true; + } else { + // The ConstantPools of preloaded classes have references to other preloaded classes. We don't + // want any Java code (including JVMCI compiler) to use these classes until all of them + // are loaded. 
+ return Atomic::load_acquire(&_all_completed); + } +} + void AOTLinkedClassBulkLoader::load_javabase_classes(JavaThread* current) { assert(CDSConfig::is_using_aot_linked_classes(), "sanity"); load_classes_in_loader(current, AOTLinkedClassCategory::BOOT1, nullptr); // only java.base classes @@ -70,8 +83,14 @@ void AOTLinkedClassBulkLoader::load_non_javabase_classes(JavaThread* current) { _platform_completed = true; load_classes_in_loader(current, AOTLinkedClassCategory::APP, SystemDictionary::java_system_loader()); + + if (AOTPrintTrainingInfo) { + tty->print_cr("==================== archived_training_data ** after all classes preloaded ===================="); + TrainingData::print_archived_training_data_on(tty); + } + _app_completed = true; - _all_completed = true; + Atomic::release_store(&_all_completed, true); } void AOTLinkedClassBulkLoader::load_classes_in_loader(JavaThread* current, AOTLinkedClassCategory class_category, oop class_loader_oop) { @@ -394,3 +413,25 @@ bool AOTLinkedClassBulkLoader::is_pending_aot_linked_class(Klass* k) { return false; } } + +void AOTLinkedClassBulkLoader::replay_training_at_init(Array* classes, TRAPS) { + if (classes != nullptr) { + for (int i = 0; i < classes->length(); i++) { + InstanceKlass* ik = classes->at(i); + if (ik->has_aot_initialized_mirror() && ik->is_initialized() && !ik->has_init_deps_processed()) { + CompilationPolicy::replay_training_at_init(ik, CHECK); + } + } + } +} + +void AOTLinkedClassBulkLoader::replay_training_at_init_for_preloaded_classes(TRAPS) { + if (CDSConfig::is_using_aot_linked_classes() && TrainingData::have_data()) { + // Only static archive can have training data. + AOTLinkedClassTable* table = AOTLinkedClassTable::for_static_archive(); + replay_training_at_init(table->boot(), CHECK); + replay_training_at_init(table->boot2(), CHECK); + replay_training_at_init(table->platform(), CHECK); + replay_training_at_init(table->app(), CHECK); + } +} \ No newline at end of file diff --git a/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp b/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp index a8e6365b89903..86fb5017eb858 100644 --- a/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp +++ b/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -55,6 +55,7 @@ class AOTLinkedClassBulkLoader : AllStatic { const char* category_name, Handle loader, TRAPS); static void load_hidden_class(ClassLoaderData* loader_data, InstanceKlass* ik, TRAPS); static void init_required_classes_for_loader(Handle class_loader, Array* classes, TRAPS); + static void replay_training_at_init(Array* classes, TRAPS) NOT_CDS_RETURN; public: static void serialize(SerializeClosure* soc, bool is_static_archive) NOT_CDS_RETURN; @@ -63,6 +64,8 @@ class AOTLinkedClassBulkLoader : AllStatic { static void finish_loading_javabase_classes(TRAPS) NOT_CDS_RETURN; static void exit_on_exception(JavaThread* current); + static void replay_training_at_init_for_preloaded_classes(TRAPS) NOT_CDS_RETURN; + static bool class_preloading_finished(); static bool is_pending_aot_linked_class(Klass* k) NOT_CDS_RETURN_(false); }; diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp index c309de17b4cad..a0c9e6f911c9d 100644 --- a/src/hotspot/share/cds/archiveBuilder.cpp +++ b/src/hotspot/share/cds/archiveBuilder.cpp @@ -52,9 +52,12 @@ #include "memory/resourceArea.hpp" #include "oops/compressedKlass.inline.hpp" #include "oops/instanceKlass.hpp" +#include "oops/methodCounters.hpp" +#include "oops/methodData.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oopHandle.inline.hpp" +#include "oops/trainingData.hpp" #include "runtime/arguments.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/globals_extension.hpp" @@ -129,13 +132,27 @@ class RelocateEmbeddedPointers : public BitMapClosure { size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address); address* ptr_loc = (address*)(_buffered_obj + field_offset); - address old_p = *ptr_loc; + address old_p_with_tags = *ptr_loc; + assert(old_p_with_tags != nullptr, "null ptrs shouldn't have been marked"); + + address old_p = MetaspaceClosure::strip_tags(old_p_with_tags); + uintx tags = MetaspaceClosure::decode_tags(old_p_with_tags); address new_p = _builder->get_buffered_addr(old_p); - log_trace(cds)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT, - p2i(ptr_loc), p2i(old_p), p2i(new_p)); + bool nulled; + if (new_p == nullptr) { + // old_p had a FollowMode of set_to_null + nulled = true; + } else { + new_p = MetaspaceClosure::add_tags(new_p, tags); + nulled = false; + } + + log_trace(cds)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT " %zu", + p2i(ptr_loc), p2i(old_p) + tags, p2i(new_p), tags); ArchivePtrMarker::set_and_mark_pointer(ptr_loc, new_p); + ArchiveBuilder::current()->count_relocated_pointer(tags != 0, nulled); return true; // keep iterating the bitmap } }; @@ -175,6 +192,9 @@ ArchiveBuilder::ArchiveBuilder() : _klasses = new (mtClassShared) GrowableArray(4 * K, mtClassShared); _symbols = new (mtClassShared) GrowableArray(256 * K, mtClassShared); _entropy_seed = 0x12345678; + _relocated_ptr_info._num_ptrs = 0; + _relocated_ptr_info._num_tagged_ptrs = 0; + _relocated_ptr_info._num_nulled_ptrs = 0; assert(_current == nullptr, "must be"); _current = this; } @@ -432,6 +452,11 @@ bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read } #endif + if (ref->msotype() == MetaspaceObj::MethodDataType) { + MethodData* md = (MethodData*)ref->obj(); + md->clean_method_data(false /* always_clean */); + } + assert(p->read_only() == src_info.read_only(), "must be"); if (created && src_info.should_copy()) { @@ -529,8 
+554,11 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref // Don't dump existing shared metadata again. return point_to_it; } else if (ref->msotype() == MetaspaceObj::MethodDataType || - ref->msotype() == MetaspaceObj::MethodCountersType) { - return set_to_null; + ref->msotype() == MetaspaceObj::MethodCountersType || + ref->msotype() == MetaspaceObj::KlassTrainingDataType || + ref->msotype() == MetaspaceObj::MethodTrainingDataType || + ref->msotype() == MetaspaceObj::CompileTrainingDataType) { + return TrainingData::need_data() ? make_a_copy : set_to_null; } else { if (ref->msotype() == MetaspaceObj::ClassType) { Klass* klass = (Klass*)ref->obj(); @@ -739,6 +767,10 @@ void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() { log_info(cds)("Relocating embedded pointers in core regions ... "); relocate_embedded_pointers(&_rw_src_objs); relocate_embedded_pointers(&_ro_src_objs); + log_info(cds)("Relocating %zu pointers, %zu tagged, %zu nulled", + _relocated_ptr_info._num_ptrs, + _relocated_ptr_info._num_tagged_ptrs, + _relocated_ptr_info._num_nulled_ptrs); } #define ADD_COUNT(x) \ @@ -940,6 +972,28 @@ void ArchiveBuilder::make_klasses_shareable() { DynamicArchive::make_array_klasses_shareable(); } +void ArchiveBuilder::make_training_data_shareable() { + auto clean_td = [&] (address& src_obj, SourceObjInfo& info) { + if (!is_in_buffer_space(info.buffered_addr())) { + return; + } + + if (info.msotype() == MetaspaceObj::KlassTrainingDataType || + info.msotype() == MetaspaceObj::MethodTrainingDataType || + info.msotype() == MetaspaceObj::CompileTrainingDataType) { + TrainingData* buffered_td = (TrainingData*)info.buffered_addr(); + buffered_td->remove_unshareable_info(); + } else if (info.msotype() == MetaspaceObj::MethodDataType) { + MethodData* buffered_mdo = (MethodData*)info.buffered_addr(); + buffered_mdo->remove_unshareable_info(); + } else if (info.msotype() == MetaspaceObj::MethodCountersType) { + MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr(); + buffered_mc->remove_unshareable_info(); + } + }; + _src_obj_table.iterate_all(clean_td); +} + void ArchiveBuilder::serialize_dynamic_archivable_items(SerializeClosure* soc) { SymbolTable::serialize_shared_table_header(soc, false); SystemDictionaryShared::serialize_dictionary_headers(soc, false); @@ -1560,6 +1614,12 @@ void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegi mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec); } +void ArchiveBuilder::count_relocated_pointer(bool tagged, bool nulled) { + _relocated_ptr_info._num_ptrs ++; + _relocated_ptr_info._num_tagged_ptrs += tagged ? 1 : 0; + _relocated_ptr_info._num_nulled_ptrs += nulled ? 
1 : 0; +} + void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo, ArchiveHeapInfo* heap_info) { // Print statistics of all the regions const size_t bitmap_used = mapinfo->region_at(MetaspaceShared::bm)->used(); diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp index 5913ae29c7878..73cf334196b31 100644 --- a/src/hotspot/share/cds/archiveBuilder.hpp +++ b/src/hotspot/share/cds/archiveBuilder.hpp @@ -237,6 +237,11 @@ class ArchiveBuilder : public StackObj { // statistics DumpAllocStats _alloc_stats; size_t _total_heap_region_size; + struct { + size_t _num_ptrs; + size_t _num_tagged_ptrs; + size_t _num_nulled_ptrs; + } _relocated_ptr_info; void print_region_stats(FileMapInfo *map_info, ArchiveHeapInfo* heap_info); void print_bitmap_region_stats(size_t size, size_t total_size); @@ -257,6 +262,8 @@ class ArchiveBuilder : public StackObj { ~OtherROAllocMark(); }; + void count_relocated_pointer(bool tagged, bool nulled); + private: FollowMode get_follow_mode(MetaspaceClosure::Ref *ref); @@ -411,6 +418,7 @@ class ArchiveBuilder : public StackObj { void relocate_metaspaceobj_embedded_pointers(); void record_regenerated_object(address orig_src_obj, address regen_src_obj); void make_klasses_shareable(); + void make_training_data_shareable(); void relocate_to_requested(); void write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_info); void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, @@ -433,7 +441,8 @@ class ArchiveBuilder : public StackObj { address get_buffered_addr(address src_addr) const; template T get_buffered_addr(T src_addr) const { - return (T)get_buffered_addr((address)src_addr); + CDS_ONLY(return (T)get_buffered_addr((address)src_addr);) + NOT_CDS(return nullptr;) } address get_source_addr(address buffered_addr) const; @@ -446,7 +455,8 @@ class ArchiveBuilder : public StackObj { GrowableArray* symbols() const { return _symbols; } static bool is_active() { - return (_current != nullptr); + CDS_ONLY(return (_current != nullptr)); + NOT_CDS(return false;) } static ArchiveBuilder* current() { diff --git a/src/hotspot/share/cds/cdsConfig.cpp b/src/hotspot/share/cds/cdsConfig.cpp index 64ad07b0cf816..d3170cf377a6c 100644 --- a/src/hotspot/share/cds/cdsConfig.cpp +++ b/src/hotspot/share/cds/cdsConfig.cpp @@ -508,6 +508,8 @@ bool CDSConfig::check_vm_args_consistency(bool patch_mod_javabase, bool mode_fla FLAG_SET_ERGO_IF_DEFAULT(AOTClassLinking, true); } + setup_compiler_args(); + if (AOTClassLinking) { // If AOTClassLinking is specified, enable all AOT optimizations by default. FLAG_SET_ERGO_IF_DEFAULT(AOTInvokeDynamicLinking, true); @@ -583,6 +585,28 @@ bool CDSConfig::check_vm_args_consistency(bool patch_mod_javabase, bool mode_fla return true; } +void CDSConfig::setup_compiler_args() { + // AOT profiles are supported only in the JEP 483 workflow. 
+ bool can_dump_profiles = AOTClassLinking && new_aot_flags_used(); + + if (is_dumping_preimage_static_archive() && can_dump_profiles) { + // JEP 483 workflow -- training + FLAG_SET_ERGO_IF_DEFAULT(AOTRecordTraining, true); + FLAG_SET_ERGO(AOTReplayTraining, false); + } else if (is_dumping_final_static_archive() && can_dump_profiles) { + // JEP 483 workflow -- assembly + FLAG_SET_ERGO(AOTRecordTraining, false); // This will be updated inside MetaspaceShared::preload_and_dump() + FLAG_SET_ERGO_IF_DEFAULT(AOTReplayTraining, true); + } else if (is_using_archive() && new_aot_flags_used()) { + // JEP 483 workflow -- production + FLAG_SET_ERGO(AOTRecordTraining, false); + FLAG_SET_ERGO_IF_DEFAULT(AOTReplayTraining, true); + } else { + FLAG_SET_ERGO(AOTReplayTraining, false); + FLAG_SET_ERGO(AOTRecordTraining, false); + } +} + void CDSConfig::prepare_for_dumping() { assert(CDSConfig::is_dumping_archive(), "sanity"); diff --git a/src/hotspot/share/cds/cdsConfig.hpp b/src/hotspot/share/cds/cdsConfig.hpp index e96291f653487..c61ab283b5b1b 100644 --- a/src/hotspot/share/cds/cdsConfig.hpp +++ b/src/hotspot/share/cds/cdsConfig.hpp @@ -67,6 +67,7 @@ class CDSConfig : public AllStatic { static void check_aotmode_auto_or_on(); static void check_aotmode_record(); static void check_aotmode_create(); + static void setup_compiler_args(); static void check_unsupported_dumping_module_options(); // Called after Arguments::apply_ergo() has started diff --git a/src/hotspot/share/cds/cds_globals.hpp b/src/hotspot/share/cds/cds_globals.hpp index 2dae9b452212e..3a616b368d43b 100644 --- a/src/hotspot/share/cds/cds_globals.hpp +++ b/src/hotspot/share/cds/cds_globals.hpp @@ -127,6 +127,23 @@ product(bool, AOTCacheParallelRelocation, true, DIAGNOSTIC, \ "Use parallel relocation code to speed up startup.") \ \ + /* flags to control training and deployment modes */ \ + \ + product(bool, AOTRecordTraining, false, DIAGNOSTIC, \ + "Request output of training data for improved deployment.") \ + \ + product(bool, AOTReplayTraining, false, DIAGNOSTIC, \ + "Read training data, if available, for use in this execution") \ + \ + product(bool, AOTPrintTrainingInfo, false, DIAGNOSTIC, \ + "Print additional information about training") \ + \ + product(bool, AOTVerifyTrainingData, trueInDebug, DIAGNOSTIC, \ + "Verify archived training data") \ + \ + product(bool, AOTCompileEagerly, false, DIAGNOSTIC, \ + "Compile methods as soon as possible") \ + \ // end of CDS_FLAGS DECLARE_FLAGS(CDS_FLAGS) diff --git a/src/hotspot/share/cds/cppVtables.cpp b/src/hotspot/share/cds/cppVtables.cpp index b8243cedf6d2c..ff04344bde463 100644 --- a/src/hotspot/share/cds/cppVtables.cpp +++ b/src/hotspot/share/cds/cppVtables.cpp @@ -32,7 +32,9 @@ #include "oops/instanceMirrorKlass.hpp" #include "oops/instanceRefKlass.hpp" #include "oops/instanceStackChunkKlass.hpp" +#include "oops/methodCounters.hpp" #include "oops/methodData.hpp" +#include "oops/trainingData.hpp" #include "oops/objArrayKlass.hpp" #include "oops/typeArrayKlass.hpp" #include "runtime/arguments.hpp" @@ -60,8 +62,13 @@ f(InstanceRefKlass) \ f(InstanceStackChunkKlass) \ f(Method) \ + f(MethodData) \ + f(MethodCounters) \ f(ObjArrayKlass) \ - f(TypeArrayKlass) + f(TypeArrayKlass) \ + f(KlassTrainingData) \ + f(MethodTrainingData) \ + f(CompileTrainingData) class CppVtableInfo { intptr_t _vtable_size; @@ -279,14 +286,9 @@ intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address ob case MetaspaceObj::ConstMethodType: case MetaspaceObj::ConstantPoolCacheType: case 
MetaspaceObj::AnnotationsType: - case MetaspaceObj::MethodCountersType: case MetaspaceObj::RecordComponentType: // These have no vtables. break; - case MetaspaceObj::MethodDataType: - // We don't archive MethodData <-- should have been removed in removed_unsharable_info - ShouldNotReachHere(); - break; default: for (kind = 0; kind < _num_cloned_vtable_kinds; kind ++) { if (vtable_of((Metadata*)obj) == _orig_cpp_vtptrs[kind] || diff --git a/src/hotspot/share/cds/dumpAllocStats.cpp b/src/hotspot/share/cds/dumpAllocStats.cpp index 5587ac2fac82a..c649a524ac189 100644 --- a/src/hotspot/share/cds/dumpAllocStats.cpp +++ b/src/hotspot/share/cds/dumpAllocStats.cpp @@ -23,6 +23,7 @@ */ #include "cds/aotClassLinker.hpp" +#include "cds/cdsConfig.hpp" #include "cds/dumpAllocStats.hpp" #include "logging/log.hpp" #include "logging/logMessage.hpp" @@ -118,8 +119,17 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all) { _num_indy_cp_entries, _num_indy_cp_entries_archived, percent_of(_num_indy_cp_entries_archived, _num_indy_cp_entries), _num_indy_cp_entries_reverted); - msg.info("Platform loader initiated classes = %5d", AOTClassLinker::num_platform_initiated_classes()); - msg.info("App loader initiated classes = %5d", AOTClassLinker::num_app_initiated_classes()); + msg.info("Platform loader initiated classes = %6d", AOTClassLinker::num_platform_initiated_classes()); + msg.info("App loader initiated classes = %6d", AOTClassLinker::num_app_initiated_classes()); + msg.info("Dynamic proxy classes = %6d%s", _num_dynamic_proxy_classes, + CDSConfig::is_dumping_full_module_graph() ? "" : " (not archiving FMG)"); + msg.info("MethodCounters = %6d (%8d bytes)", _counts[RW][MethodCountersType], + _bytes [RW][MethodCountersType]); + msg.info("KlassTrainingData = %6d (%8d bytes)", _counts[RW][KlassTrainingDataType], + _bytes [RW][KlassTrainingDataType]); + msg.info("MethodTrainingData = %6d (%8d bytes)", _counts[RW][MethodTrainingDataType], + _bytes [RW][MethodTrainingDataType]); + } #ifdef ASSERT diff --git a/src/hotspot/share/cds/dumpAllocStats.hpp b/src/hotspot/share/cds/dumpAllocStats.hpp index 7d651320e6f32..1a286c2dd57e4 100644 --- a/src/hotspot/share/cds/dumpAllocStats.hpp +++ b/src/hotspot/share/cds/dumpAllocStats.hpp @@ -77,6 +77,7 @@ class DumpAllocStats : public StackObj { int _num_method_cp_entries; int _num_method_cp_entries_archived; int _num_method_cp_entries_reverted; + int _num_dynamic_proxy_classes; public: enum { RO = 0, RW = 1 }; @@ -96,6 +97,7 @@ class DumpAllocStats : public StackObj { _num_method_cp_entries = 0; _num_method_cp_entries_archived = 0; _num_method_cp_entries_reverted = 0; + _num_dynamic_proxy_classes = 0; }; CompactHashtableStats* symbol_stats() { return &_symbol_stats; } @@ -146,6 +148,11 @@ class DumpAllocStats : public StackObj { _num_method_cp_entries_reverted += reverted ? 
1 : 0; } + void record_dynamic_proxy_class() { + _num_dynamic_proxy_classes ++; + } + + void print_stats(int ro_all, int rw_all); DEBUG_ONLY(void verify(int expected_byte_size, bool read_only) const); diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp index ef2a6dcb8e63c..3ac2b1c151840 100644 --- a/src/hotspot/share/cds/metaspaceShared.cpp +++ b/src/hotspot/share/cds/metaspaceShared.cpp @@ -78,6 +78,7 @@ #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" #include "oops/oopHandle.hpp" +#include "oops/trainingData.hpp" #include "prims/jvmtiExport.hpp" #include "runtime/arguments.hpp" #include "runtime/globals.hpp" @@ -480,6 +481,7 @@ void MetaspaceShared::serialize(SerializeClosure* soc) { SystemDictionaryShared::serialize_dictionary_headers(soc); AOTLinkedClassBulkLoader::serialize(soc, true); FinalImageRecipes::serialize(soc); + TrainingData::serialize(soc); InstanceMirrorKlass::serialize_offsets(soc); // Dump/restore well known classes (pointers) @@ -564,6 +566,7 @@ class StaticArchiveBuilder : public ArchiveBuilder { SystemDictionaryShared::dumptime_classes_do(it); Universe::metaspace_pointers_do(it); vmSymbols::metaspace_pointers_do(it); + TrainingData::iterate_roots(it); // The above code should find all the symbols that are referenced by the // archived classes. We just need to add the extra symbols which @@ -603,6 +606,9 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables(AOTClassLocationConfig*& if (CDSConfig::is_dumping_preimage_static_archive()) { FinalImageRecipes::record_recipes(); } + + TrainingData::dump_training_data(); + MetaspaceShared::write_method_handle_intrinsics(); // Write lambform lines into archive @@ -673,6 +679,9 @@ void VM_PopulateDumpSharedSpace::doit() { LambdaProxyClassDictionary::adjust_dumptime_table(); } + log_info(cds)("Make training data shareable"); + _builder.make_training_data_shareable(); + // The vtable clones contain addresses of the current process. // We don't want to write these addresses into the archive. CppVtables::zero_archived_vtables(); @@ -791,6 +800,13 @@ void MetaspaceShared::link_shared_classes(TRAPS) { void MetaspaceShared::preload_and_dump(TRAPS) { CDSConfig::DumperThreadMark dumper_thread_mark(THREAD); ResourceMark rm(THREAD); + HandleMark hm(THREAD); + + if (CDSConfig::is_dumping_final_static_archive() && AOTPrintTrainingInfo) { + tty->print_cr("==================== archived_training_data ** before dumping ===================="); + TrainingData::print_archived_training_data_on(tty); + } + StaticArchiveBuilder builder; preload_and_dump_impl(builder, THREAD); if (HAS_PENDING_EXCEPTION) { @@ -954,6 +970,14 @@ void MetaspaceShared::preload_and_dump_impl(StaticArchiveBuilder& builder, TRAPS // are implemented by K are not verified. 
link_shared_classes(CHECK); log_info(cds)("Rewriting and linking classes: done"); + if (CDSConfig::is_dumping_final_static_archive()) { + assert(!AOTRecordTraining, "must be"); + if (CDSConfig::is_dumping_aot_linked_classes()) { + AOTRecordTraining = true; + } + } + + TrainingData::init_dumptime_table(CHECK); // captures TrainingDataSetLocker if (CDSConfig::is_dumping_regenerated_lambdaform_invokers()) { LambdaFormInvokers::regenerate_holder_classes(CHECK); @@ -1822,7 +1846,7 @@ void MetaspaceShared::initialize_shared_spaces() { tty->print_cr("Dynamic archive version %d", dynamic_mapinfo->version()); SystemDictionaryShared::print_shared_archive(tty, false/*dynamic*/); } - + TrainingData::print_archived_training_data_on(tty); // collect shared symbols and strings CountSharedSymbols cl; SymbolTable::shared_symbols_do(&cl); diff --git a/src/hotspot/share/cds/runTimeClassInfo.hpp b/src/hotspot/share/cds/runTimeClassInfo.hpp index 8ad2efcbccb9a..058d518188114 100644 --- a/src/hotspot/share/cds/runTimeClassInfo.hpp +++ b/src/hotspot/share/cds/runTimeClassInfo.hpp @@ -264,7 +264,11 @@ class RunTimeClassInfo { // Used by RunTimeSharedDictionary to implement OffsetCompactHashtable::EQUALS static inline bool EQUALS( const RunTimeClassInfo* value, Symbol* key, int len_unused) { +#if INCLUDE_CDS return (value->klass()->name() == key); +#else + return false; +#endif } }; diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp index 3991773d86fa3..4513ba2e5ac59 100644 --- a/src/hotspot/share/ci/ciEnv.cpp +++ b/src/hotspot/share/ci/ciEnv.cpp @@ -1159,6 +1159,13 @@ int ciEnv::compile_id() { // ciEnv::notice_inlined_method() void ciEnv::notice_inlined_method(ciMethod* method) { _num_inlined_bytecodes += method->code_size_for_inlining(); + CompileTrainingData* ctd = task()->training_data(); + if (ctd != nullptr) { + GUARDED_VM_ENTRY({ + methodHandle mh(Thread::current(), method->get_Method()); + ctd->notice_inlined_method(task(), mh); + }); + } } // ------------------------------------------------------------------ diff --git a/src/hotspot/share/ci/ciInstanceKlass.hpp b/src/hotspot/share/ci/ciInstanceKlass.hpp index 69b73152d37bd..92a7434e8ba06 100644 --- a/src/hotspot/share/ci/ciInstanceKlass.hpp +++ b/src/hotspot/share/ci/ciInstanceKlass.hpp @@ -44,6 +44,7 @@ class ciInstanceKlass : public ciKlass { friend class ciMethod; friend class ciField; friend class ciReplay; + friend class CompileTrainingData; private: enum SubklassValue { subklass_unknown, subklass_false, subklass_true }; diff --git a/src/hotspot/share/ci/ciMethod.cpp b/src/hotspot/share/ci/ciMethod.cpp index 3b2670c3eb0bc..47a606e1ecc8e 100644 --- a/src/hotspot/share/ci/ciMethod.cpp +++ b/src/hotspot/share/ci/ciMethod.cpp @@ -36,6 +36,7 @@ #include "compiler/abstractCompiler.hpp" #include "compiler/compilerDefinitions.inline.hpp" #include "compiler/compilerOracle.hpp" +#include "compiler/compileTask.hpp" #include "compiler/methodLiveness.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/linkResolver.hpp" @@ -47,6 +48,7 @@ #include "oops/generateOopMap.hpp" #include "oops/method.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/trainingData.hpp" #include "prims/methodHandles.hpp" #include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" @@ -1142,6 +1144,28 @@ int ciMethod::code_size_for_inlining() { // Also some instructions inside the code are excluded from inline // heuristic (e.g. 
post call nop instructions; see InlineSkippedInstructionsCounter) int ciMethod::inline_instructions_size() { + if (_inline_instructions_size == -1) { + if (TrainingData::have_data()) { + GUARDED_VM_ENTRY( + CompLevel level = static_cast(CURRENT_ENV->comp_level()); + methodHandle top_level_mh(Thread::current(), CURRENT_ENV->task()->method()); + MethodTrainingData* mtd = MethodTrainingData::find(top_level_mh); + if (mtd != nullptr) { + CompileTrainingData* ctd = mtd->last_toplevel_compile(level); + if (ctd != nullptr) { + methodHandle mh(Thread::current(), get_Method()); + MethodTrainingData* this_mtd = MethodTrainingData::find(mh); + if (this_mtd != nullptr) { + auto r = ctd->ci_records().ciMethod__inline_instructions_size.find(this_mtd); + if (r.is_valid()) { + _inline_instructions_size = r.result(); + } + } + } + } + ); + } + } if (_inline_instructions_size == -1) { GUARDED_VM_ENTRY( nmethod* code = get_Method()->code(); @@ -1151,6 +1175,14 @@ int ciMethod::inline_instructions_size() { } else { _inline_instructions_size = 0; } + if (TrainingData::need_data()) { + CompileTrainingData* ctd = CURRENT_ENV->task()->training_data(); + if (ctd != nullptr) { + methodHandle mh(Thread::current(), get_Method()); + MethodTrainingData* this_mtd = MethodTrainingData::make(mh); + ctd->ci_records().ciMethod__inline_instructions_size.append_if_missing(_inline_instructions_size, this_mtd); + } + } ); } return _inline_instructions_size; diff --git a/src/hotspot/share/ci/ciMethodData.cpp b/src/hotspot/share/ci/ciMethodData.cpp index a37e4ba75e6f5..2d0af4b50c16e 100644 --- a/src/hotspot/share/ci/ciMethodData.cpp +++ b/src/hotspot/share/ci/ciMethodData.cpp @@ -68,7 +68,11 @@ class PrepareExtraDataClosure : public CleanExtraDataClosure { { } bool is_live(Method* m) { - if (!m->method_holder()->is_loader_alive()) { + Klass* holder = m->method_holder(); + if (holder == nullptr || + holder->class_loader_data() == nullptr || + !holder->is_loader_alive() || + (holder->is_instance_klass() && !InstanceKlass::cast(holder)->is_loaded())) { return false; } if (CURRENT_ENV->cached_metadata(m) == nullptr) { @@ -303,7 +307,8 @@ bool ciMethodData::load_data() { void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) { for (uint row = 0; row < row_limit(); row++) { Klass* k = data->as_ReceiverTypeData()->receiver(row); - if (k != nullptr) { + if (k != nullptr && k->class_loader_data() != nullptr && + (!k->is_instance_klass() || InstanceKlass::cast(k)->is_loaded())) { if (k->is_loader_alive()) { ciKlass* klass = CURRENT_ENV->get_klass(k); set_receiver(row, klass); @@ -321,7 +326,9 @@ void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries for (int i = 0; i < number_of_entries(); i++) { intptr_t k = entries->type(i); Klass* klass = (Klass*)klass_part(k); - if (klass != nullptr && !klass->is_loader_alive()) { + if (klass != nullptr && + ((klass->is_instance_klass() && !InstanceKlass::cast(klass)->is_loaded()) || + (klass->class_loader_data() == nullptr || !klass->is_loader_alive()))) { // With concurrent class unloading, the MDO could have stale metadata; override it TypeStackSlotEntries::set_type(i, TypeStackSlotEntries::with_status((Klass*)nullptr, k)); } else { @@ -333,7 +340,9 @@ void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) { intptr_t k = ret->type(); Klass* klass = (Klass*)klass_part(k); - if (klass != nullptr && !klass->is_loader_alive()) { + if (klass != nullptr 
&& + ((klass->is_instance_klass() && !InstanceKlass::cast(klass)->is_loaded()) || + (klass->class_loader_data() == nullptr || !klass->is_loader_alive()))) { // With concurrent class unloading, the MDO could have stale metadata; override it set_type(ReturnTypeEntry::with_status((Klass*)nullptr, k)); } else { diff --git a/src/hotspot/share/ci/ciObjectFactory.cpp b/src/hotspot/share/ci/ciObjectFactory.cpp index 1fa590e4ad338..2af5d812922fe 100644 --- a/src/hotspot/share/ci/ciObjectFactory.cpp +++ b/src/hotspot/share/ci/ciObjectFactory.cpp @@ -44,10 +44,12 @@ #include "classfile/javaClasses.inline.hpp" #include "classfile/vmClasses.hpp" #include "compiler/compiler_globals.hpp" +#include "compiler/compileTask.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "memory/allocation.inline.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" +#include "oops/trainingData.hpp" #include "runtime/handles.inline.hpp" #include "runtime/signature.hpp" #include "utilities/macros.hpp" @@ -108,7 +110,7 @@ void ciObjectFactory::initialize() { // This Arena is long lived and exists in the resource mark of the // compiler thread that initializes the initial ciObjectFactory which // creates the shared ciObjects that all later ciObjectFactories use. - Arena* arena = new (mtCompiler) Arena(mtCompiler, Arena::Tag::tag_cienv); + Arena* arena = new (mtCompiler) Arena(mtCompiler); ciEnv initial(arena); ciEnv* env = ciEnv::current(); env->_factory->init_shared_objects(); @@ -232,26 +234,40 @@ void ciObjectFactory::remove_symbols() { ciObject* ciObjectFactory::get(oop key) { ASSERT_IN_VM; - assert(Universe::heap()->is_in(key), "must be"); + Handle keyHandle(Thread::current(), key); + assert(Universe::heap()->is_in(keyHandle()), "must be"); - NonPermObject* &bucket = find_non_perm(key); + NonPermObject* &bucket = find_non_perm(keyHandle); if (bucket != nullptr) { return bucket->object(); } // The ciObject does not yet exist. Create it and insert it // into the cache. - Handle keyHandle(Thread::current(), key); ciObject* new_object = create_new_object(keyHandle()); assert(keyHandle() == new_object->get_oop(), "must be properly recorded"); init_ident_of(new_object); assert(Universe::heap()->is_in(new_object->get_oop()), "must be"); // Not a perm-space object. - insert_non_perm(bucket, keyHandle(), new_object); + insert_non_perm(bucket, keyHandle, new_object); + notice_new_object(new_object); return new_object; } +void ciObjectFactory::notice_new_object(ciBaseObject* new_object) { + if (TrainingData::need_data()) { + ciEnv* env = ciEnv::current(); + if (env->task() != nullptr) { + // Note: task will be null during init_compiler_runtime. + CompileTrainingData* td = env->task()->training_data(); + if (td != nullptr) { + td->notice_jit_observation(env, new_object); + } + } + } +} + int ciObjectFactory::metadata_compare(Metadata* const& key, ciMetadata* const& elt) { Metadata* value = elt->constant_encoding(); if (key < value) return -1; @@ -331,6 +347,7 @@ ciMetadata* ciObjectFactory::get_metadata(Metadata* key) { } assert(!found, "no double insert"); _ci_metadata.insert_before(index, new_object); + notice_new_object(new_object); return new_object; } return _ci_metadata.at(index)->as_metadata(); @@ -636,12 +653,12 @@ static ciObjectFactory::NonPermObject* emptyBucket = nullptr; // Use a small hash table, hashed on the klass of the key. // If there is no entry in the cache corresponding to this oop, return // the null tail of the bucket into which the oop should be inserted. 
-ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) { - assert(Universe::heap()->is_in(key), "must be"); - ciMetadata* klass = get_metadata(key->klass()); +ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(Handle keyHandle) { + assert(Universe::heap()->is_in(keyHandle()), "must be"); + ciMetadata* klass = get_metadata(keyHandle->klass()); // This may safepoint! NonPermObject* *bp = &_non_perm_bucket[(unsigned) klass->hash() % NON_PERM_BUCKETS]; for (NonPermObject* p; (p = (*bp)) != nullptr; bp = &p->next()) { - if (is_equal(p, key)) break; + if (is_equal(p, keyHandle())) break; } return (*bp); } @@ -664,12 +681,12 @@ inline ciObjectFactory::NonPermObject::NonPermObject(ciObjectFactory::NonPermObj // ciObjectFactory::insert_non_perm // // Insert a ciObject into the non-perm table. -void ciObjectFactory::insert_non_perm(ciObjectFactory::NonPermObject* &where, oop key, ciObject* obj) { - assert(Universe::heap()->is_in_or_null(key), "must be"); +void ciObjectFactory::insert_non_perm(ciObjectFactory::NonPermObject* &where, Handle keyHandle, ciObject* obj) { + assert(Universe::heap()->is_in_or_null(keyHandle()), "must be"); assert(&where != &emptyBucket, "must not try to fill empty bucket"); - NonPermObject* p = new (arena()) NonPermObject(where, key, obj); - assert(where == p && is_equal(p, key) && p->object() == obj, "entry must match"); - assert(find_non_perm(key) == p, "must find the same spot"); + NonPermObject* p = new (arena()) NonPermObject(where, keyHandle(), obj); + assert(where == p && is_equal(p, keyHandle()) && p->object() == obj, "entry must match"); + assert(find_non_perm(keyHandle) == p, "must find the same spot"); ++_non_perm_count; } diff --git a/src/hotspot/share/ci/ciObjectFactory.hpp b/src/hotspot/share/ci/ciObjectFactory.hpp index d95a7d1ff22c4..e4b0c49cc92a8 100644 --- a/src/hotspot/share/ci/ciObjectFactory.hpp +++ b/src/hotspot/share/ci/ciObjectFactory.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ // which ensures that for each oop, at most one ciObject is created. // This invariant allows efficient implementation of ciObject. class ciObjectFactory : public ArenaObj { + friend class VMStructs; friend class ciEnv; private: @@ -77,8 +78,8 @@ class ciObjectFactory : public ArenaObj { return p->object()->get_oop() == key; } - NonPermObject* &find_non_perm(oop key); - void insert_non_perm(NonPermObject* &where, oop key, ciObject* obj); + NonPermObject* &find_non_perm(Handle keyHandle); + void insert_non_perm(NonPermObject* &where, Handle keyHandle, ciObject* obj); void init_ident_of(ciBaseObject* obj); @@ -106,6 +107,9 @@ class ciObjectFactory : public ArenaObj { // Get the ciSymbol corresponding to one of the vmSymbols. static ciSymbol* vm_symbol_at(vmSymbolID index); + // Called on every new object made. + void notice_new_object(ciBaseObject* new_object); + // Get the ciMethod representing an unloaded/unfound method. 
ciMethod* get_unloaded_method(ciInstanceKlass* holder, ciSymbol* name, diff --git a/src/hotspot/share/classfile/compactHashtable.hpp b/src/hotspot/share/classfile/compactHashtable.hpp index 2985f0f9a1a43..83299eda8c7e0 100644 --- a/src/hotspot/share/classfile/compactHashtable.hpp +++ b/src/hotspot/share/classfile/compactHashtable.hpp @@ -241,6 +241,7 @@ template < bool (*EQUALS)(V value, K key, int len) > class CompactHashtable : public SimpleCompactHashtable { + friend class VMStructs; V decode(u4 offset) const { return DECODE(_base_address, offset); @@ -282,7 +283,15 @@ class CompactHashtable : public SimpleCompactHashtable { } template - inline void iterate(ITER* iter) const { + inline void iterate(ITER* iter) const { iterate([&](V v) { iter->do_value(v); }); } + + template + inline void iterate(const Function& function) const { // lambda enabled API + iterate(const_cast(function)); + } + + template + inline void iterate(Function& function) const { // lambda enabled API for (u4 i = 0; i < _bucket_count; i++) { u4 bucket_info = _buckets[i]; u4 bucket_offset = BUCKET_OFFSET(bucket_info); @@ -290,11 +299,11 @@ class CompactHashtable : public SimpleCompactHashtable { u4* entry = _entries + bucket_offset; if (bucket_type == VALUE_ONLY_BUCKET_TYPE) { - iter->do_value(decode(entry[0])); + function(decode(entry[0])); } else { - u4*entry_max = _entries + BUCKET_OFFSET(_buckets[i + 1]); + u4* entry_max = _entries + BUCKET_OFFSET(_buckets[i + 1]); while (entry < entry_max) { - iter->do_value(decode(entry[1])); + function(decode(entry[1])); entry += 2; } } diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp index 5c4ee3f9452ad..78fa8760bf627 100644 --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp @@ -974,7 +974,7 @@ void SystemDictionaryShared::copy_linking_constraints_from_preimage(InstanceKlas } unsigned int SystemDictionaryShared::hash_for_shared_dictionary(address ptr) { - if (ArchiveBuilder::is_active()) { + if (ArchiveBuilder::is_active() && ArchiveBuilder::current()->is_in_buffer_space(ptr)) { uintx offset = ArchiveBuilder::current()->any_to_offset(ptr); unsigned int hash = primitive_hash(offset); DEBUG_ONLY({ diff --git a/src/hotspot/share/compiler/compilationPolicy.cpp b/src/hotspot/share/compiler/compilationPolicy.cpp index fa18b3c8ab4c0..901e7ca567edb 100644 --- a/src/hotspot/share/compiler/compilationPolicy.cpp +++ b/src/hotspot/share/compiler/compilationPolicy.cpp @@ -22,6 +22,7 @@ * */ +#include "cds/aotLinkedClassBulkLoader.hpp" #include "code/scopeDesc.hpp" #include "compiler/compilationPolicy.hpp" #include "compiler/compileBroker.hpp" @@ -31,6 +32,7 @@ #include "oops/method.inline.hpp" #include "oops/methodData.hpp" #include "oops/oop.inline.hpp" +#include "oops/trainingData.hpp" #include "prims/jvmtiExport.hpp" #include "runtime/arguments.hpp" #include "runtime/deoptimization.hpp" @@ -50,11 +52,13 @@ #include "jvmci/jvmci.hpp" #endif -jlong CompilationPolicy::_start_time = 0; +int64_t CompilationPolicy::_start_time = 0; int CompilationPolicy::_c1_count = 0; int CompilationPolicy::_c2_count = 0; double CompilationPolicy::_increase_threshold_at_ratio = 0; +CompilationPolicy::TrainingReplayQueue CompilationPolicy::_training_replay_queue; + void compilationPolicy_init() { CompilationPolicy::initialize(); } @@ -78,36 +82,119 @@ bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) if (m->has_compiled_code()) return 
false; // already compiled if (!can_be_compiled(m, comp_level)) return false; - return !UseInterpreter || // must compile all methods + return !UseInterpreter || // must compile all methods (AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods } +void CompilationPolicy::maybe_compile_early(const methodHandle& m, TRAPS) { + if (m->method_holder()->is_not_initialized()) { + // 'is_not_initialized' means not only '!is_initialized', but also that + // initialization has not been started yet ('!being_initialized') + // Do not force compilation of methods in uninitialized classes. + return; + } + if (!m->is_native() && MethodTrainingData::have_data()) { + MethodTrainingData* mtd = MethodTrainingData::find_fast(m); + if (mtd == nullptr) { + return; // there is no training data recorded for m + } + CompLevel cur_level = static_cast(m->highest_comp_level()); + CompLevel next_level = trained_transition(m, cur_level, mtd, THREAD); + if (next_level != cur_level && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) { + if (PrintTieredEvents) { + print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, next_level); + } + CompileBroker::compile_method(m, InvocationEntryBci, next_level, methodHandle(), 0, CompileTask::Reason_MustBeCompiled, THREAD); + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + } + } + } +} + void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) { + if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) { + // don't force compilation, resolve was on behalf of compiler + return; + } + if (m->method_holder()->is_not_initialized()) { + // 'is_not_initialized' means not only '!is_initialized', but also that + // initialization has not been started yet ('!being_initialized') + // Do not force compilation of methods in uninitialized classes. + // Note that doing this would throw an assert later, + // in CompileBroker::compile_method. + // We sometimes use the link resolver to do reflective lookups + // even before classes are initialized. + return; + } + if (must_be_compiled(m)) { // This path is unusual, mostly used by the '-Xcomp' stress test mode. - - if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) { - // don't force compilation, resolve was on behalf of compiler - return; - } - if (m->method_holder()->is_not_initialized()) { - // 'is_not_initialized' means not only '!is_initialized', but also that - // initialization has not been started yet ('!being_initialized') - // Do not force compilation of methods in uninitialized classes. - // Note that doing this would throw an assert later, - // in CompileBroker::compile_method. - // We sometimes use the link resolver to do reflective lookups - // even before classes are initialized. 
- return; - } CompLevel level = initial_compile_level(m); if (PrintTieredEvents) { - print_event(COMPILE, m(), m(), InvocationEntryBci, level); + print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, level); } CompileBroker::compile_method(m, InvocationEntryBci, level, methodHandle(), 0, CompileTask::Reason_MustBeCompiled, THREAD); } } +void CompilationPolicy::replay_training_at_init_impl(InstanceKlass* klass, TRAPS) { + if (!klass->has_init_deps_processed()) { + ResourceMark rm; + log_debug(training)("Replay training: %s", klass->external_name()); + + KlassTrainingData* ktd = KlassTrainingData::find(klass); + if (ktd != nullptr) { + guarantee(ktd->has_holder(), ""); + ktd->notice_fully_initialized(); // sets klass->has_init_deps_processed bit + assert(klass->has_init_deps_processed(), ""); + if (AOTCompileEagerly) { + ktd->iterate_comp_deps([&](CompileTrainingData* ctd) { + if (ctd->init_deps_left() == 0) { + MethodTrainingData* mtd = ctd->method(); + if (mtd->has_holder()) { + const methodHandle mh(THREAD, const_cast(mtd->holder())); + CompilationPolicy::maybe_compile_early(mh, THREAD); + } + } + }); + } + } + } +} + +void CompilationPolicy::flush_replay_training_at_init(TRAPS) { + MonitorLocker locker(THREAD, TrainingReplayQueue_lock); + while (!_training_replay_queue.is_empty_unlocked()) { + locker.wait(); // let the replay training thread drain the queue + } +} + +void CompilationPolicy::replay_training_at_init(InstanceKlass* klass, TRAPS) { + assert(klass->is_initialized(), ""); + if (TrainingData::have_data() && klass->is_shared()) { + _training_replay_queue.push(klass, TrainingReplayQueue_lock, THREAD); + } +} + +// For TrainingReplayQueue +template<> +void CompilationPolicyUtils::Queue::print_on(outputStream* st) { + int pos = 0; + for (QueueNode* cur = _head; cur != nullptr; cur = cur->next()) { + ResourceMark rm; + InstanceKlass* ik = cur->value(); + st->print_cr("%3d: " INTPTR_FORMAT " %s", ++pos, p2i(ik), ik->external_name()); + } +} + +void CompilationPolicy::replay_training_at_init_loop(TRAPS) { + while (!CompileBroker::is_compilation_disabled_forever() || AOTVerifyTrainingData) { + InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, THREAD); + replay_training_at_init_impl(ik, THREAD); + } +} + static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_level) { if (comp_level == CompLevel_any) { if (CompilerConfig::is_c1_only()) { @@ -122,7 +209,7 @@ static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_leve // Returns true if m is allowed to be compiled bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) { // allow any levels for WhiteBox - assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level"); + assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level %d", comp_level); if (m->is_abstract()) return false; if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false; @@ -322,7 +409,7 @@ double CompilationPolicy::threshold_scale(CompLevel level, int feedback_k) { return 1; } -void CompilationPolicy::print_counters(const char* prefix, const Method* m) { +void CompilationPolicy::print_counters(const char* prefix, Method* m) { int invocation_count = m->invocation_count(); int backedge_count = m->backedge_count(); MethodData* mdh = m->method_data(); @@ -342,8 +429,36 @@ void CompilationPolicy::print_counters(const char* prefix, const Method* m) { m->highest_comp_level(), 
m->highest_osr_comp_level()); } +void CompilationPolicy::print_training_data(const char* prefix, Method* method) { + methodHandle m(Thread::current(), method); + tty->print(" %smtd: ", prefix); + MethodTrainingData* mtd = MethodTrainingData::find(m); + if (mtd == nullptr) { + tty->print("null"); + } else { + MethodData* md = mtd->final_profile(); + tty->print("mdo="); + if (md == nullptr) { + tty->print("null"); + } else { + int mdo_invocations = md->invocation_count(); + int mdo_backedges = md->backedge_count(); + int mdo_invocations_start = md->invocation_count_start(); + int mdo_backedges_start = md->backedge_count_start(); + tty->print("%d(%d), %d(%d)", mdo_invocations, mdo_invocations_start, mdo_backedges, mdo_backedges_start); + } + CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization); + tty->print(", deps="); + if (ctd == nullptr) { + tty->print("null"); + } else { + tty->print("%d", ctd->init_deps_left()); + } + } +} + // Print an event. -void CompilationPolicy::print_event(EventType type, const Method* m, const Method* im, int bci, CompLevel level) { +void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int bci, CompLevel level) { bool inlinee_event = m != im; ttyLocker tty_lock; @@ -359,6 +474,9 @@ void CompilationPolicy::print_event(EventType type, const Method* m, const Metho case COMPILE: tty->print("compile"); break; + case FORCE_COMPILE: + tty->print("force-compile"); + break; case REMOVE_FROM_QUEUE: tty->print("remove-from-queue"); break; @@ -424,6 +542,10 @@ void CompilationPolicy::print_event(EventType type, const Method* m, const Metho if (m->queued_for_compilation()) { tty->print("in-queue"); } else tty->print("idle"); + print_training_data("", m); + if (inlinee_event) { + print_training_data("inlinee ", im); + } } tty->print_cr("]"); } @@ -617,12 +739,12 @@ void CompilationPolicy::handle_counter_overflow(const methodHandle& method) { } // Called with the queue locked and with at least one element -CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue) { +CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThread* THREAD) { CompileTask *max_blocking_task = nullptr; CompileTask *max_task = nullptr; Method* max_method = nullptr; - jlong t = nanos_to_millis(os::javaTimeNanos()); + int64_t t = nanos_to_millis(os::javaTimeNanos()); // Iterate through the queue and find a method with a maximum rate. 
for (CompileTask* task = compile_queue->first(); task != nullptr;) { CompileTask* next_task = task->next(); @@ -639,7 +761,7 @@ CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue) { return task; } Method* method = task->method(); - methodHandle mh(Thread::current(), method); + methodHandle mh(THREAD, method); if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) { if (PrintTieredEvents) { print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level()); @@ -675,7 +797,7 @@ CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue) { max_method = max_task->method(); } - methodHandle max_method_h(Thread::current(), max_method); + methodHandle max_method_h(THREAD, max_method); if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile && max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) { @@ -694,7 +816,6 @@ CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue) { print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level()); } } - return max_task; } @@ -717,6 +838,13 @@ nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level); } +#if INCLUDE_JVMCI + if (EnableJVMCI && UseJVMCICompiler && + comp_level == CompLevel_full_optimization CDS_ONLY(&& !AOTLinkedClassBulkLoader::class_preloading_finished())) { + return nullptr; + } +#endif + if (comp_level == CompLevel_none && JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { @@ -817,7 +945,7 @@ void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level } // update_rate() is called from select_task() while holding a compile queue lock. -void CompilationPolicy::update_rate(jlong t, const methodHandle& method) { +void CompilationPolicy::update_rate(int64_t t, const methodHandle& method) { // Skip update if counters are absent. // Can't allocate them since we are holding compile queue lock. if (method->method_counters() == nullptr) return; @@ -831,8 +959,8 @@ void CompilationPolicy::update_rate(jlong t, const methodHandle& method) { // We don't update the rate if we've just came out of a safepoint. // delta_s is the time since last safepoint in milliseconds. - jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms(); - jlong delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement + int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms(); + int64_t delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement // How many events were there since the last time? int event_count = method->invocation_count() + method->backedge_count(); int delta_e = event_count - method->prev_event_count(); @@ -855,9 +983,9 @@ void CompilationPolicy::update_rate(jlong t, const methodHandle& method) { // Check if this method has been stale for a given number of milliseconds. // See select_task(). 
-bool CompilationPolicy::is_stale(jlong t, jlong timeout, const methodHandle& method) {
-  jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
-  jlong delta_t = t - method->prev_time();
+bool CompilationPolicy::is_stale(int64_t t, int64_t timeout, const methodHandle& method) {
+  int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
+  int64_t delta_t = t - method->prev_time();
   if (delta_t > timeout && delta_s > timeout) {
     int event_count = method->invocation_count() + method->backedge_count();
     int delta_e = event_count - method->prev_event_count();
@@ -908,13 +1036,12 @@ bool CompilationPolicy::is_method_profiled(const methodHandle& method) {
 // Determine is a method is mature.
-bool CompilationPolicy::is_mature(Method* method) {
+bool CompilationPolicy::is_mature(MethodData* mdo) {
   if (Arguments::is_compiler_only()) {
     // Always report profiles as immature with -Xcomp
     return false;
   }
-  methodHandle mh(Thread::current(), method);
-  MethodData* mdo = method->method_data();
+  methodHandle mh(Thread::current(), mdo->method());
   if (mdo != nullptr) {
     int i = mdo->invocation_count();
     int b = mdo->backedge_count();
@@ -931,9 +1058,18 @@ bool CompilationPolicy::should_create_mdo(const methodHandle& method, CompLevel
   if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || CompilationModeFlag::quick_only() || !ProfileInterpreter) {
     return false;
   }
+
+  if (TrainingData::have_data()) {
+    MethodTrainingData* mtd = MethodTrainingData::find_fast(method);
+    if (mtd != nullptr && mtd->saw_level(CompLevel_full_optimization)) {
+      return true;
+    }
+  }
+
   if (is_old(method)) {
     return true;
   }
+
   int i = method->invocation_count();
   int b = method->backedge_count();
   double k = Tier0ProfilingStartPercentage / 100.0;
@@ -967,7 +1103,7 @@ void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
   if (mh->method_data() == nullptr) {
     Method::build_profiling_method_data(mh, CHECK_AND_CLEAR);
   }
-  if (ProfileInterpreter) {
+  if (ProfileInterpreter && THREAD->has_last_Java_frame()) {
     MethodData* mdo = mh->method_data();
     if (mdo != nullptr) {
       frame last_frame = THREAD->last_frame();
@@ -980,7 +1116,136 @@ void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
   }
 }
+CompLevel CompilationPolicy::trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
+  precond(mtd != nullptr);
+  precond(cur_level == CompLevel_none);
+  if (mtd->only_inlined() && !mtd->saw_level(CompLevel_full_optimization)) {
+    return CompLevel_none;
+  }
+
+  bool training_has_profile = (mtd->final_profile() != nullptr);
+  if (mtd->saw_level(CompLevel_full_optimization) && !training_has_profile) {
+    return CompLevel_full_profile;
+  }
+
+  CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
+  switch (highest_training_level) {
+    case CompLevel_limited_profile:
+    case CompLevel_full_profile:
+      return CompLevel_limited_profile;
+    case CompLevel_simple:
+      return CompLevel_simple;
+    case CompLevel_none:
+      return CompLevel_none;
+    default:
+      break;
+  }
+
+  // Now handle the case of level 4.
+  assert(highest_training_level == CompLevel_full_optimization, "Unexpected compilation level: %d", highest_training_level);
+  if (!training_has_profile) {
+    // The method was part of a level 4 compile, but we don't have a stored profile,
+    // so we need to profile it.
+    return CompLevel_full_profile;
+  }
+  const bool deopt = (static_cast<CompLevel>(method->highest_comp_level()) == CompLevel_full_optimization);
+  // If we deopted, then we reprofile
+  if (deopt && !is_method_profiled(method)) {
+    return CompLevel_full_profile;
+  }
+
+  CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
+  assert(ctd != nullptr, "Should have CTD for CompLevel_full_optimization");
+  // With SkipTier2IfPossible and all deps satisfied, go to level 4 immediately
+  if (SkipTier2IfPossible && ctd->init_deps_left() == 0) {
+    if (method->method_data() == nullptr) {
+      create_mdo(method, THREAD);
+    }
+    return CompLevel_full_optimization;
+  }
+
+  // Otherwise go to level 2
+  return CompLevel_limited_profile;
+}
+
+
+CompLevel CompilationPolicy::trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
+  precond(mtd != nullptr);
+  precond(cur_level == CompLevel_limited_profile);
+
+  // One of the main reasons that we can get here is that we're waiting for the stored C2 code to become ready.
+
+  // But first, check if we have a saved profile
+  bool training_has_profile = (mtd->final_profile() != nullptr);
+  if (!training_has_profile) {
+    return CompLevel_full_profile;
+  }
+
+
+  assert(training_has_profile, "Have to have a profile to be here");
+  // Check if the method is ready
+  CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
+  if (ctd != nullptr && ctd->init_deps_left() == 0) {
+    if (method->method_data() == nullptr) {
+      create_mdo(method, THREAD);
+    }
+    return CompLevel_full_optimization;
+  }
+
+  // Otherwise stay at the current level
+  return CompLevel_limited_profile;
+}
+
+
+CompLevel CompilationPolicy::trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
+  precond(mtd != nullptr);
+  precond(cur_level == CompLevel_full_profile);
+
+  CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
+  // We have the method at the full profile level and we also know that it's possibly an important method.
+  if (highest_training_level == CompLevel_full_optimization && !mtd->only_inlined()) {
+    // Check if it is adequately profiled
+    if (is_method_profiled(method)) {
+      return CompLevel_full_optimization;
+    }
+  }
+
+  // Otherwise stay at the current level
+  return CompLevel_full_profile;
+}
+
+CompLevel CompilationPolicy::trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
+  precond(MethodTrainingData::have_data());
+
+  // If there is no training data recorded for this method, bail out.
+  if (mtd == nullptr) {
+    return cur_level;
+  }
+
+  CompLevel next_level = cur_level;
+  switch(cur_level) {
+    default: break;
+    case CompLevel_none:
+      next_level = trained_transition_from_none(method, cur_level, mtd, THREAD);
+      break;
+    case CompLevel_limited_profile:
+      next_level = trained_transition_from_limited_profile(method, cur_level, mtd, THREAD);
+      break;
+    case CompLevel_full_profile:
+      next_level = trained_transition_from_full_profile(method, cur_level, mtd, THREAD);
      break;
+  }
+
+  // We don't have any special strategies for the C2-only compilation modes, so just fix up the levels for now.
+ if (CompilationModeFlag::high_only_quick_internal() && CompLevel_simple < next_level && next_level < CompLevel_full_optimization) {
+ return CompLevel_none;
+ }
+ if (CompilationModeFlag::high_only() && next_level < CompLevel_full_optimization) {
+ return CompLevel_none;
+ }
+ return (cur_level != next_level) ? limit_level(next_level) : cur_level;
+}
 /*
 * Method states:
@@ -1022,93 +1287,137 @@ void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
 // Common transition function. Given a predicate determines if a method should transition to another level.
 template<typename Predicate>
-CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, bool disable_feedback) {
+CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback) {
 CompLevel next_level = cur_level;
- int i = method->invocation_count();
- int b = method->backedge_count();
 if (force_comp_at_level_simple(method)) {
 next_level = CompLevel_simple;
- } else {
- if (is_trivial(method) || method->is_native()) {
- next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
+ } else if (is_trivial(method) || method->is_native()) {
+ // We do not care if there is profiling data for these methods, throw them to compiler.
+ next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
+ } else if (MethodTrainingData::have_data()) {
+ MethodTrainingData* mtd = MethodTrainingData::find_fast(method);
+ if (mtd == nullptr) {
+ // We haven't seen compilations of this method in training. It's either very cold or the behavior changed.
+ // Feed it to the standard TF with no profiling delay.
+ next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
 } else {
- switch(cur_level) {
- default: break;
- case CompLevel_none:
- // If we were at full profile level, would we switch to full opt?
- if (common<Predicate>(method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
- next_level = CompLevel_full_optimization;
- } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply(method, cur_level, i, b)) {
- // C1-generated fully profiled code is about 30% slower than the limited profile
- // code that has only invocation and backedge counters. The observation is that
- // if C2 queue is large enough we can spend too much time in the fully profiled code
- // while waiting for C2 to pick the method from the queue. To alleviate this problem
- // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
- // we choose to compile a limited profiled version and then recompile with full profiling
- // when the load on C2 goes down.
- if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
- Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
- next_level = CompLevel_limited_profile;
- } else {
- next_level = CompLevel_full_profile;
- }
- }
- break;
- case CompLevel_limited_profile:
- if (is_method_profiled(method)) {
- // Special case: we got here because this method was fully profiled in the interpreter.
- next_level = CompLevel_full_optimization; - } else { - MethodData* mdo = method->method_data(); - if (mdo != nullptr) { - if (mdo->would_profile()) { - if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOff * compiler_count(CompLevel_full_optimization) && - Predicate::apply(method, cur_level, i, b))) { - next_level = CompLevel_full_profile; - } - } else { - next_level = CompLevel_full_optimization; - } - } else { - // If there is no MDO we need to profile - if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOff * compiler_count(CompLevel_full_optimization) && - Predicate::apply(method, cur_level, i, b))) { - next_level = CompLevel_full_profile; - } - } - } - break; - case CompLevel_full_profile: - { - MethodData* mdo = method->method_data(); - if (mdo != nullptr) { - if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) { - int mdo_i = mdo->invocation_count_delta(); - int mdo_b = mdo->backedge_count_delta(); - if (Predicate::apply(method, cur_level, mdo_i, mdo_b)) { - next_level = CompLevel_full_optimization; - } - } else { - next_level = CompLevel_full_optimization; - } - } - } - break; + next_level = trained_transition(method, cur_level, mtd, THREAD); + if (cur_level == next_level) { + // trained_transtion() is going to return the same level if no startup/warmup optimizations apply. + // In order to catch possible pathologies due to behavior change we feed the event to the regular + // TF but with profiling delay. + next_level = standard_transition(method, cur_level, true /*delay_profiling*/, disable_feedback); } } + } else { + next_level = standard_transition(method, cur_level, false /*delay_profiling*/, disable_feedback); } return (next_level != cur_level) ? limit_level(next_level) : next_level; } +template +CompLevel CompilationPolicy::standard_transition(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) { + CompLevel next_level = cur_level; + switch(cur_level) { + default: break; + case CompLevel_none: + next_level = transition_from_none(method, cur_level, delay_profiling, disable_feedback); + break; + case CompLevel_limited_profile: + next_level = transition_from_limited_profile(method, cur_level, delay_profiling, disable_feedback); + break; + case CompLevel_full_profile: + next_level = transition_from_full_profile(method, cur_level); + break; + } + return next_level; +} + +template +CompLevel CompilationPolicy::transition_from_none(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) { + precond(cur_level == CompLevel_none); + CompLevel next_level = cur_level; + int i = method->invocation_count(); + int b = method->backedge_count(); + double scale = delay_profiling ? Tier0ProfileDelayFactor : 1.0; + // If we were at full profile level, would we switch to full opt? + if (transition_from_full_profile(method, CompLevel_full_profile) == CompLevel_full_optimization) { + next_level = CompLevel_full_optimization; + } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply_scaled(method, cur_level, i, b, scale)) { + // C1-generated fully profiled code is about 30% slower than the limited profile + // code that has only invocation and backedge counters. The observation is that + // if C2 queue is large enough we can spend too much time in the fully profiled code + // while waiting for C2 to pick the method from the queue. 
To alleviate this problem + // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long + // we choose to compile a limited profiled version and then recompile with full profiling + // when the load on C2 goes down. + if (delay_profiling || (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > Tier3DelayOn * compiler_count(CompLevel_full_optimization))) { + next_level = CompLevel_limited_profile; + } else { + next_level = CompLevel_full_profile; + } + } + return next_level; +} + +template +CompLevel CompilationPolicy::transition_from_full_profile(const methodHandle& method, CompLevel cur_level) { + precond(cur_level == CompLevel_full_profile); + CompLevel next_level = cur_level; + MethodData* mdo = method->method_data(); + if (mdo != nullptr) { + if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) { + int mdo_i = mdo->invocation_count_delta(); + int mdo_b = mdo->backedge_count_delta(); + if (Predicate::apply(method, cur_level, mdo_i, mdo_b)) { + next_level = CompLevel_full_optimization; + } + } else { + next_level = CompLevel_full_optimization; + } + } + return next_level; +} + +template +CompLevel CompilationPolicy::transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) { + precond(cur_level == CompLevel_limited_profile); + CompLevel next_level = cur_level; + int i = method->invocation_count(); + int b = method->backedge_count(); + double scale = delay_profiling ? Tier2ProfileDelayFactor : 1.0; + MethodData* mdo = method->method_data(); + if (mdo != nullptr) { + if (mdo->would_profile()) { + if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOff * compiler_count(CompLevel_full_optimization) && + Predicate::apply_scaled(method, cur_level, i, b, scale))) { + next_level = CompLevel_full_profile; + } + } else { + next_level = CompLevel_full_optimization; + } + } else { + // If there is no MDO we need to profile + if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOff * compiler_count(CompLevel_full_optimization) && + Predicate::apply_scaled(method, cur_level, i, b, scale))) { + next_level = CompLevel_full_profile; + } + } + if (next_level == CompLevel_full_profile && is_method_profiled(method)) { + next_level = CompLevel_full_optimization; + } + return next_level; +} + // Determine if a method should be compiled with a normal entry point at a different level. 
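Note on the delay_profiling flag used by transition_from_none() and transition_from_limited_profile() above: it scales the usual counter thresholds by Tier0ProfileDelayFactor / Tier2ProfileDelayFactor (declared further down in this patch, in compiler_globals.hpp), so methods that training saw only as lukewarm are re-profiled later. A minimal sketch of the intended effect, assuming Predicate::apply_scaled() simply multiplies the normal thresholds by the scale; the helper name and parameters are illustrative, not part of the patch (call_event() continues right below):

// Illustrative only: rough model of a scaled tier-transition predicate.
// With scale == 1.0 this behaves like the regular predicate; with
// scale == Tier0ProfileDelayFactor (default 100.0) a trained-but-unchanged
// method must become roughly 100x hotter before profiling starts.
static bool example_apply_scaled(int invocation_count, int backedge_count,
                                 double invocation_threshold,
                                 double compile_threshold,
                                 double scale) {
  return invocation_count >= invocation_threshold * scale ||
         invocation_count + backedge_count >= compile_threshold * scale;
}
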
-CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, Thread* thread) {
- CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<CallPredicate>(method, cur_level, true));
- CompLevel next_level = common<CallPredicate>(method, cur_level, is_old(method));
+CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
+ CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<CallPredicate>(method, cur_level, THREAD, true));
+ CompLevel next_level = common<CallPredicate>(method, cur_level, THREAD, !TrainingData::have_data() && is_old(method));
 // If OSR method level is greater than the regular method level, the levels should be
 // equalized by raising the regular method level in order to avoid OSRs during each
@@ -1122,12 +1431,18 @@ CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cu
 } else {
 next_level = MAX2(osr_level, next_level);
 }
+#if INCLUDE_JVMCI
+ if (EnableJVMCI && UseJVMCICompiler &&
+ next_level == CompLevel_full_optimization CDS_ONLY(&& !AOTLinkedClassBulkLoader::class_preloading_finished())) {
+ next_level = cur_level;
+ }
+#endif
 return next_level;
}
// Determine if we should do an OSR compilation of a given method.
-CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, Thread* thread) {
- CompLevel next_level = common<LoopPredicate>(method, cur_level, true);
+CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
+ CompLevel next_level = common<LoopPredicate>(method, cur_level, THREAD, true);
 if (cur_level == CompLevel_none) {
 // If there is a live OSR method that means that we deopted to the interpreter
 // for the transition.
diff --git a/src/hotspot/share/compiler/compilationPolicy.hpp b/src/hotspot/share/compiler/compilationPolicy.hpp
index fe33fb8cfba28..a2dad85af0700 100644
--- a/src/hotspot/share/compiler/compilationPolicy.hpp
+++ b/src/hotspot/share/compiler/compilationPolicy.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,8 +28,82 @@ #include "code/nmethod.hpp" #include "compiler/compileBroker.hpp" #include "oops/methodData.hpp" +#include "oops/trainingData.hpp" #include "utilities/globalDefinitions.hpp" +namespace CompilationPolicyUtils { +template +class Queue { + class QueueNode : public CHeapObj { + T* _value; + QueueNode* _next; + public: + QueueNode(T* value, QueueNode* next) : _value(value), _next(next) { } + T* value() const { return _value; } + void set_next(QueueNode* next) { _next = next; } + QueueNode* next() const { return _next; } + }; + + QueueNode* _head; + QueueNode* _tail; + + void push_unlocked(T* value) { + QueueNode* n = new QueueNode(value, nullptr); + if (_tail != nullptr) { + _tail->set_next(n); + } + _tail = n; + if (_head == nullptr) { + _head = _tail; + } + } + T* pop_unlocked() { + QueueNode* n = _head; + if (_head != nullptr) { + _head = _head->next(); + } + if (_head == nullptr) { + _tail = _head; + } + T* value = nullptr; + if (n != nullptr) { + value = n->value(); + delete n; + } + return value; + } +public: + Queue() : _head(nullptr), _tail(nullptr) { } + void push(T* value, Monitor* lock, TRAPS) { + MonitorLocker locker(THREAD, lock); + push_unlocked(value); + locker.notify_all(); + } + + bool is_empty_unlocked() const { return _head == nullptr; } + + T* pop(Monitor* lock, TRAPS) { + MonitorLocker locker(THREAD, lock); + while(is_empty_unlocked() && !CompileBroker::is_compilation_disabled_forever()) { + locker.wait(); + } + T* value = pop_unlocked(); + return value; + } + + T* try_pop(Monitor* lock, TRAPS) { + MonitorLocker locker(THREAD, lock); + T* value = nullptr; + if (!is_empty_unlocked()) { + value = pop_unlocked(); + } + return value; + } + + void print_on(outputStream* st); +}; +} // namespace CompilationPolicyUtils + class CompileTask; class CompileQueue; /* @@ -173,9 +247,12 @@ class CompilationPolicy : AllStatic { friend class CallPredicate; friend class LoopPredicate; - static jlong _start_time; + typedef CompilationPolicyUtils::Queue TrainingReplayQueue; + + static int64_t _start_time; static int _c1_count, _c2_count; static double _increase_threshold_at_ratio; + static TrainingReplayQueue _training_replay_queue; // Set carry flags in the counters (in Method* and MDO). inline static void handle_counter_overflow(const methodHandle& method); @@ -187,29 +264,45 @@ class CompilationPolicy : AllStatic { inline static CompLevel limit_level(CompLevel level); // Common transition function. Given a predicate determines if a method should transition to another level. 
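The CompilationPolicyUtils::Queue template added above is a minimal Monitor-guarded FIFO of C-heap nodes. A small self-contained usage sketch; the payload type, queue instance and lock below are illustrative and not names from the patch (the transition-function declarations continue below):

// Illustrative only.
struct ExamplePayload { int id; };

static CompilationPolicyUtils::Queue<ExamplePayload> example_queue;

void example_producer(ExamplePayload* p, Monitor* lock, TRAPS) {
  // Appends under the lock and notifies any thread blocked in pop().
  example_queue.push(p, lock, THREAD);
}

ExamplePayload* example_blocking_consumer(Monitor* lock, TRAPS) {
  // Waits until an element is available; returns nullptr once
  // CompileBroker::is_compilation_disabled_forever() becomes true.
  return example_queue.pop(lock, THREAD);
}

ExamplePayload* example_polling_consumer(Monitor* lock, TRAPS) {
  // Never blocks; returns nullptr if the queue is currently empty.
  return example_queue.try_pop(lock, THREAD);
}
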
template - static CompLevel common(const methodHandle& method, CompLevel cur_level, bool disable_feedback = false); + static CompLevel common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback = false); + + template + static CompLevel transition_from_none(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback); + template + static CompLevel transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback); + template + static CompLevel transition_from_full_profile(const methodHandle& method, CompLevel cur_level); + template + static CompLevel standard_transition(const methodHandle& method, CompLevel cur_level, bool delayprof, bool disable_feedback); + + static CompLevel trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD); + static CompLevel trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD); + static CompLevel trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD); + static CompLevel trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD); + // Transition functions. // call_event determines if a method should be compiled at a different // level with a regular invocation entry. - static CompLevel call_event(const methodHandle& method, CompLevel cur_level, Thread* thread); + static CompLevel call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD); // loop_event checks if a method should be OSR compiled at a different // level. - static CompLevel loop_event(const methodHandle& method, CompLevel cur_level, Thread* thread); - static void print_counters(const char* prefix, const Method* m); + static CompLevel loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD); + static void print_counters(const char* prefix, Method* m); + static void print_training_data(const char* prefix, Method* method); // Has a method been long around? // We don't remove old methods from the compile queue even if they have // very low activity (see select_task()). inline static bool is_old(const methodHandle& method); // Was a given method inactive for a given number of milliseconds. // If it is, we would remove it from the queue (see select_task()). - inline static bool is_stale(jlong t, jlong timeout, const methodHandle& method); + inline static bool is_stale(int64_t t, int64_t timeout, const methodHandle& method); // Compute the weight of the method for the compilation scheduling inline static double weight(Method* method); // Apply heuristics and return true if x should be compiled before y inline static bool compare_methods(Method* x, Method* y); // Compute event rate for a given method. The rate is the number of event (invocations + backedges) // per millisecond. 
- inline static void update_rate(jlong t, const methodHandle& method); + inline static void update_rate(int64_t t, const methodHandle& method); // Compute threshold scaling coefficient inline static double threshold_scale(CompLevel level, int feedback_k); // If a method is old enough and is still in the interpreter we would want to @@ -224,8 +317,8 @@ class CompilationPolicy : AllStatic { static void set_c1_count(int x) { _c1_count = x; } static void set_c2_count(int x) { _c2_count = x; } - enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT }; - static void print_event(EventType type, const Method* m, const Method* im, int bci, CompLevel level); + enum EventType { CALL, LOOP, COMPILE, FORCE_COMPILE, FORCE_RECOMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT }; + static void print_event(EventType type, Method* m, Method* im, int bci, CompLevel level); // Check if the method can be compiled, change level if necessary static void compile(const methodHandle& mh, int bci, CompLevel level, TRAPS); // Simple methods are as good being compiled with C1 as C2. @@ -242,21 +335,26 @@ class CompilationPolicy : AllStatic { int bci, CompLevel level, nmethod* nm, TRAPS); static void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); } - static void set_start_time(jlong t) { _start_time = t; } - static jlong start_time() { return _start_time; } + static void set_start_time(int64_t t) { _start_time = t; } + static int64_t start_time() { return _start_time; } // m must be compiled before executing it static bool must_be_compiled(const methodHandle& m, int comp_level = CompLevel_any); -public: + static void maybe_compile_early(const methodHandle& m, TRAPS); + static void replay_training_at_init_impl(InstanceKlass* klass, TRAPS); + public: static int min_invocations() { return Tier4MinInvocationThreshold; } static int c1_count() { return _c1_count; } static int c2_count() { return _c2_count; } static int compiler_count(CompLevel comp_level); - // If m must_be_compiled then request a compilation from the CompileBroker. // This supports the -Xcomp option. static void compile_if_required(const methodHandle& m, TRAPS); + static void flush_replay_training_at_init(TRAPS); + static void replay_training_at_init(InstanceKlass* klass, TRAPS); + static void replay_training_at_init_loop(TRAPS); + // m is allowed to be compiled static bool can_be_compiled(const methodHandle& m, int comp_level = CompLevel_any); // m is allowed to be osr compiled @@ -269,9 +367,9 @@ class CompilationPolicy : AllStatic { static nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS); // Select task is called by CompileBroker. We should return a task or nullptr. - static CompileTask* select_task(CompileQueue* compile_queue); + static CompileTask* select_task(CompileQueue* compile_queue, JavaThread* THREAD); // Tell the runtime if we think a given method is adequately profiled. 
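For orientation, the replay_training_at_init* entry points declared above are expected to form a producer/consumer pair around the training replay queue: class initialization enqueues a freshly initialized klass, and the dedicated replay thread (created in compileBroker.cpp below) drains the queue. Their bodies are outside this hunk, so the sketch below is an assumption-labelled illustration, not the patch's implementation (the remaining declarations, starting with is_mature(), continue below):

// Illustrative sketch only. Assumes the replay queue holds InstanceKlass* and
// is guarded by a Monitor passed in by the caller.
static CompilationPolicyUtils::Queue<InstanceKlass> example_replay_queue;

// Producer: InstanceKlass::initialize_impl() (instanceKlass.cpp hunk further
// down) calls CompilationPolicy::replay_training_at_init(this, THREAD) once the
// class reaches fully_initialized; conceptually that just enqueues the class.
void example_enqueue_for_replay(InstanceKlass* ik, Monitor* lock, TRAPS) {
  if (TrainingData::have_data()) {
    example_replay_queue.push(ik, lock, THREAD);
  }
}

// Consumer: the "Training replay thread" created by
// CompileBroker::init_training_replay() (compileBroker.cpp, below) runs
// replay_training_at_init_loop(), which conceptually drains the queue until
// compilation is shut down.
void example_replay_loop(Monitor* lock, TRAPS) {
  while (!CompileBroker::is_compilation_disabled_forever()) {
    InstanceKlass* ik = example_replay_queue.pop(lock, THREAD);
    if (ik != nullptr) {
      // replay_training_at_init_impl(ik, THREAD) would force the recorded
      // compilations for ik's methods here.
    }
  }
}
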
- static bool is_mature(Method* method); + static bool is_mature(MethodData* mdo); // Initialize: set compiler thread count static void initialize(); static bool should_not_inline(ciEnv* env, ciMethod* callee); @@ -280,6 +378,7 @@ class CompilationPolicy : AllStatic { static CompLevel initial_compile_level(const methodHandle& method); // Return highest level possible static CompLevel highest_compile_level(); + static void dump(); }; #endif // SHARE_COMPILER_COMPILATIONPOLICY_HPP diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index 168679feb9ba1..6af10ad7b5eaa 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -22,6 +22,7 @@ * */ +#include "cds/cdsConfig.hpp" #include "classfile/javaClasses.inline.hpp" #include "classfile/symbolTable.hpp" #include "classfile/vmClasses.hpp" @@ -347,6 +348,13 @@ void CompileQueue::add(CompileTask* task) { task->log_task_queued(); } + if (TrainingData::need_data() && !CDSConfig::is_dumping_final_static_archive()) { + CompileTrainingData* ctd = CompileTrainingData::make(task); + if (ctd != nullptr) { + task->set_training_data(ctd); + } + } + // Notify CompilerThreads that a task is available. MethodCompileQueue_lock->notify_all(); } @@ -442,7 +450,7 @@ CompileTask* CompileQueue::get(CompilerThread* thread) { CompileTask* task; { NoSafepointVerifier nsv; - task = CompilationPolicy::select_task(this); + task = CompilationPolicy::select_task(this, thread); if (task != nullptr) { task = task->select_for_compilation(); } @@ -783,6 +791,10 @@ void CompileBroker::compilation_init(JavaThread* THREAD) { _initialized = true; } +void TrainingReplayThread::training_replay_thread_entry(JavaThread* thread, TRAPS) { + CompilationPolicy::replay_training_at_init_loop(thread); +} + #if defined(ASSERT) && COMPILER2_OR_JVMCI // Entry for DeoptimizeObjectsALotThread. The threads are started in // CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled @@ -860,6 +872,9 @@ JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, C new_thread = new DeoptimizeObjectsALotThread(); break; #endif // ASSERT + case training_replay_t: + new_thread = new TrainingReplayThread(); + break; default: ShouldNotReachHere(); } @@ -1017,6 +1032,16 @@ void CompileBroker::init_compiler_threads() { #endif // defined(ASSERT) && COMPILER2_OR_JVMCI } +void CompileBroker::init_training_replay() { + // Ensure any exceptions lead to vm_exit_during_initialization. 
+ EXCEPTION_MARK; + if (TrainingData::have_data()) { + Handle thread_oop = JavaThread::create_system_thread_object("Training replay thread", CHECK); + jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop()); + make_thread(training_replay_t, thread_handle, nullptr, nullptr, THREAD); + } +} + void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) { int old_c2_count = 0, new_c2_count = 0, old_c1_count = 0, new_c1_count = 0; diff --git a/src/hotspot/share/compiler/compileBroker.hpp b/src/hotspot/share/compiler/compileBroker.hpp index 77662692f565f..ec097a7ed3edd 100644 --- a/src/hotspot/share/compiler/compileBroker.hpp +++ b/src/hotspot/share/compiler/compileBroker.hpp @@ -254,11 +254,13 @@ class CompileBroker: AllStatic { enum ThreadType { compiler_t, - deoptimizer_t + deoptimizer_t, + training_replay_t }; static JavaThread* make_thread(ThreadType type, jobject thread_oop, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD); static void init_compiler_threads(); + static void init_training_replay(); static void possibly_add_compiler_threads(JavaThread* THREAD); static bool compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded); @@ -451,4 +453,12 @@ class CompileBroker: AllStatic { static void print_heapinfo(outputStream *out, const char* function, size_t granularity); }; +class TrainingReplayThread : public JavaThread { + static void training_replay_thread_entry(JavaThread* thread, TRAPS); +public: + TrainingReplayThread() : JavaThread(&training_replay_thread_entry) { } + + bool is_hidden_from_external_view() const { return true; } +}; + #endif // SHARE_COMPILER_COMPILEBROKER_HPP diff --git a/src/hotspot/share/compiler/compileTask.cpp b/src/hotspot/share/compiler/compileTask.cpp index 87d4a11e52844..49afd5adca171 100644 --- a/src/hotspot/share/compiler/compileTask.cpp +++ b/src/hotspot/share/compiler/compileTask.cpp @@ -125,6 +125,7 @@ void CompileTask::initialize(int compile_id, _nm_total_size = 0; _failure_reason = nullptr; _failure_reason_on_C_heap = false; + _training_data = nullptr; _arena_bytes = 0; if (LogCompilation) { diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp index 04ad7e35a139b..567e20e6c97f2 100644 --- a/src/hotspot/share/compiler/compileTask.hpp +++ b/src/hotspot/share/compiler/compileTask.hpp @@ -31,6 +31,7 @@ #include "memory/allocation.hpp" #include "utilities/xmlstream.hpp" +class CompileTrainingData; class DirectiveSet; JVMCI_ONLY(class JVMCICompileState;) @@ -113,6 +114,7 @@ class CompileTask : public CHeapObj { const char* _failure_reason; // Specifies if _failure_reason is on the C heap. bool _failure_reason_on_C_heap; + CompileTrainingData* _training_data; size_t _arena_bytes; // peak size of temporary memory during compilation (e.g. 
node arenas) public: @@ -215,6 +217,9 @@ class CompileTask : public CHeapObj { void set_is_free(bool val) { _is_free = val; } bool is_unloaded() const; + CompileTrainingData* training_data() const { return _training_data; } + void set_training_data(CompileTrainingData* td) { _training_data = td; } + // RedefineClasses support void metadata_do(MetadataClosure* f); void mark_on_stack(); diff --git a/src/hotspot/share/compiler/compilerDefinitions.hpp b/src/hotspot/share/compiler/compilerDefinitions.hpp index bdaa6dffb8f5f..bacc1f73f979f 100644 --- a/src/hotspot/share/compiler/compilerDefinitions.hpp +++ b/src/hotspot/share/compiler/compilerDefinitions.hpp @@ -59,7 +59,8 @@ enum CompLevel : s1 { CompLevel_simple = 1, // C1 CompLevel_limited_profile = 2, // C1, invocation & backedge counters CompLevel_full_profile = 3, // C1, invocation & backedge counters + mdo - CompLevel_full_optimization = 4 // C2 or JVMCI + CompLevel_full_optimization = 4, // C2 or JVMCI + CompLevel_count = 5 }; class CompilationModeFlag : AllStatic { diff --git a/src/hotspot/share/compiler/compiler_globals.hpp b/src/hotspot/share/compiler/compiler_globals.hpp index a811cd8b3bae6..3c47b991120dd 100644 --- a/src/hotspot/share/compiler/compiler_globals.hpp +++ b/src/hotspot/share/compiler/compiler_globals.hpp @@ -269,6 +269,17 @@ "Maximum rate sampling interval (in milliseconds)") \ range(0, max_intx) \ \ + product(double, Tier0ProfileDelayFactor, 100.0, DIAGNOSTIC, \ + "Delay profiling/compiling of methods that were " \ + "observed to be lukewarm") \ + \ + product(double, Tier2ProfileDelayFactor, 250.0, DIAGNOSTIC, \ + "Delay profiling of methods that were observed to be lukewarm") \ + \ + product(bool, SkipTier2IfPossible, false, DIAGNOSTIC, \ + "Compile at tier 4 instead of tier 2 in training replay " \ + "mode if posssible") \ + \ product(ccstr, CompilationMode, "default", \ "Compilation modes: " \ "default: normal tiered compilation; " \ @@ -382,7 +393,6 @@ "If compilation is stopped with an error, capture diagnostic " \ "information at the bailout point") \ \ - // end of COMPILER_FLAGS DECLARE_FLAGS(COMPILER_FLAGS) diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp index d61e2461e8da6..8133b4642c097 100644 --- a/src/hotspot/share/logging/logTag.hpp +++ b/src/hotspot/share/logging/logTag.hpp @@ -205,6 +205,7 @@ class outputStream; LOG_TAG(timer) \ LOG_TAG(tlab) \ LOG_TAG(tracking) \ + LOG_TAG(training) \ LOG_TAG(trimnative) /* trim native heap */ \ LOG_TAG(unload) /* Trace unloading of classes */ \ LOG_TAG(unmap) \ diff --git a/src/hotspot/share/memory/allocation.cpp b/src/hotspot/share/memory/allocation.cpp index 13280006fe692..fa26fbb374977 100644 --- a/src/hotspot/share/memory/allocation.cpp +++ b/src/hotspot/share/memory/allocation.cpp @@ -86,6 +86,13 @@ void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data, return Metaspace::allocate(loader_data, word_size, type, /*use_class_space*/ false); } +// Work-around -- see JDK-8331086 +void* MetaspaceObj::operator new(size_t size, MemTag flags) throw() { + void* p = AllocateHeap(size, flags, CALLER_PC); + memset(p, 0, size); + return p; +} + bool MetaspaceObj::is_valid(const MetaspaceObj* p) { // Weed out obvious bogus values first without traversing metaspace if ((size_t)p < os::min_page_size()) { diff --git a/src/hotspot/share/memory/allocation.hpp b/src/hotspot/share/memory/allocation.hpp index b67dcd43e4d40..08d2bbe990d22 100644 --- a/src/hotspot/share/memory/allocation.hpp +++ 
b/src/hotspot/share/memory/allocation.hpp @@ -314,7 +314,10 @@ class MetaspaceObj { f(ConstantPoolCache) \ f(Annotations) \ f(MethodCounters) \ - f(RecordComponent) + f(RecordComponent) \ + f(KlassTrainingData) \ + f(MethodTrainingData) \ + f(CompileTrainingData) #define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type, #define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name; @@ -352,6 +355,8 @@ class MetaspaceObj { void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, Type type) throw(); + // This is used for allocating training data. See JDK-8331086. + void* operator new(size_t size, MemTag flags) throw(); void operator delete(void* p) = delete; // Declare a *static* method with the same signature in any subclass of MetaspaceObj diff --git a/src/hotspot/share/memory/metadataFactory.hpp b/src/hotspot/share/memory/metadataFactory.hpp index f5935c588d79b..355702f798053 100644 --- a/src/hotspot/share/memory/metadataFactory.hpp +++ b/src/hotspot/share/memory/metadataFactory.hpp @@ -48,6 +48,12 @@ class MetadataFactory : AllStatic { return array; } + // This API should be used for TrainingData only. + template + static Array* new_array_from_c_heap(int length, MemTag flags) { + return new (length, flags) Array(length); + } + template static void free_array(ClassLoaderData* loader_data, Array* data) { if (data != nullptr) { diff --git a/src/hotspot/share/memory/metaspaceClosure.hpp b/src/hotspot/share/memory/metaspaceClosure.hpp index 22f8899e2c785..5714afc1ff96f 100644 --- a/src/hotspot/share/memory/metaspaceClosure.hpp +++ b/src/hotspot/share/memory/metaspaceClosure.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "memory/allocation.hpp" #include "metaprogramming/enableIf.hpp" #include "oops/array.hpp" +#include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" @@ -104,6 +105,18 @@ class MetaspaceClosure { // Symbol* bar() { return (Symbol*) _obj; } // // [2] All Array dimensions are statically declared. + // + // Pointer Tagging + // + // All metaspace pointers are at least 4 byte aligned. Therefore, it's possible for + // certain pointers to contain "tags" in their lowest 2 bits. + // + // Ref::obj() clears the tag bits in the return values. As a result, most + // callers who just want walk a closure of metaspace objects do not need to worry + // about the tag bits. 
+ // + // If you need to use the tags, you can access the tagged pointer with Ref::addr() + // and manipulate its parts with strip_tags(), decode_tags() and add_tags() class Ref : public CHeapObj { Writability _writability; address _enclosing_obj; @@ -123,7 +136,7 @@ class MetaspaceClosure { virtual ~Ref() {} address obj() const { - return *addr(); + return strip_tags(*addr()); } address* addr() const { @@ -143,12 +156,35 @@ class MetaspaceClosure { Ref* next() const { return _next; } }; + // Pointer tagging support + constexpr static uintx TAG_MASK = 0x03; + + template + static T strip_tags(T ptr_with_tags) { + uintx n = (uintx)ptr_with_tags; + return (T)(n & ~TAG_MASK); + } + + template + static uintx decode_tags(T ptr_with_tags) { + uintx n = (uintx)ptr_with_tags; + return (n & TAG_MASK); + } + + template + static T add_tags(T ptr, uintx tags) { + uintx n = (uintx)ptr; + assert((n & TAG_MASK) == 0, "sanity"); + assert(tags <= TAG_MASK, "sanity"); + return (T)(n | tags); + } + private: // MSORef -- iterate an instance of MetaspaceObj template class MSORef : public Ref { T** _mpp; T* dereference() const { - return *_mpp; + return strip_tags(*_mpp); } protected: virtual void** mpp() const { @@ -176,7 +212,7 @@ class MetaspaceClosure { Array** _mpp; protected: Array* dereference() const { - return *_mpp; + return strip_tags(*_mpp); } virtual void** mpp() const { return (void**)_mpp; diff --git a/src/hotspot/share/oops/array.hpp b/src/hotspot/share/oops/array.hpp index 12e23080166ec..91a348728d2c6 100644 --- a/src/hotspot/share/oops/array.hpp +++ b/src/hotspot/share/oops/array.hpp @@ -54,6 +54,11 @@ class Array: public MetaspaceObj { NONCOPYABLE(Array); inline void* operator new(size_t size, ClassLoaderData* loader_data, int length, TRAPS) throw(); + inline void* operator new(size_t size, ClassLoaderData* loader_data, int length) throw(); + + // Work-around -- see JDK-8331086 + inline void* operator new(size_t size, int length, MemTag flags) throw(); + static size_t byte_sizeof(int length, size_t elm_byte_size) { return sizeof(Array) + MAX2(length - 1, 0) * elm_byte_size; diff --git a/src/hotspot/share/oops/array.inline.hpp b/src/hotspot/share/oops/array.inline.hpp index 28f8a35dc0d4a..3fa7fd15fb318 100644 --- a/src/hotspot/share/oops/array.inline.hpp +++ b/src/hotspot/share/oops/array.inline.hpp @@ -37,4 +37,19 @@ inline void* Array::operator new(size_t size, ClassLoaderData* loader_data, i MetaspaceObj::array_type(sizeof(T)), false, THREAD); } +template +inline void* Array::operator new(size_t size, ClassLoaderData* loader_data, int length) throw() { + size_t word_size = Array::size(length); + return (void*) Metaspace::allocate(loader_data, word_size, + MetaspaceObj::array_type(sizeof(T)), false); +} + +template +inline void* Array::operator new(size_t size, int length, MemTag flags) throw() { + size = Array::size(length) * BytesPerWord; + void* p = AllocateHeap(size * BytesPerWord, flags); + memset(p, 0, size); + return p; +} + #endif // SHARE_OOPS_ARRAY_INLINE_HPP diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index 715c8f473d087..b862c5f890f9e 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -1322,6 +1322,7 @@ void InstanceKlass::initialize_impl(TRAPS) { if (!HAS_PENDING_EXCEPTION) { set_initialization_state_and_notify(fully_initialized, CHECK); debug_only(vtable().verify(tty, true);) + CompilationPolicy::replay_training_at_init(this, THREAD); } else { // Step 10 and 11 @@ -2641,6 
+2642,8 @@ void InstanceKlass::remove_unshareable_info() { set_verified_at_dump_time(); } + _misc_flags.set_has_init_deps_processed(false); + Klass::remove_unshareable_info(); if (SystemDictionaryShared::has_class_failed_verification(this)) { diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp index cedc17e9bafde..2e5c6c877c819 100644 --- a/src/hotspot/share/oops/instanceKlass.hpp +++ b/src/hotspot/share/oops/instanceKlass.hpp @@ -1124,6 +1124,12 @@ class InstanceKlass: public Klass { bool can_be_verified_at_dumptime() const; void compute_has_loops_flag_for_methods(); #endif + bool has_init_deps_processed() const { return _misc_flags.has_init_deps_processed(); } + void set_has_init_deps_processed() { + assert(is_initialized(), ""); + assert(!has_init_deps_processed(), "already set"); // one-off action + _misc_flags.set_has_init_deps_processed(true); + } u2 compute_modifier_flags() const; diff --git a/src/hotspot/share/oops/instanceKlassFlags.hpp b/src/hotspot/share/oops/instanceKlassFlags.hpp index ae70f3019e70f..7a743465745c2 100644 --- a/src/hotspot/share/oops/instanceKlassFlags.hpp +++ b/src/hotspot/share/oops/instanceKlassFlags.hpp @@ -68,6 +68,7 @@ class InstanceKlassFlags { status(has_been_redefined , 1 << 2) /* class has been redefined */ \ status(is_scratch_class , 1 << 3) /* class is the redefined scratch class */ \ status(is_marked_dependent , 1 << 4) /* class is the redefined scratch class */ \ + status(has_init_deps_processed , 1 << 5) /* all init dependencies are processed */ \ /* end of list */ #define IK_STATUS_ENUM_NAME(name, value) _misc_##name = value, diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp index 0c4430b44c3f5..7a71e99bd17fb 100644 --- a/src/hotspot/share/oops/method.cpp +++ b/src/hotspot/share/oops/method.cpp @@ -59,6 +59,7 @@ #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" +#include "oops/trainingData.hpp" #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" #include "runtime/atomic.hpp" @@ -405,11 +406,23 @@ void Method::metaspace_pointers_do(MetaspaceClosure* it) { void Method::remove_unshareable_info() { unlink_method(); + if (method_data() != nullptr) { + method_data()->remove_unshareable_info(); + } + if (method_counters() != nullptr) { + method_counters()->remove_unshareable_info(); + } JFR_ONLY(REMOVE_METHOD_ID(this);) } void Method::restore_unshareable_info(TRAPS) { assert(is_method() && is_valid_method(this), "ensure C++ vtable is restored"); + if (method_data() != nullptr) { + method_data()->restore_unshareable_info(CHECK); + } + if (method_counters() != nullptr) { + method_counters()->restore_unshareable_info(CHECK); + } assert(!queued_for_compilation(), "method's queued_for_compilation flag should not be set"); } #endif @@ -577,9 +590,43 @@ void Method::print_invocation_count(outputStream* st) { #endif } +MethodTrainingData* Method::training_data_or_null() const { + MethodCounters* mcs = method_counters(); + if (mcs == nullptr) { + return nullptr; + } else { + MethodTrainingData* mtd = mcs->method_training_data(); + if (mtd == mcs->method_training_data_sentinel()) { + return nullptr; + } + return mtd; + } +} + +bool Method::init_training_data(MethodTrainingData* td) { + MethodCounters* mcs = method_counters(); + if (mcs == nullptr) { + return false; + } else { + return mcs->init_method_training_data(td); + } +} + +bool Method::install_training_method_data(const methodHandle& method) { + MethodTrainingData* 
mtd = MethodTrainingData::find(method); + if (mtd != nullptr && mtd->final_profile() != nullptr) { + Atomic::replace_if_null(&method->_method_data, mtd->final_profile()); + return true; + } + return false; +} + // Build a MethodData* object to hold profiling information collected on this // method when requested. void Method::build_profiling_method_data(const methodHandle& method, TRAPS) { + if (install_training_method_data(method)) { + return; + } // Do not profile the method if metaspace has hit an OOM previously // allocating profiling data. Callers clear pending exception so don't // add one here. @@ -1150,6 +1197,12 @@ void Method::unlink_method() { clear_method_data(); clear_method_counters(); + clear_is_not_c1_compilable(); + clear_is_not_c1_osr_compilable(); + clear_is_not_c2_compilable(); + clear_is_not_c2_osr_compilable(); + clear_queued_for_compilation(); + remove_unshareable_flags(); } diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp index 5daa572a46ccf..ce6b859f7c121 100644 --- a/src/hotspot/share/oops/method.hpp +++ b/src/hotspot/share/oops/method.hpp @@ -59,6 +59,7 @@ class LocalVariableTableElement; class AdapterHandlerEntry; class MethodData; class MethodCounters; +class MethodTrainingData; class ConstMethod; class InlineTableSizes; class nmethod; @@ -310,9 +311,13 @@ class Method : public Metadata { TRAPS); // method data access - MethodData* method_data() const { + MethodData* method_data() const { return _method_data; } + void set_method_data(MethodData* data); + + MethodTrainingData* training_data_or_null() const; + bool init_training_data(MethodTrainingData* td); // mark an exception handler as entered (used to prune dead catch blocks in C2) void set_exception_handler_entered(int handler_bci); @@ -341,7 +346,7 @@ class Method : public Metadata { bool was_never_executed() { return !was_executed_more_than(0); } static void build_profiling_method_data(const methodHandle& method, TRAPS); - + static bool install_training_method_data(const methodHandle& method); static MethodCounters* build_method_counters(Thread* current, Method* m); inline int interpreter_invocation_count() const; diff --git a/src/hotspot/share/oops/methodCounters.cpp b/src/hotspot/share/oops/methodCounters.cpp index 4abf9ba53e7f9..2bdce81186055 100644 --- a/src/hotspot/share/oops/methodCounters.cpp +++ b/src/hotspot/share/oops/methodCounters.cpp @@ -22,13 +22,19 @@ * */ +#include "cds/cdsConfig.hpp" #include "compiler/compiler_globals.hpp" #include "compiler/compilerOracle.hpp" +#include "memory/metaspaceClosure.hpp" #include "oops/method.hpp" #include "oops/methodCounters.hpp" +#include "oops/trainingData.hpp" #include "runtime/handles.inline.hpp" +#include "memory/resourceArea.hpp" MethodCounters::MethodCounters(const methodHandle& mh) : + _method(mh()), + _method_training_data(method_training_data_sentinel()), _prev_time(0), _rate(0), _highest_comp_level(0), @@ -47,14 +53,18 @@ MethodCounters::MethodCounters(const methodHandle& mh) : _backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift; } +MethodCounters::MethodCounters() { + assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS"); +} + MethodCounters* MethodCounters::allocate_no_exception(const methodHandle& mh) { ClassLoaderData* loader_data = mh->method_holder()->class_loader_data(); - return new(loader_data, size(), MetaspaceObj::MethodCountersType) MethodCounters(mh); + return new(loader_data, 
method_counters_size(), MetaspaceObj::MethodCountersType) MethodCounters(mh); } MethodCounters* MethodCounters::allocate_with_exception(const methodHandle& mh, TRAPS) { ClassLoaderData* loader_data = mh->method_holder()->class_loader_data(); - return new(loader_data, size(), MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh); + return new(loader_data, method_counters_size(), MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh); } void MethodCounters::clear_counters() { @@ -68,7 +78,47 @@ void MethodCounters::clear_counters() { set_highest_osr_comp_level(0); } +void MethodCounters::metaspace_pointers_do(MetaspaceClosure* it) { + log_trace(cds)("Iter(MethodCounters): %p", this); + it->push(&_method); + it->push(&_method_training_data); +} + +#if INCLUDE_CDS +void MethodCounters::remove_unshareable_info() { +} +void MethodCounters::restore_unshareable_info(TRAPS) { + _method_training_data = method_training_data_sentinel(); +} +#endif // INCLUDE_CDS + +void MethodCounters::print_on(outputStream* st) const { + assert(is_methodCounters(), "should be method counters"); + st->print("method counters"); + print_data_on(st); +} + +void MethodCounters::print_data_on(outputStream* st) const { + ResourceMark rm; + st->print_cr(" - invocation_counter: %d carry=%d", _invocation_counter.count(), _invocation_counter.carry()); + st->print_cr(" - backedge_counter: %d carry=%d", _backedge_counter.count(), _backedge_counter.carry()); + st->print_cr(" - prev_time: " JLONG_FORMAT, _prev_time); + st->print_cr(" - rate: %.3f", _rate); + st->print_cr(" - invoke_mask: %d", _invoke_mask); + st->print_cr(" - backedge_mask: %d", _backedge_mask); + st->print_cr(" - prev_event_count: %d", _prev_event_count); +#if COMPILER2_OR_JVMCI + st->print_cr(" - interpreter_throwout_count: %u", _interpreter_throwout_count); +#endif +#if INCLUDE_JVMTI + st->print_cr(" - number_of_breakpoints: %u", _number_of_breakpoints); +#endif + st->print_cr(" - highest_comp_level: %u", _highest_comp_level); + st->print_cr(" - highest_osr_comp_level: %u", _highest_osr_comp_level); +} + void MethodCounters::print_value_on(outputStream* st) const { + assert(is_methodCounters(), "must be methodCounters"); st->print("method counters"); print_address_on(st); } diff --git a/src/hotspot/share/oops/methodCounters.hpp b/src/hotspot/share/oops/methodCounters.hpp index bdf033a46a855..0a26bab1bb745 100644 --- a/src/hotspot/share/oops/methodCounters.hpp +++ b/src/hotspot/share/oops/methodCounters.hpp @@ -30,12 +30,25 @@ #include "interpreter/invocationCounter.hpp" #include "utilities/align.hpp" -class MethodCounters : public MetaspaceObj { +class MethodTrainingData; + +class MethodCounters : public Metadata { friend class VMStructs; friend class JVMCIVMStructs; + + // Used by CDS. These classes need to access the private default constructor. 
+ template friend class CppVtableTesterA; + template friend class CppVtableTesterB; + template friend class CppVtableCloner; + private: InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations + + // Back pointer to the Method* + Method* _method; + + Metadata* _method_training_data; jlong _prev_time; // Previous time the rate was acquired float _rate; // Events (invocation and backedge counter increments) per millisecond int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog @@ -51,20 +64,26 @@ class MethodCounters : public MetaspaceObj { u1 _highest_osr_comp_level; // Same for OSR level MethodCounters(const methodHandle& mh); + MethodCounters(); + public: + virtual bool is_methodCounters() const { return true; } + Method* method() const { return _method; } static MethodCounters* allocate_no_exception(const methodHandle& mh); static MethodCounters* allocate_with_exception(const methodHandle& mh, TRAPS); - DEBUG_ONLY(bool on_stack() { return false; }) void deallocate_contents(ClassLoaderData* loader_data) {} - void metaspace_pointers_do(MetaspaceClosure* it) { return; } - - static int size() { + static int method_counters_size() { return align_up((int)sizeof(MethodCounters), wordSize) / wordSize; } + virtual int size() const { + return method_counters_size(); + } MetaspaceObj::Type type() const { return MethodCountersType; } + void metaspace_pointers_do(MetaspaceClosure* iter); + void clear_counters(); #if COMPILER2_OR_JVMCI @@ -107,6 +126,7 @@ class MethodCounters : public MetaspaceObj { int highest_osr_comp_level() const { return _highest_osr_comp_level; } void set_highest_osr_comp_level(int level) { _highest_osr_comp_level = (u1)level; } + // invocation counter InvocationCounter* invocation_counter() { return &_invocation_counter; } InvocationCounter* backedge_counter() { return &_backedge_counter; } @@ -127,7 +147,33 @@ class MethodCounters : public MetaspaceObj { return byte_offset_of(MethodCounters, _backedge_mask); } - const char* internal_name() const { return "{method counters}"; } + virtual const char* internal_name() const { return "{method counters}"; } + + Metadata* method_training_data_sentinel() { + return this; + } + MethodTrainingData* method_training_data() const { + return reinterpret_cast(_method_training_data); + } + bool init_method_training_data(MethodTrainingData* td) { + MethodTrainingData* cur = method_training_data(); + if (cur == td) { + return true; + } + if (cur == nullptr || cur == reinterpret_cast(method_training_data_sentinel())) { + return Atomic::cmpxchg(reinterpret_cast(&_method_training_data), cur, td) == cur; + } + return false; + } + +#if INCLUDE_CDS + void remove_unshareable_info(); + void restore_unshareable_info(TRAPS); +#endif + + // Printing + void print_on (outputStream* st) const; void print_value_on(outputStream* st) const; + void print_data_on(outputStream* st) const; }; #endif // SHARE_OOPS_METHODCOUNTERS_HPP diff --git a/src/hotspot/share/oops/methodData.cpp b/src/hotspot/share/oops/methodData.cpp index 280a69df58359..0a050975155ae 100644 --- a/src/hotspot/share/oops/methodData.cpp +++ b/src/hotspot/share/oops/methodData.cpp @@ -22,7 +22,9 @@ * */ +#include "cds/cdsConfig.hpp" #include "ci/ciMethodData.hpp" +#include "classfile/systemDictionaryShared.hpp" #include "classfile/vmSymbols.hpp" #include "compiler/compilationPolicy.hpp" 
#include "compiler/compilerDefinitions.inline.hpp" @@ -319,24 +321,68 @@ void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* md } } +static bool is_excluded(Klass* k) { +#if INCLUDE_CDS + if (SafepointSynchronize::is_at_safepoint() && + CDSConfig::is_dumping_archive() && + CDSConfig::current_thread_is_vm_or_dumper()) { + if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) { + log_debug(cds)("Purged %s from MDO: unloaded class", k->name()->as_C_string()); + return true; + } else if (CDSConfig::is_dumping_dynamic_archive() && k->is_shared()) { + return false; + } else { + bool excluded = SystemDictionaryShared::should_be_excluded(k); + if (excluded) { + log_debug(cds)("Purged %s from MDO: excluded class", k->name()->as_C_string()); + } + return excluded; + } + } +#endif + return false; +} + void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) { for (int i = 0; i < _number_of_entries; i++) { intptr_t p = type(i); Klass* k = (Klass*)klass_part(p); - if (k != nullptr && (always_clean || !k->is_loader_alive())) { - set_type(i, with_status((Klass*)nullptr, p)); + if (k != nullptr) { + if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) { + continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead? + } + if (always_clean || !k->is_loader_alive() || is_excluded(k)) { + set_type(i, with_status((Klass*)nullptr, p)); + } } } } +void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) { + for (int i = 0; i < _number_of_entries; i++) { + Klass** k = (Klass**)type_adr(i); // tagged + it->push(k); + } +} + void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) { intptr_t p = type(); Klass* k = (Klass*)klass_part(p); - if (k != nullptr && (always_clean || !k->is_loader_alive())) { - set_type(with_status((Klass*)nullptr, p)); + if (k != nullptr) { + if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) { + return; // skip not-yet-initialized classes // TODO: maybe clear the slot instead? + } + if (always_clean || !k->is_loader_alive() || is_excluded(k)) { + set_type(with_status((Klass*)nullptr, p)); + } } } +void ReturnTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) { + Klass** k = (Klass**)type_adr(); // tagged + it->push(k); +} + bool TypeEntriesAtCall::return_profiling_enabled() { return MethodData::profile_return(); } @@ -412,12 +458,24 @@ void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) con void ReceiverTypeData::clean_weak_klass_links(bool always_clean) { for (uint row = 0; row < row_limit(); row++) { Klass* p = receiver(row); - if (p != nullptr && (always_clean || !p->is_loader_alive())) { - clear_row(row); + if (p != nullptr) { + if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) { + continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead? 
+ } + if (always_clean || !p->is_loader_alive() || is_excluded(p)) { + clear_row(row); + } } } } +void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) { + for (uint row = 0; row < row_limit(); row++) { + Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row)); + it->push(recv); + } +} + void ReceiverTypeData::print_receiver_data_on(outputStream* st) const { uint row; int entries = 0; @@ -646,6 +704,11 @@ void ParametersTypeData::print_data_on(outputStream* st, const char* extra) cons st->cr(); } +void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) { + Method** m = (Method**)intptr_at_adr(speculative_trap_method); + it->push(m); +} + void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const { print_shared(st, "SpeculativeTrapData", extra); tab(st); @@ -1223,10 +1286,14 @@ void MethodData::post_initialize(BytecodeStream* stream) { MethodData::MethodData(const methodHandle& method) : _method(method()), // Holds Compile_lock - _extra_data_lock(Mutex::nosafepoint, "MDOExtraData_lock"), _compiler_counters(), _parameters_type_data_di(parameters_uninitialized) { - initialize(); + _extra_data_lock = nullptr; + initialize(); +} + +MethodData::MethodData() { + assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS"); } // Reinitialize the storage of an existing MDO at a safepoint. Doing it this way will ensure it's @@ -1364,7 +1431,7 @@ void MethodData::init() { } bool MethodData::is_mature() const { - return CompilationPolicy::is_mature(_method); + return CompilationPolicy::is_mature((MethodData*)this); } // Translate a bci to its corresponding data index (di). @@ -1552,7 +1619,8 @@ void MethodData::print_value_on(outputStream* st) const { } void MethodData::print_data_on(outputStream* st) const { - ConditionalMutexLocker ml(extra_data_lock(), !extra_data_lock()->owned_by_self(), + Mutex* lock = const_cast(this)->extra_data_lock(); + ConditionalMutexLocker ml(lock, !lock->owned_by_self(), Mutex::_no_safepoint_check_flag); ResourceMark rm; ProfileData* data = first_data(); @@ -1725,8 +1793,26 @@ bool MethodData::profile_parameters_for_method(const methodHandle& m) { } void MethodData::metaspace_pointers_do(MetaspaceClosure* it) { - log_trace(cds)("Iter(MethodData): %p", this); + log_trace(cds)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string()); it->push(&_method); + if (_parameters_type_data_di != no_parameters) { + parameters_type_data()->metaspace_pointers_do(it); + } + for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) { + data->metaspace_pointers_do(it); + } + for (DataLayout* dp = extra_data_base(); + dp < extra_data_limit(); + dp = MethodData::next_extra(dp)) { + if (dp->tag() == DataLayout::speculative_trap_data_tag) { + ResourceMark rm; + SpeculativeTrapData* data = new SpeculativeTrapData(dp); + data->metaspace_pointers_do(it); + } else if (dp->tag() == DataLayout::no_tag || + dp->tag() == DataLayout::arg_info_data_tag) { + break; + } + } } void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) { @@ -1758,6 +1844,9 @@ class CleanExtraDataKlassClosure : public CleanExtraDataClosure { public: CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {} bool is_live(Method* m) { + if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) { + return true; // TODO: treat as unloaded instead? 
+ } return !(_always_clean) && m->method_holder()->is_loader_alive(); } }; @@ -1769,6 +1858,20 @@ class CleanExtraDataMethodClosure : public CleanExtraDataClosure { bool is_live(Method* m) { return !m->is_old(); } }; +Mutex* MethodData::extra_data_lock() { + Mutex* lock = Atomic::load(&_extra_data_lock); + if (lock == nullptr) { + // This lock could be acquired while we are holding DumpTimeTable_lock/nosafepoint + lock = new Mutex(Mutex::nosafepoint-1, "MDOExtraData_lock"); + Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock); + if (old != nullptr) { + // Another thread created the lock before us. Use that lock instead. + delete lock; + return old; + } + } + return lock; +} // Remove SpeculativeTrapData entries that reference an unloaded or // redefined method @@ -1785,7 +1888,7 @@ void MethodData::clean_extra_data(CleanExtraDataClosure* cl) { SpeculativeTrapData* data = new SpeculativeTrapData(dp); Method* m = data->method(); assert(m != nullptr, "should have a method"); - if (!cl->is_live(m)) { + if (is_excluded(m->method_holder()) || !cl->is_live(m)) { // "shift" accumulates the number of cells for dead // SpeculativeTrapData entries that have been seen so // far. Following entries must be shifted left by that many @@ -1889,13 +1992,23 @@ void MethodData::release_C_heap_structures() { #endif } +#if INCLUDE_CDS +void MethodData::remove_unshareable_info() { + _extra_data_lock = nullptr; +} + +void MethodData::restore_unshareable_info(TRAPS) { + //_extra_data_lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock"); +} +#endif // INCLUDE_CDS + #ifdef ASSERT void MethodData::check_extra_data_locked() const { // Cast const away, just to be able to verify the lock // Usually we only want non-const accesses on the lock, // so this here is an exception. 
MethodData* self = (MethodData*)this; - assert(self->extra_data_lock()->owned_by_self(), "must have lock"); + assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock"); assert(!Thread::current()->is_Java_thread() || JavaThread::current()->is_in_no_safepoint_scope(), "JavaThread must have NoSafepointVerifier inside lock scope"); diff --git a/src/hotspot/share/oops/methodData.hpp b/src/hotspot/share/oops/methodData.hpp index 7870d6e459f1f..61137d9fb7ac1 100644 --- a/src/hotspot/share/oops/methodData.hpp +++ b/src/hotspot/share/oops/methodData.hpp @@ -29,7 +29,6 @@ #include "interpreter/invocationCounter.hpp" #include "oops/metadata.hpp" #include "oops/method.hpp" -#include "oops/oop.hpp" #include "runtime/atomic.hpp" #include "runtime/deoptimization.hpp" #include "runtime/mutex.hpp" @@ -202,6 +201,9 @@ class DataLayout { intptr_t cell_at(int index) const { return _cells[index]; } + intptr_t* cell_at_adr(int index) const { + return const_cast(&_cells[index]); + } bool set_flag_at(u1 flag_number) { const u1 bit = 1 << flag_number; @@ -345,6 +347,10 @@ class ProfileData : public ResourceObj { assert(0 <= index && index < cell_count(), "oob"); return data()->cell_at(index); } + intptr_t* intptr_at_adr(int index) const { + assert(0 <= index && index < cell_count(), "oob"); + return data()->cell_at_adr(index); + } void set_uint_at(int index, uint value) { set_intptr_at(index, (intptr_t) value); } @@ -362,12 +368,6 @@ class ProfileData : public ResourceObj { int int_at_unchecked(int index) const { return (int)data()->cell_at(index); } - void set_oop_at(int index, oop value) { - set_intptr_at(index, cast_from_oop(value)); - } - oop oop_at(int index) const { - return cast_to_oop(intptr_at(index)); - } void set_flag_at(u1 flag_number) { data()->set_flag_at(flag_number); @@ -488,7 +488,10 @@ class ProfileData : public ResourceObj { // GC support virtual void clean_weak_klass_links(bool always_clean) {} - // CI translation: ProfileData can represent both MethodDataOop data + // CDS support + virtual void metaspace_pointers_do(MetaspaceClosure* it) {} + + // CI translation: ProfileData can represent both MethodDataOop data // as well as CIMethodData data. This function is provided for translating // an oop in a ProfileData to the ci equivalent. 
Generally speaking, // most ProfileData don't require any translation, so we provide the null @@ -853,6 +856,11 @@ class TypeStackSlotEntries : public TypeEntries { return _pd->intptr_at(type_offset_in_cells(i)); } + intptr_t* type_adr(int i) const { + assert(i >= 0 && i < _number_of_entries, "oob"); + return _pd->intptr_at_adr(type_offset_in_cells(i)); + } + // set type for entry i void set_type(int i, intptr_t k) { assert(i >= 0 && i < _number_of_entries, "oob"); @@ -874,6 +882,9 @@ class TypeStackSlotEntries : public TypeEntries { // GC support void clean_weak_klass_links(bool always_clean); + // CDS support + virtual void metaspace_pointers_do(MetaspaceClosure* it); + void print_data_on(outputStream* st) const; }; @@ -898,6 +909,10 @@ class ReturnTypeEntry : public TypeEntries { return _pd->intptr_at(_base_off); } + intptr_t* type_adr() const { + return _pd->intptr_at_adr(_base_off); + } + void set_type(intptr_t k) { _pd->set_intptr_at(_base_off, k); } @@ -917,6 +932,9 @@ class ReturnTypeEntry : public TypeEntries { // GC support void clean_weak_klass_links(bool always_clean); + // CDS support + virtual void metaspace_pointers_do(MetaspaceClosure* it); + void print_data_on(outputStream* st) const; }; @@ -1108,6 +1126,16 @@ class CallTypeData : public CounterData { } } + // CDS support + virtual void metaspace_pointers_do(MetaspaceClosure* it) { + if (has_arguments()) { + _args.metaspace_pointers_do(it); + } + if (has_return()) { + _ret.metaspace_pointers_do(it); + } + } + virtual void print_data_on(outputStream* st, const char* extra = nullptr) const; }; @@ -1218,6 +1246,9 @@ class ReceiverTypeData : public CounterData { // GC support virtual void clean_weak_klass_links(bool always_clean); + // CDS support + virtual void metaspace_pointers_do(MetaspaceClosure* it); + void print_receiver_data_on(outputStream* st) const; void print_data_on(outputStream* st, const char* extra = nullptr) const; }; @@ -1383,6 +1414,17 @@ class VirtualCallTypeData : public VirtualCallData { } } + // CDS support + virtual void metaspace_pointers_do(MetaspaceClosure* it) { + ReceiverTypeData::metaspace_pointers_do(it); + if (has_arguments()) { + _args.metaspace_pointers_do(it); + } + if (has_return()) { + _ret.metaspace_pointers_do(it); + } + } + virtual void print_data_on(outputStream* st, const char* extra = nullptr) const; }; @@ -1566,10 +1608,6 @@ class ArrayData : public ProfileData { int aindex = index + array_start_off_set; return int_at(aindex); } - oop array_oop_at(int index) const { - int aindex = index + array_start_off_set; - return oop_at(aindex); - } void array_set_int_at(int index, int value) { int aindex = index + array_start_off_set; set_int_at(aindex, value); @@ -1782,6 +1820,11 @@ class ParametersTypeData : public ArrayData { _parameters.clean_weak_klass_links(always_clean); } + // CDS support + virtual void metaspace_pointers_do(MetaspaceClosure* it) { + _parameters.metaspace_pointers_do(it); + } + virtual void print_data_on(outputStream* st, const char* extra = nullptr) const; static ByteSize stack_slot_offset(int i) { @@ -1852,6 +1895,9 @@ class SpeculativeTrapData : public ProfileData { return cell_offset(speculative_trap_method); } + // CDS support + virtual void metaspace_pointers_do(MetaspaceClosure* it); + virtual void print_data_on(outputStream* st, const char* extra = nullptr) const; }; @@ -1962,13 +2008,15 @@ class MethodData : public Metadata { // Cached hint for bci_to_dp and bci_to_data int _hint_di; - Mutex _extra_data_lock; + Mutex* volatile _extra_data_lock; MethodData(const 
methodHandle& method); void initialize(); public: + MethodData(); + static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS); virtual bool is_methodData() const { return true; } @@ -2266,6 +2314,11 @@ class MethodData : public Metadata { } #endif +#if INCLUDE_CDS + void remove_unshareable_info(); + void restore_unshareable_info(TRAPS); +#endif + void set_would_profile(bool p) { _would_profile = p ? profile : no_profile; } bool would_profile() const { return _would_profile != no_profile; } @@ -2504,7 +2557,7 @@ class MethodData : public Metadata { void clean_method_data(bool always_clean); void clean_weak_method_links(); - Mutex* extra_data_lock() const { return const_cast(&_extra_data_lock); } + Mutex* extra_data_lock(); void check_extra_data_locked() const NOT_DEBUG_RETURN; }; diff --git a/src/hotspot/share/oops/trainingData.cpp b/src/hotspot/share/oops/trainingData.cpp new file mode 100644 index 0000000000000..2f8acbc7755a0 --- /dev/null +++ b/src/hotspot/share/oops/trainingData.cpp @@ -0,0 +1,802 @@ +/* + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "ci/ciEnv.hpp" +#include "ci/ciMetadata.hpp" +#include "cds/cdsConfig.hpp" +#include "cds/metaspaceShared.hpp" +#include "classfile/classLoaderData.hpp" +#include "classfile/compactHashtable.hpp" +#include "classfile/javaClasses.hpp" +#include "classfile/symbolTable.hpp" +#include "classfile/systemDictionaryShared.hpp" +#include "compiler/compileTask.hpp" +#include "memory/metadataFactory.hpp" +#include "memory/metaspaceClosure.hpp" +#include "memory/resourceArea.hpp" +#include "oops/method.hpp" +#include "oops/methodCounters.hpp" +#include "oops/trainingData.hpp" +#include "runtime/arguments.hpp" +#include "runtime/javaThread.inline.hpp" +#include "runtime/jniHandles.inline.hpp" +#include "utilities/growableArray.hpp" + +TrainingData::TrainingDataSet TrainingData::_training_data_set(1024, 0x3fffffff); +TrainingData::TrainingDataDictionary TrainingData::_archived_training_data_dictionary; +TrainingData::TrainingDataDictionary TrainingData::_archived_training_data_dictionary_for_dumping; +TrainingData::DumptimeTrainingDataDictionary* TrainingData::_dumptime_training_data_dictionary = nullptr; +int TrainingData::TrainingDataLocker::_lock_mode; +volatile bool TrainingData::TrainingDataLocker::_snapshot = false; + +MethodTrainingData::MethodTrainingData() { + assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS"); +} + +KlassTrainingData::KlassTrainingData() { + assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS"); +} + +CompileTrainingData::CompileTrainingData() : _level(-1), _compile_id(-1) { + assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS"); +} + +void TrainingData::initialize() { + // this is a nop if training modes are not enabled + if (have_data() || need_data()) { + // Data structures that we have do not currently support iterative training. So you cannot replay + // and train at the same time. Going forward we may want to adjust iteration/search to enable that. + guarantee(have_data() != need_data(), "Iterative training is not supported"); + TrainingDataLocker::initialize(); + } +} + +static void verify_archived_entry(TrainingData* td, const TrainingData::Key* k) { + guarantee(TrainingData::Key::can_compute_cds_hash(k), ""); + TrainingData* td1 = TrainingData::lookup_archived_training_data(k); + guarantee(td == td1, ""); +} + +void TrainingData::verify() { + if (TrainingData::have_data()) { + archived_training_data_dictionary()->iterate([&](TrainingData* td) { + if (td->is_KlassTrainingData()) { + KlassTrainingData* ktd = td->as_KlassTrainingData(); + if (ktd->has_holder() && ktd->holder()->is_loaded()) { + Key k(ktd->holder()); + verify_archived_entry(td, &k); + } + ktd->verify(); + } else if (td->is_MethodTrainingData()) { + MethodTrainingData* mtd = td->as_MethodTrainingData(); + if (mtd->has_holder() && mtd->holder()->method_holder()->is_loaded()) { + Key k(mtd->holder()); + verify_archived_entry(td, &k); + } + mtd->verify(); + } else if (td->is_CompileTrainingData()) { + td->as_CompileTrainingData()->verify(); + } + }); + } +} + +MethodTrainingData* MethodTrainingData::make(const methodHandle& method, bool null_if_not_found, bool use_cache) { + MethodTrainingData* mtd = nullptr; + if (!have_data() && !need_data()) { + return mtd; + } + // Try grabbing the cached value first. + // Cache value is stored in MethodCounters and the following are the + // possible states: + // 1. Cached value is method_training_data_sentinel(). 
+ // This is an initial state and needs a full lookup. + // 2. Cached value is null. + // Lookup failed the last time, if we don't plan to create a new TD object, + // i.e. null_if_no_found == true, then just return a null. + // 3. Cache value is not null. + // Return it, the value of training_data_lookup_failed doesn't matter. + MethodCounters* mcs = method->method_counters(); + if (mcs != nullptr) { + mtd = mcs->method_training_data(); + if (mtd != nullptr && mtd != mcs->method_training_data_sentinel()) { + return mtd; + } + if (null_if_not_found && mtd == nullptr) { + assert(mtd == nullptr, "No training data found"); + return nullptr; + } + } else if (use_cache) { + mcs = Method::build_method_counters(Thread::current(), method()); + } + + TrainingData* td = nullptr; + + Key key(method()); + if (have_data()) { + td = lookup_archived_training_data(&key); + if (td != nullptr) { + mtd = td->as_MethodTrainingData(); + } else { + mtd = nullptr; + } + // Cache the pointer to MTD in MethodCounters for faster lookup (could be null if not found) + method->init_training_data(mtd); + } + + if (need_data()) { + TrainingDataLocker l; + td = training_data_set()->find(&key); + if (td == nullptr) { + if (!null_if_not_found) { + KlassTrainingData* ktd = KlassTrainingData::make(method->method_holder()); + if (ktd == nullptr) { + return nullptr; // allocation failure + } + mtd = MethodTrainingData::allocate(method(), ktd); + if (mtd == nullptr) { + return nullptr; // allocation failure + } + td = training_data_set()->install(mtd); + assert(td == mtd, ""); + } else { + mtd = nullptr; + } + } else { + mtd = td->as_MethodTrainingData(); + } + // Cache the pointer to MTD in MethodCounters for faster lookup (could be null if not found) + method->init_training_data(mtd); + } + + return mtd; +} + +void MethodTrainingData::print_on(outputStream* st, bool name_only) const { + if (has_holder()) { + _klass->print_on(st, true); + st->print("."); + name()->print_symbol_on(st); + signature()->print_symbol_on(st); + } + if (name_only) { + return; + } + if (!has_holder()) { + st->print("[SYM]"); + } + if (_level_mask) { + st->print(" LM%d", _level_mask); + } + st->print(" mc=%p mdo=%p", _final_counters, _final_profile); +} + +CompileTrainingData* CompileTrainingData::make(CompileTask* task) { + int level = task->comp_level(); + int compile_id = task->compile_id(); + Thread* thread = Thread::current(); + methodHandle m(thread, task->method()); + if (m->method_holder() == nullptr) { + return nullptr; // do not record (dynamically generated method) + } + MethodTrainingData* mtd = MethodTrainingData::make(m); + if (mtd == nullptr) { + return nullptr; // allocation failure + } + mtd->notice_compilation(level); + + TrainingDataLocker l; + CompileTrainingData* ctd = CompileTrainingData::allocate(mtd, level, compile_id); + if (ctd != nullptr) { + CompileTrainingData*& last_ctd = mtd->_last_toplevel_compiles[level - 1]; + if (last_ctd != nullptr) { + assert(mtd->highest_top_level() >= level, "consistency"); + if (last_ctd->compile_id() < compile_id) { + last_ctd->clear_init_deps(); + last_ctd = ctd; + } + } else { + last_ctd = ctd; + mtd->notice_toplevel_compilation(level); + } + } + return ctd; +} + + +void CompileTrainingData::dec_init_deps_left(KlassTrainingData* ktd) { + LogStreamHandle(Trace, training) log; + if (log.is_enabled()) { + log.print("CTD "); print_on(&log); log.cr(); + log.print("KTD "); ktd->print_on(&log); log.cr(); + } + assert(ktd!= nullptr && ktd->has_holder(), ""); + assert(_init_deps.contains(ktd), ""); + 
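  // _init_deps_left counts how many classes in _init_deps are still uninitialized;
  // when it reaches zero, every init dependency of this recorded compilation is
  // satisfied.  The decrement below is atomic because the TrainingDataLocker taken
  // by callers is not a real lock when training data is only being replayed.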
assert(_init_deps_left > 0, ""); + + uint init_deps_left1 = Atomic::sub(&_init_deps_left, 1); + + if (log.is_enabled()) { + uint init_deps_left2 = compute_init_deps_left(); + log.print("init_deps_left: %d (%d)", init_deps_left1, init_deps_left2); + ktd->print_on(&log, true); + } +} + +uint CompileTrainingData::compute_init_deps_left(bool count_initialized) { + int left = 0; + for (int i = 0; i < _init_deps.length(); i++) { + KlassTrainingData* ktd = _init_deps.at(i); + // Ignore symbolic refs and already initialized classes (unless explicitly requested). + if (ktd->has_holder()) { + InstanceKlass* holder = ktd->holder(); + if (!ktd->holder()->is_initialized() || count_initialized) { + ++left; + } else if (holder->is_shared_unregistered_class()) { + Key k(holder); + if (CDS_ONLY(!Key::can_compute_cds_hash(&k)) NOT_CDS(true)) { + ++left; + } + } + } + } + return left; +} + +void CompileTrainingData::print_on(outputStream* st, bool name_only) const { + _method->print_on(st, true); + st->print("#%dL%d", _compile_id, _level); + if (name_only) { + return; + } + if (_init_deps.length() > 0) { + if (_init_deps_left > 0) { + st->print(" udeps=%d", _init_deps_left); + } + for (int i = 0, len = _init_deps.length(); i < len; i++) { + st->print(" dep:"); + _init_deps.at(i)->print_on(st, true); + } + } +} + +void CompileTrainingData::notice_inlined_method(CompileTask* task, + const methodHandle& method) { + MethodTrainingData* mtd = MethodTrainingData::make(method); + if (mtd != nullptr) { + mtd->notice_compilation(task->comp_level(), true); + } +} + +void CompileTrainingData::notice_jit_observation(ciEnv* env, ciBaseObject* what) { + // A JIT is starting to look at class k. + // We could follow the queries that it is making, but it is + // simpler to assume, conservatively, that the JIT will + // eventually depend on the initialization state of k. + CompileTask* task = env->task(); + assert(task != nullptr, ""); + Method* method = task->method(); + InstanceKlass* compiling_klass = method->method_holder(); + if (what->is_metadata()) { + ciMetadata* md = what->as_metadata(); + if (md->is_loaded() && md->is_instance_klass()) { + ciInstanceKlass* cik = md->as_instance_klass(); + + if (cik->is_initialized()) { + InstanceKlass* ik = md->as_instance_klass()->get_instanceKlass(); + KlassTrainingData* ktd = KlassTrainingData::make(ik); + if (ktd == nullptr) { + // Allocation failure or snapshot in progress + return; + } + // This JIT task is (probably) requesting that ik be initialized, + // so add him to my _init_deps list. 
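      // (Presumably, when this training data is replayed, the recorded compilation
      //  is deferred until each class in _init_deps has been initialized; see
      //  init_deps_left() and dec_init_deps_left().)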
+ TrainingDataLocker l; + add_init_dep(ktd); + } + } + } +} + +void KlassTrainingData::prepare(Visitor& visitor) { + if (visitor.is_visited(this)) { + return; + } + visitor.visit(this); + ClassLoaderData* loader_data = nullptr; + if (_holder != nullptr) { + loader_data = _holder->class_loader_data(); + } else { + loader_data = java_lang_ClassLoader::loader_data(SystemDictionary::java_system_loader()); // default CLD + } + _comp_deps.prepare(loader_data); +} + +void MethodTrainingData::prepare(Visitor& visitor) { + if (visitor.is_visited(this)) { + return; + } + visitor.visit(this); + klass()->prepare(visitor); + if (has_holder()) { + _final_counters = holder()->method_counters(); + _final_profile = holder()->method_data(); + assert(_final_profile == nullptr || _final_profile->method() == holder(), ""); + } + for (int i = 0; i < CompLevel_count - 1; i++) { + CompileTrainingData* ctd = _last_toplevel_compiles[i]; + if (ctd != nullptr) { + ctd->prepare(visitor); + } + } +} + +void CompileTrainingData::prepare(Visitor& visitor) { + if (visitor.is_visited(this)) { + return; + } + visitor.visit(this); + method()->prepare(visitor); + ClassLoaderData* loader_data = _method->klass()->class_loader_data(); + _init_deps.prepare(loader_data); + _ci_records.prepare(loader_data); +} + +KlassTrainingData* KlassTrainingData::make(InstanceKlass* holder, bool null_if_not_found) { + Key key(holder); + TrainingData* td = CDS_ONLY(have_data() ? lookup_archived_training_data(&key) :) nullptr; + KlassTrainingData* ktd = nullptr; + if (td != nullptr) { + ktd = td->as_KlassTrainingData(); + guarantee(!ktd->has_holder() || ktd->holder() == holder, ""); + if (ktd->has_holder()) { + return ktd; + } else { + ktd = nullptr; + } + } + if (need_data()) { + TrainingDataLocker l; + td = training_data_set()->find(&key); + if (td == nullptr) { + if (null_if_not_found) { + return nullptr; + } + ktd = KlassTrainingData::allocate(holder); + if (ktd == nullptr) { + return nullptr; // allocation failure + } + td = training_data_set()->install(ktd); + assert(ktd == td, ""); + } else { + ktd = td->as_KlassTrainingData(); + guarantee(ktd->holder() != nullptr, "null holder"); + } + assert(ktd != nullptr, ""); + guarantee(ktd->holder() == holder, ""); + } + return ktd; +} + +void KlassTrainingData::print_on(outputStream* st, bool name_only) const { + if (has_holder()) { + name()->print_symbol_on(st); + switch (holder()->init_state()) { + case InstanceKlass::allocated: st->print("[A]"); break; + case InstanceKlass::loaded: st->print("[D]"); break; + case InstanceKlass::linked: st->print("[L]"); break; + case InstanceKlass::being_initialized: st->print("[i]"); break; + case InstanceKlass::fully_initialized: break; + case InstanceKlass::initialization_error: st->print("[E]"); break; + default: fatal("unknown state: %d", holder()->init_state()); + } + if (holder()->is_interface()) { + st->print("I"); + } + } else { + st->print("[SYM]"); + } + if (name_only) { + return; + } + if (_comp_deps.length() > 0) { + for (int i = 0, len = _comp_deps.length(); i < len; i++) { + st->print(" dep:"); + _comp_deps.at(i)->print_on(st, true); + } + } +} + +KlassTrainingData::KlassTrainingData(InstanceKlass* klass) : TrainingData(klass) { + if (holder() == klass) { + return; // no change to make + } + + jobject hmj = _holder_mirror; + if (hmj != nullptr) { // clear out previous handle, if any + _holder_mirror = nullptr; + assert(JNIHandles::is_global_handle(hmj), ""); + JNIHandles::destroy_global(hmj); + } + + if (klass != nullptr) { + Handle 
hm(JavaThread::current(), klass->java_mirror()); + hmj = JNIHandles::make_global(hm); + Atomic::release_store(&_holder_mirror, hmj); + } + + Atomic::release_store(&_holder, const_cast(klass)); + assert(holder() == klass, ""); +} + +void KlassTrainingData::notice_fully_initialized() { + ResourceMark rm; + assert(has_holder(), ""); + assert(holder()->is_initialized(), "wrong state: %s %s", + holder()->name()->as_C_string(), holder()->init_state_name()); + + TrainingDataLocker l; // Not a real lock if we don't collect the data, + // that's why we need the atomic decrement below. + for (int i = 0; i < comp_dep_count(); i++) { + comp_dep(i)->dec_init_deps_left(this); + } + holder()->set_has_init_deps_processed(); +} + +void TrainingData::init_dumptime_table(TRAPS) { + if (!need_data()) { + return; + } + _dumptime_training_data_dictionary = new DumptimeTrainingDataDictionary(); + if (CDSConfig::is_dumping_final_static_archive()) { + _archived_training_data_dictionary.iterate([&](TrainingData* record) { + _dumptime_training_data_dictionary->append(record); + }); + } else { + TrainingDataLocker l; + TrainingDataLocker::snapshot(); + + ResourceMark rm; + Visitor visitor(training_data_set()->size()); + training_data_set()->iterate([&](TrainingData* td) { + td->prepare(visitor); + if (!td->is_CompileTrainingData()) { + _dumptime_training_data_dictionary->append(td); + } + }); + + if (AOTVerifyTrainingData) { + training_data_set()->verify(); + } + } +} + +void TrainingData::iterate_roots(MetaspaceClosure* it) { + if (!need_data()) { + return; + } + assert(_dumptime_training_data_dictionary != nullptr, ""); + for (int i = 0; i < _dumptime_training_data_dictionary->length(); i++) { + _dumptime_training_data_dictionary->at(i).metaspace_pointers_do(it); + } +} + +void TrainingData::dump_training_data() { + if (!need_data()) { + return; + } + write_training_data_dictionary(&_archived_training_data_dictionary_for_dumping); +} + +void TrainingData::cleanup_training_data() { + if (_dumptime_training_data_dictionary != nullptr) { + ResourceMark rm; + Visitor visitor(_dumptime_training_data_dictionary->length()); + for (int i = 0; i < _dumptime_training_data_dictionary->length(); i++) { + TrainingData* td = _dumptime_training_data_dictionary->at(i).training_data(); + td->cleanup(visitor); + } + // Throw away all elements with empty keys + int j = 0; + for (int i = 0; i < _dumptime_training_data_dictionary->length(); i++) { + TrainingData* td = _dumptime_training_data_dictionary->at(i).training_data(); + if (td->key()->is_empty()) { + continue; + } + if (i != j) { // no need to copy if it's the same + _dumptime_training_data_dictionary->at_put(j, td); + } + j++; + } + _dumptime_training_data_dictionary->trunc_to(j); + } +} + +void KlassTrainingData::cleanup(Visitor& visitor) { + if (visitor.is_visited(this)) { + return; + } + visitor.visit(this); + if (has_holder()) { + bool is_excluded = !holder()->is_loaded() || SystemDictionaryShared::check_for_exclusion(holder(), nullptr); + if (is_excluded) { + ResourceMark rm; + log_debug(cds)("Cleanup KTD %s", name()->as_klass_external_name()); + _holder = nullptr; + key()->make_empty(); + } + } + for (int i = 0; i < _comp_deps.length(); i++) { + _comp_deps.at(i)->cleanup(visitor); + } +} + +void MethodTrainingData::cleanup(Visitor& visitor) { + if (visitor.is_visited(this)) { + return; + } + visitor.visit(this); + if (has_holder()) { + if (SystemDictionaryShared::check_for_exclusion(holder()->method_holder(), nullptr)) { + log_debug(cds)("Cleanup MTD %s::%s", 
name()->as_klass_external_name(), signature()->as_utf8()); + if (_final_profile != nullptr && _final_profile->method() != _holder) { + log_warning(cds)("Stale MDO for %s::%s", name()->as_klass_external_name(), signature()->as_utf8()); + } + _final_profile = nullptr; + _final_counters = nullptr; + _holder = nullptr; + key()->make_empty(); + } + } + for (int i = 0; i < CompLevel_count - 1; i++) { + CompileTrainingData* ctd = _last_toplevel_compiles[i]; + if (ctd != nullptr) { + ctd->cleanup(visitor); + } + } +} + +void KlassTrainingData::verify() { + for (int i = 0; i < comp_dep_count(); i++) { + CompileTrainingData* ctd = comp_dep(i); + if (!ctd->_init_deps.contains(this)) { + print_on(tty); tty->cr(); + ctd->print_on(tty); tty->cr(); + } + guarantee(ctd->_init_deps.contains(this), ""); + } +} + +void MethodTrainingData::verify() { + iterate_compiles([](CompileTrainingData* ctd) { + ctd->verify(); + + int init_deps_left1 = ctd->init_deps_left(); + int init_deps_left2 = ctd->compute_init_deps_left(); + + if (init_deps_left1 != init_deps_left2) { + ctd->print_on(tty); tty->cr(); + } + guarantee(init_deps_left1 == init_deps_left2, "mismatch: %d %d %d", + init_deps_left1, init_deps_left2, ctd->init_deps_left()); + }); +} + +void CompileTrainingData::verify() { + for (int i = 0; i < init_dep_count(); i++) { + KlassTrainingData* ktd = init_dep(i); + if (ktd->has_holder() && ktd->holder()->is_shared_unregistered_class()) { + LogStreamHandle(Warning, training) log; + if (log.is_enabled()) { + ResourceMark rm; + log.print("CTD "); print_value_on(&log); + log.print(" depends on unregistered class %s", ktd->holder()->name()->as_C_string()); + } + } + if (!ktd->_comp_deps.contains(this)) { + print_on(tty); tty->cr(); + ktd->print_on(tty); tty->cr(); + } + guarantee(ktd->_comp_deps.contains(this), ""); + } +} + +void CompileTrainingData::cleanup(Visitor& visitor) { + if (visitor.is_visited(this)) { + return; + } + visitor.visit(this); + method()->cleanup(visitor); +} + +void TrainingData::serialize(SerializeClosure* soc) { + if (soc->writing()) { + _archived_training_data_dictionary_for_dumping.serialize_header(soc); + } else { + _archived_training_data_dictionary.serialize_header(soc); + } +} + +class TrainingDataPrinter : StackObj { + outputStream* _st; + int _index; +public: + TrainingDataPrinter(outputStream* st) : _st(st), _index(0) {} + void do_value(TrainingData* td) { + const char* type = (td->is_KlassTrainingData() ? "K" : + td->is_MethodTrainingData() ? "M" : + td->is_CompileTrainingData() ? "C" : "?"); + _st->print("%4d: %p %s ", _index++, td, type); + td->print_on(_st); + _st->cr(); + if (td->is_KlassTrainingData()) { + td->as_KlassTrainingData()->iterate_comp_deps([&](CompileTrainingData* ctd) { + ResourceMark rm; + _st->print_raw(" C "); + ctd->print_on(_st); + _st->cr(); + }); + } else if (td->is_MethodTrainingData()) { + td->as_MethodTrainingData()->iterate_compiles([&](CompileTrainingData* ctd) { + ResourceMark rm; + _st->print_raw(" C "); + ctd->print_on(_st); + _st->cr(); + }); + } else if (td->is_CompileTrainingData()) { + // ? 
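      // A CompileTrainingData record is only reachable through its owning
      // MethodTrainingData (init_dumptime_table() does not enter it into the
      // dictionary on its own), so there is nothing further to print here.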
+ } + } +}; + +void TrainingData::print_archived_training_data_on(outputStream* st) { + st->print_cr("Archived TrainingData Dictionary"); + TrainingDataPrinter tdp(st); + TrainingDataLocker::initialize(); + _archived_training_data_dictionary.iterate(&tdp); +} + +void TrainingData::Key::metaspace_pointers_do(MetaspaceClosure *iter) { + iter->push(const_cast(&_meta)); +} + +void TrainingData::metaspace_pointers_do(MetaspaceClosure* iter) { + _key.metaspace_pointers_do(iter); +} + +bool TrainingData::Key::can_compute_cds_hash(const Key* const& k) { + return k->meta() == nullptr || MetaspaceObj::is_shared(k->meta()); +} + +uint TrainingData::Key::cds_hash(const Key* const& k) { + return SystemDictionaryShared::hash_for_shared_dictionary((address)k->meta()); +} + +void TrainingData::write_training_data_dictionary(TrainingDataDictionary* dictionary) { + if (!need_data()) { + return; + } + assert(_dumptime_training_data_dictionary != nullptr, ""); + CompactHashtableStats stats; + dictionary->reset(); + CompactHashtableWriter writer(_dumptime_training_data_dictionary->length(), &stats); + for (int i = 0; i < _dumptime_training_data_dictionary->length(); i++) { + TrainingData* td = _dumptime_training_data_dictionary->at(i).training_data(); +#ifdef ASSERT + for (int j = i+1; j < _dumptime_training_data_dictionary->length(); j++) { + TrainingData* td1 = _dumptime_training_data_dictionary->at(j).training_data(); + assert(!TrainingData::Key::equals(td1, td->key(), -1), "conflict"); + } +#endif // ASSERT + td = ArchiveBuilder::current()->get_buffered_addr(td); + uint hash = TrainingData::Key::cds_hash(td->key()); + u4 delta = ArchiveBuilder::current()->buffer_to_offset_u4((address)td); + writer.add(hash, delta); + } + writer.dump(dictionary, "training data dictionary"); +} + +TrainingData* TrainingData::lookup_archived_training_data(const Key* k) { + // For this to work, all components of the key must be in shared metaspace. 
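  // Typical use (sketch): resolving the archived record for a live Method, as
  // MethodTrainingData::make() does when have_data() is true:
  //
  //   TrainingData::Key key(method);   // keyed by the (shared) Method*
  //   TrainingData* td = TrainingData::lookup_archived_training_data(&key);
  //   MethodTrainingData* mtd = (td != nullptr) ? td->as_MethodTrainingData() : nullptr;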
+ if (!TrainingData::Key::can_compute_cds_hash(k) || _archived_training_data_dictionary.empty()) { + return nullptr; + } + uint hash = TrainingData::Key::cds_hash(k); + TrainingData* td = _archived_training_data_dictionary.lookup(k, hash, -1 /*unused*/); + if (td != nullptr) { + if ((td->is_KlassTrainingData() && td->as_KlassTrainingData()->has_holder()) || + (td->is_MethodTrainingData() && td->as_MethodTrainingData()->has_holder())) { + return td; + } else { + ShouldNotReachHere(); + } + } + return nullptr; +} + +template +void TrainingData::DepList::metaspace_pointers_do(MetaspaceClosure* iter) { + iter->push(&_deps); +} + +void KlassTrainingData::metaspace_pointers_do(MetaspaceClosure* iter) { + log_trace(cds)("Iter(KlassTrainingData): %p", this); + TrainingData::metaspace_pointers_do(iter); + _comp_deps.metaspace_pointers_do(iter); + iter->push(&_holder); +} + +void MethodTrainingData::metaspace_pointers_do(MetaspaceClosure* iter) { + log_trace(cds)("Iter(MethodTrainingData): %p", this); + TrainingData::metaspace_pointers_do(iter); + iter->push(&_klass); + iter->push((Method**)&_holder); + for (int i = 0; i < CompLevel_count - 1; i++) { + iter->push(&_last_toplevel_compiles[i]); + } + iter->push(&_final_profile); + iter->push(&_final_counters); +} + +void CompileTrainingData::metaspace_pointers_do(MetaspaceClosure* iter) { + log_trace(cds)("Iter(CompileTrainingData): %p", this); + TrainingData::metaspace_pointers_do(iter); + _init_deps.metaspace_pointers_do(iter); + _ci_records.metaspace_pointers_do(iter); + iter->push(&_method); +} + +template +void TrainingData::DepList::prepare(ClassLoaderData* loader_data) { + if (_deps == nullptr && _deps_dyn != nullptr) { + int len = _deps_dyn->length(); + _deps = MetadataFactory::new_array_from_c_heap(len, mtClassShared); + for (int i = 0; i < len; i++) { + _deps->at_put(i, _deps_dyn->at(i)); // copy + } + } +} + +void KlassTrainingData::remove_unshareable_info() { + TrainingData::remove_unshareable_info(); + _holder_mirror = nullptr; + _comp_deps.remove_unshareable_info(); +} + +void MethodTrainingData::remove_unshareable_info() { + TrainingData::remove_unshareable_info(); + if (_final_counters != nullptr) { + _final_counters->remove_unshareable_info(); + } + if (_final_profile != nullptr) { + _final_profile->remove_unshareable_info(); + } +} + +void CompileTrainingData::remove_unshareable_info() { + TrainingData::remove_unshareable_info(); + _init_deps.remove_unshareable_info(); + _ci_records.remove_unshareable_info(); + _init_deps_left = compute_init_deps_left(true); +} diff --git a/src/hotspot/share/oops/trainingData.hpp b/src/hotspot/share/oops/trainingData.hpp new file mode 100644 index 0000000000000..b27145799011a --- /dev/null +++ b/src/hotspot/share/oops/trainingData.hpp @@ -0,0 +1,835 @@ +/* + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_OOPS_TRAININGDATA_HPP +#define SHARE_OOPS_TRAININGDATA_HPP + +#include "classfile/classLoaderData.hpp" +#include "classfile/compactHashtable.hpp" +#include "compiler/compilerDefinitions.hpp" +#include "compiler/compiler_globals.hpp" +#include "memory/allocation.hpp" +#include "memory/metaspaceClosure.hpp" +#include "oops/instanceKlass.hpp" +#include "oops/method.hpp" +#include "runtime/handles.hpp" +#include "runtime/mutexLocker.hpp" +#include "utilities/count_leading_zeros.hpp" +#include "utilities/resizeableResourceHash.hpp" + +class ciEnv; +class ciBaseObject; +class CompileTask; +class CompileTrainingData; +class KlassTrainingData; +class MethodTrainingData; + +// Base class for all the training data varieties +class TrainingData : public Metadata { + friend KlassTrainingData; + friend MethodTrainingData; + friend CompileTrainingData; +public: + // Key is used to insert any TrainingData (TD) object into a hash tables. The key is currently a + // pointer to a metaspace object the TD is associated with. For example, + // for KlassTrainingData it's an InstanceKlass, for MethodTrainingData it's a Method. + // The utility of the these hash tables is to be able to find a TD object for a given metaspace + // metaspace object. + class Key { + mutable Metadata* _meta; + // These guys can get to my constructors: + friend TrainingData; + friend KlassTrainingData; + friend MethodTrainingData; + friend CompileTrainingData; + + // The empty key + Key() : _meta(nullptr) { } + bool is_empty() const { return _meta == nullptr; } + public: + Key(Metadata* meta) : _meta(meta) { } + + static bool can_compute_cds_hash(const Key* const& k); + static uint cds_hash(const Key* const& k); + static unsigned hash(const Key* const& k) { + return primitive_hash(k->meta()); + } + static bool equals(const Key* const& k1, const Key* const& k2) { + return k1->meta() == k2->meta(); + } + static inline bool equals(TrainingData* value, const TrainingData::Key* key, int unused) { + return equals(value->key(), key); + } + int cmp(const Key* that) const { + auto m1 = this->meta(); + auto m2 = that->meta(); + if (m1 < m2) return -1; + if (m1 > m2) return +1; + return 0; + } + Metadata* meta() const { return _meta; } + void metaspace_pointers_do(MetaspaceClosure *iter); + void make_empty() const { _meta = nullptr; } + }; + + // TrainingDataLocker is used to guard read/write operations on non-MT-safe data structures. + // It supports recursive locking and a read-only mode (in which case no locks are taken). + // It is also a part of the TD collection termination protocol (see the "spanshot" field). 
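  // Typical use (sketch): mutations of the dump-time TrainingDataSet are performed
  // under the locker, and new records are refused once a snapshot has been taken:
  //
  //   {
  //     TrainingDataLocker l;                 // recursive; lock-free when not recording
  //     if (TrainingDataLocker::can_add()) {  // false once snapshot() has been called
  //       training_data_set()->install(td);
  //     }
  //   }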
+ class TrainingDataLocker { + static volatile bool _snapshot; // If true we're not allocating new training data + static int _lock_mode; + const bool _recursive; + static void lock() { +#if INCLUDE_CDS + assert(_lock_mode != 0, "Forgot to call TrainingDataLocker::initialize()"); + if (_lock_mode > 0) { + TrainingData_lock->lock(); + } +#endif + } + static void unlock() { +#if INCLUDE_CDS + if (_lock_mode > 0) { + TrainingData_lock->unlock(); + } +#endif + } + static bool safely_locked() { +#if INCLUDE_CDS + assert(_lock_mode != 0, "Forgot to call TrainingDataLocker::initialize()"); + if (_lock_mode > 0) { + return is_self_locked(); + } else { + return true; + } +#else + return true; +#endif + } + static bool is_self_locked() { + return CDS_ONLY(TrainingData_lock->owned_by_self()) NOT_CDS(false); + } + + public: + static void snapshot() { +#if INCLUDE_CDS + assert_locked(); + _snapshot = true; +#endif + } + static bool can_add() { +#if INCLUDE_CDS + assert_locked(); + return !_snapshot; +#else + return false; +#endif + } + static void initialize() { +#if INCLUDE_CDS + _lock_mode = need_data() ? +1 : -1; // if -1, we go lock-free +#endif + } + static void assert_locked() { + assert(safely_locked(), "use under TrainingDataLocker"); + } + static void assert_can_add() { + assert(can_add(), "Cannot add TrainingData objects"); + } + TrainingDataLocker() : _recursive(is_self_locked()) { + if (!_recursive) { + lock(); + } + } + ~TrainingDataLocker() { + if (!_recursive) { + unlock(); + } + } + }; + + // A set of TD objects that we collect during the training run. + class TrainingDataSet { + friend TrainingData; + ResizeableResourceHashtable + _table; + + public: + template + TrainingDataSet(Arg... arg) + : _table(arg...) { + } + TrainingData* find(const Key* key) const { + TrainingDataLocker::assert_locked(); + if (TrainingDataLocker::can_add()) { + auto res = _table.get(key); + return res == nullptr ? nullptr : *res; + } + return nullptr; + } + bool remove(const Key* key) { + return _table.remove(key); + } + TrainingData* install(TrainingData* td) { + TrainingDataLocker::assert_locked(); + TrainingDataLocker::assert_can_add(); + auto key = td->key(); + if (key->is_empty()) { + return td; // unkeyed TD not installed + } + bool created = false; + auto prior = _table.put_if_absent(key, td, &created); + if (prior == nullptr || *prior == td) { + return td; + } + assert(false, "no pre-existing elements allowed"); + return *prior; + } + template + void iterate(const Function& fn) const { // lambda enabled API + iterate(const_cast(fn)); + } + template + void iterate(Function& fn) const { // lambda enabled API + return _table.iterate_all([&](const TrainingData::Key* k, TrainingData* td) { fn(td); }); + } + int size() const { return _table.number_of_entries(); } + + void verify() const { + TrainingDataLocker::assert_locked(); + iterate([&](TrainingData* td) { td->verify(); }); + } + }; + + // A widget to ensure that we visit TD object only once (TD objects can have pointer to + // other TD object that are sometimes circular). + class Visitor { + ResizeableResourceHashtable _visited; + public: + Visitor(unsigned size) : _visited(size, 0x3fffffff) { } + bool is_visited(TrainingData* td) { + return _visited.contains(td); + } + void visit(TrainingData* td) { + bool created; + _visited.put_if_absent(td, &created); + } + }; + + typedef OffsetCompactHashtable TrainingDataDictionary; +private: + Key _key; + + // just forward all constructor arguments to the embedded key + template + TrainingData(Arg... 
arg) + : _key(arg...) { } + + // Container for recording TD during training run + static TrainingDataSet _training_data_set; + // Containter for replaying the training data (read-only, populated from the AOT image) + static TrainingDataDictionary _archived_training_data_dictionary; + // Container used for writing the AOT image + static TrainingDataDictionary _archived_training_data_dictionary_for_dumping; + class DumpTimeTrainingDataInfo { + TrainingData* _training_data; + public: + DumpTimeTrainingDataInfo() : DumpTimeTrainingDataInfo(nullptr) {} + DumpTimeTrainingDataInfo(TrainingData* training_data) : _training_data(training_data) {} + void metaspace_pointers_do(MetaspaceClosure* it) { + it->push(&_training_data); + } + TrainingData* training_data() { + return _training_data; + } + }; + typedef GrowableArrayCHeap DumptimeTrainingDataDictionary; + // A temporary container that is used to accumulate and filter TD during dumping + static DumptimeTrainingDataDictionary* _dumptime_training_data_dictionary; + + static TrainingDataSet* training_data_set() { return &_training_data_set; } + static TrainingDataDictionary* archived_training_data_dictionary() { return &_archived_training_data_dictionary; } + + public: + // Returns the key under which this TD is installed, or else + // Key::EMPTY if it is not installed. + const Key* key() const { return &_key; } + + static bool have_data() { return AOTReplayTraining; } // Going to read + static bool need_data() { return AOTRecordTraining; } // Going to write + + template + static void iterate(const Function& fn) { iterate(const_cast(fn)); } + + template + static void iterate(Function& fn) { // lambda enabled API + TrainingDataLocker l; + if (have_data()) { + archived_training_data_dictionary()->iterate(fn); + } + if (need_data()) { + training_data_set()->iterate(fn); + } + } + + virtual MethodTrainingData* as_MethodTrainingData() const { return nullptr; } + virtual KlassTrainingData* as_KlassTrainingData() const { return nullptr; } + virtual CompileTrainingData* as_CompileTrainingData() const { return nullptr; } + bool is_MethodTrainingData() const { return as_MethodTrainingData() != nullptr; } + bool is_KlassTrainingData() const { return as_KlassTrainingData() != nullptr; } + bool is_CompileTrainingData() const { return as_CompileTrainingData() != nullptr; } + + virtual void prepare(Visitor& visitor) = 0; + virtual void cleanup(Visitor& visitor) = 0; + + static void initialize() NOT_CDS_RETURN; + + static void verify(); + + // Widget for recording dependencies, as an N-to-M graph relation, + // possibly cyclic. + template + class DepList : public StackObj { + GrowableArrayCHeap* _deps_dyn; + Array* _deps; + public: + DepList() { + _deps_dyn = nullptr; + _deps = nullptr; + } + + int length() const { + return (_deps_dyn != nullptr ? _deps_dyn->length() + : _deps != nullptr ? _deps->length() + : 0); + } + E* adr_at(int i) const { + return (_deps_dyn != nullptr ? _deps_dyn->adr_at(i) + : _deps != nullptr ? 
_deps->adr_at(i) + : nullptr); + } + E at(int i) const { + assert(i >= 0 && i < length(), "oob"); + return *adr_at(i); + } + bool append_if_missing(E dep) { + if (_deps_dyn == nullptr) { + _deps_dyn = new GrowableArrayCHeap(10); + _deps_dyn->append(dep); + return true; + } else { + return _deps_dyn->append_if_missing(dep); + } + } + bool remove_if_existing(E dep) { + if (_deps_dyn != nullptr) { + return _deps_dyn->remove_if_existing(dep); + } + return false; + } + void clear() { + if (_deps_dyn != nullptr) { + _deps_dyn->clear(); + } + } + void append(E dep) { + if (_deps_dyn == nullptr) { + _deps_dyn = new GrowableArrayCHeap(10); + } + _deps_dyn->append(dep); + } + bool contains(E dep) { + for (int i = 0; i < length(); i++) { + if (dep == at(i)) { + return true; // found + } + } + return false; // not found + } + +#if INCLUDE_CDS + void remove_unshareable_info() { + _deps_dyn = nullptr; + } +#endif + void prepare(ClassLoaderData* loader_data); + void metaspace_pointers_do(MetaspaceClosure *iter); + }; + + virtual void metaspace_pointers_do(MetaspaceClosure *iter); + + static void init_dumptime_table(TRAPS); + +#if INCLUDE_CDS + virtual void remove_unshareable_info() {} + static void iterate_roots(MetaspaceClosure* it); + static void dump_training_data(); + static void cleanup_training_data(); + static void serialize(SerializeClosure* soc); + static void print_archived_training_data_on(outputStream* st); + static void write_training_data_dictionary(TrainingDataDictionary* dictionary); + static TrainingData* lookup_archived_training_data(const Key* k); +#endif + + template + static TrainingDataType* allocate(ArgTypes... args) { + assert(need_data() || have_data(), ""); + if (TrainingDataLocker::can_add()) { + return new (mtClassShared) TrainingDataType(args...); + } + return nullptr; + } +}; + +// Training data that is associated with an InstanceKlass +class KlassTrainingData : public TrainingData { + friend TrainingData; + friend CompileTrainingData; + + // Used by CDS. These classes need to access the private default constructor. 
+ template friend class CppVtableTesterA; + template friend class CppVtableTesterB; + template friend class CppVtableCloner; + + // cross-link to live klass, or null if not loaded or encountered yet + InstanceKlass* _holder; + jobject _holder_mirror; // extra link to prevent unloading by GC + + DepList _comp_deps; // compiles that depend on me + + KlassTrainingData(); + KlassTrainingData(InstanceKlass* klass); + + int comp_dep_count() const { + TrainingDataLocker::assert_locked(); + return _comp_deps.length(); + } + CompileTrainingData* comp_dep(int i) const { + TrainingDataLocker::assert_locked(); + return _comp_deps.at(i); + } + void add_comp_dep(CompileTrainingData* ctd) { + TrainingDataLocker::assert_locked(); + _comp_deps.append_if_missing(ctd); + } + void remove_comp_dep(CompileTrainingData* ctd) { + TrainingDataLocker::assert_locked(); + _comp_deps.remove_if_existing(ctd); + } + + public: + Symbol* name() const { + precond(has_holder()); + return holder()->name(); + } + bool has_holder() const { return _holder != nullptr; } + InstanceKlass* holder() const { return _holder; } + + static KlassTrainingData* make(InstanceKlass* holder, + bool null_if_not_found = false) NOT_CDS_RETURN_(nullptr); + static KlassTrainingData* find(InstanceKlass* holder) { + return make(holder, true); + } + virtual KlassTrainingData* as_KlassTrainingData() const { return const_cast(this); }; + + ClassLoaderData* class_loader_data() { + assert(has_holder(), ""); + return holder()->class_loader_data(); + } + void notice_fully_initialized() NOT_CDS_RETURN; + + void print_on(outputStream* st, bool name_only) const; + virtual void print_on(outputStream* st) const { print_on(st, false); } + virtual void print_value_on(outputStream* st) const { print_on(st, true); } + + virtual void prepare(Visitor& visitor); + virtual void cleanup(Visitor& visitor) NOT_CDS_RETURN; + + MetaspaceObj::Type type() const { + return KlassTrainingDataType; + } + +#if INCLUDE_CDS + virtual void remove_unshareable_info(); +#endif + + void metaspace_pointers_do(MetaspaceClosure *iter); + + int size() const { + return (int)align_metadata_size(align_up(sizeof(KlassTrainingData), BytesPerWord)/BytesPerWord); + } + + const char* internal_name() const { + return "{ klass training data }"; + }; + + void verify(); + + static KlassTrainingData* allocate(InstanceKlass* holder) { + return TrainingData::allocate(holder); + } + + template + void iterate_comp_deps(Function fn) const { // lambda enabled API + TrainingDataLocker l; + for (int i = 0; i < comp_dep_count(); i++) { + fn(comp_dep(i)); + } + } +}; + +// Information about particular JIT tasks. +class CompileTrainingData : public TrainingData { + friend TrainingData; + friend KlassTrainingData; + + // Used by CDS. These classes need to access the private default constructor. + template friend class CppVtableTesterA; + template friend class CppVtableTesterB; + template friend class CppVtableCloner; + + MethodTrainingData* _method; + const short _level; + const int _compile_id; + + // classes that should be initialized before this JIT task runs + DepList _init_deps; + // Number of uninitialized classes left, when it's 0, all deps are satisfied + volatile int _init_deps_left; + +public: + // ciRecords is a generic meachanism to memoize CI responses to arbitary queries. For each function we're interested in we record + // (return_value, argument_values) tuples in a list. Arguments are allowed to have Metaspace pointers in them. 
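  // For example (sketch; use_result(), result and the argument list are placeholders):
  // the ciMethod__inline_instructions_size record declared below is appended during a
  // training run and consulted before asking the CI again during replay:
  //
  //   auto cached = ctd->ci_records().ciMethod__inline_instructions_size.find(/* args */);
  //   if (cached.is_valid()) {
  //     use_result(cached.result());          // reuse the answer from the training run
  //   } else {
  //     ctd->ci_records().ciMethod__inline_instructions_size.append_if_missing(result /*, args */);
  //   }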
+ class ciRecords { + template class Arguments { + public: + bool operator==(const Arguments<>&) const { return true; } + void metaspace_pointers_do(MetaspaceClosure *iter) { } + }; + template class Arguments { + private: + T _first; + Arguments _remaining; + + public: + constexpr Arguments(const T& first, const Ts&... remaining) noexcept + : _first(first), _remaining(remaining...) {} + constexpr Arguments() noexcept : _first(), _remaining() {} + bool operator==(const Arguments& that) const { + return _first == that._first && _remaining == that._remaining; + } + template::value && std::is_base_of::type>::value)> + void metaspace_pointers_do(MetaspaceClosure *iter) { + iter->push(&_first); + _remaining.metaspace_pointers_do(iter); + } + template::value && std::is_base_of::type>::value))> + void metaspace_pointers_do(MetaspaceClosure *iter) { + _remaining.metaspace_pointers_do(iter); + } + }; + + template class ciMemoizedFunction : public StackObj { + public: + class OptionalReturnType { + bool _valid; + ReturnType _result; + public: + OptionalReturnType(bool valid, const ReturnType& result) : _valid(valid), _result(result) {} + bool is_valid() const { return _valid; } + ReturnType result() const { return _result; } + }; + private: + typedef Arguments ArgumentsType; + class Record : public MetaspaceObj { + ReturnType _result; + ArgumentsType _arguments; + public: + Record(const ReturnType& result, const ArgumentsType& arguments) : _result(result), _arguments(arguments) {} + Record() { } + ReturnType result() const { return _result; } + ArgumentsType arguments() const { return _arguments; } + bool operator==(const Record& that) { return _arguments == that._arguments; } + void metaspace_pointers_do(MetaspaceClosure *iter) { _arguments.metaspace_pointers_do(iter); } + }; + DepList _data; + public: + OptionalReturnType find(const Args&... args) { + ArgumentsType a(args...); + for (int i = 0; i < _data.length(); i++) { + if (_data.at(i).arguments() == a) { + return OptionalReturnType(true, _data.at(i).result()); + } + } + return OptionalReturnType(false, ReturnType()); + } + bool append_if_missing(const ReturnType& result, const Args&... args) { + return _data.append_if_missing(Record(result, ArgumentsType(args...))); + } +#if INCLUDE_CDS + void remove_unshareable_info() { _data.remove_unshareable_info(); } +#endif + void prepare(ClassLoaderData* loader_data) { + _data.prepare(loader_data); + } + void metaspace_pointers_do(MetaspaceClosure *iter) { + _data.metaspace_pointers_do(iter); + } + }; + + +public: + // Record CI answers for the InlineSmallCode heuristic. It is importance since the heuristic is non-commutative and we may want to + // compile methods in a different order than in the training run. 
+ typedef ciMemoizedFunction ciMethod__inline_instructions_size_type; + ciMethod__inline_instructions_size_type ciMethod__inline_instructions_size; +#if INCLUDE_CDS + void remove_unshareable_info() { + ciMethod__inline_instructions_size.remove_unshareable_info(); + } +#endif + void prepare(ClassLoaderData* loader_data) { + ciMethod__inline_instructions_size.prepare(loader_data); + } + void metaspace_pointers_do(MetaspaceClosure *iter) { + ciMethod__inline_instructions_size.metaspace_pointers_do(iter); + } + }; + +private: + ciRecords _ci_records; + + CompileTrainingData(); + CompileTrainingData(MethodTrainingData* mtd, + int level, + int compile_id) + : TrainingData(), // empty key + _method(mtd), _level(level), _compile_id(compile_id), _init_deps_left(0) { } +public: + ciRecords& ci_records() { return _ci_records; } + static CompileTrainingData* make(CompileTask* task) NOT_CDS_RETURN_(nullptr); + + virtual CompileTrainingData* as_CompileTrainingData() const { return const_cast(this); }; + + MethodTrainingData* method() const { return _method; } + + int level() const { return _level; } + + int compile_id() const { return _compile_id; } + + int init_dep_count() const { + TrainingDataLocker::assert_locked(); + return _init_deps.length(); + } + KlassTrainingData* init_dep(int i) const { + TrainingDataLocker::assert_locked(); + return _init_deps.at(i); + } + void add_init_dep(KlassTrainingData* ktd) { + TrainingDataLocker::assert_locked(); + ktd->add_comp_dep(this); + _init_deps.append_if_missing(ktd); + } + void clear_init_deps() { + TrainingDataLocker::assert_locked(); + for (int i = 0; i < _init_deps.length(); i++) { + _init_deps.at(i)->remove_comp_dep(this); + } + _init_deps.clear(); + } + void dec_init_deps_left(KlassTrainingData* ktd); + int init_deps_left() const { + return Atomic::load(&_init_deps_left); + } + uint compute_init_deps_left(bool count_initialized = false); + + void notice_inlined_method(CompileTask* task, const methodHandle& method) NOT_CDS_RETURN; + + // The JIT looks at classes and objects too and can depend on their state. + // These simple calls just report the *possibility* of an observation. + void notice_jit_observation(ciEnv* env, ciBaseObject* what) NOT_CDS_RETURN; + + virtual void prepare(Visitor& visitor); + virtual void cleanup(Visitor& visitor) NOT_CDS_RETURN; + + void print_on(outputStream* st, bool name_only) const; + virtual void print_on(outputStream* st) const { print_on(st, false); } + virtual void print_value_on(outputStream* st) const { print_on(st, true); } + +#if INCLUDE_CDS + virtual void remove_unshareable_info(); +#endif + + virtual void metaspace_pointers_do(MetaspaceClosure* iter); + virtual MetaspaceObj::Type type() const { return CompileTrainingDataType; } + + virtual const char* internal_name() const { + return "{ compile training data }"; + }; + + virtual int size() const { + return (int)align_metadata_size(align_up(sizeof(CompileTrainingData), BytesPerWord)/BytesPerWord); + } + + void verify(); + + static CompileTrainingData* allocate(MethodTrainingData* mtd, int level, int compile_id) { + return TrainingData::allocate(mtd, level, compile_id); + } +}; + +// Record information about a method at the time compilation is requested. +class MethodTrainingData : public TrainingData { + friend TrainingData; + friend CompileTrainingData; + + // Used by CDS. These classes need to access the private default constructor. 
+  template <typename T> friend class CppVtableTesterA;
+  template <typename T> friend class CppVtableTesterB;
+  template <typename T> friend class CppVtableCloner;
+
+  KlassTrainingData* _klass;
+  Method* _holder;
+  CompileTrainingData* _last_toplevel_compiles[CompLevel_count - 1];
+  int _highest_top_level;
+  int _level_mask;  // bit-set of all possible levels
+  bool _was_inlined;
+  bool _was_toplevel;
+  // metadata snapshots of final state:
+  MethodCounters* _final_counters;
+  MethodData* _final_profile;
+
+  MethodTrainingData();
+  MethodTrainingData(Method* method, KlassTrainingData* ktd) : TrainingData(method) {
+    _klass = ktd;
+    _holder = method;
+    for (int i = 0; i < CompLevel_count - 1; i++) {
+      _last_toplevel_compiles[i] = nullptr;
+    }
+    _highest_top_level = CompLevel_none;
+    _level_mask = 0;
+    _was_inlined = _was_toplevel = false;
+  }
+
+  // Map a compilation level to a single bit; out-of-range levels map to an empty mask.
+  static int level_mask(int level) {
+    return ((level & 0xF) != level ? 0 : 1 << level);
+  }
+  // The highest recorded level is the index of the most significant bit set in the mask.
+  static CompLevel highest_level(int mask) {
+    if (mask == 0) return (CompLevel) 0;
+    int diff = (count_leading_zeros(level_mask(0)) - count_leading_zeros(mask));
+    return (CompLevel) diff;
+  }
+
+ public:
+  KlassTrainingData* klass() const { return _klass; }
+  bool has_holder() const { return _holder != nullptr; }
+  Method* holder() const { return _holder; }
+  bool only_inlined() const { return !_was_toplevel; }
+  bool never_inlined() const { return !_was_inlined; }
+  bool saw_level(CompLevel l) const { return (_level_mask & level_mask(l)) != 0; }
+  int highest_level() const { return highest_level(_level_mask); }
+  int highest_top_level() const { return _highest_top_level; }
+  MethodData* final_profile() const { return _final_profile; }
+
+  Symbol* name() const {
+    precond(has_holder());
+    return holder()->name();
+  }
+  Symbol* signature() const {
+    precond(has_holder());
+    return holder()->signature();
+  }
+
+  CompileTrainingData* last_toplevel_compile(int level) const {
+    if (level > CompLevel_none) {
+      return _last_toplevel_compiles[level - 1];
+    }
+    return nullptr;
+  }
+
+  void notice_compilation(int level, bool inlined = false) {
+    if (inlined) {
+      _was_inlined = true;
+    } else {
+      _was_toplevel = true;
+    }
+    _level_mask |= level_mask(level);
+  }
+
+  void notice_toplevel_compilation(int level) {
+    _highest_top_level = MAX2(_highest_top_level, level);
+  }
+
+  static MethodTrainingData* make(const methodHandle& method,
+                                  bool null_if_not_found = false,
+                                  bool use_cache = true) NOT_CDS_RETURN_(nullptr);
+  static MethodTrainingData* find_fast(const methodHandle& method) { return make(method, true, true); }
+  static MethodTrainingData* find(const methodHandle& method) { return make(method, true, false); }
+
+  virtual MethodTrainingData* as_MethodTrainingData() const {
+    return const_cast<MethodTrainingData*>(this);
+  };
+
+  void print_on(outputStream* st, bool name_only) const;
+  virtual void print_on(outputStream* st) const { print_on(st, false); }
+  virtual void print_value_on(outputStream* st) const { print_on(st, true); }
+
+  virtual void prepare(Visitor& visitor);
+  virtual void cleanup(Visitor& visitor) NOT_CDS_RETURN;
+
+  template <typename Function>
+  void iterate_compiles(Function fn) const { // lambda enabled API
+    for (int i = 0; i < CompLevel_count - 1; i++) {
+      CompileTrainingData* ctd = _last_toplevel_compiles[i];
+      if (ctd != nullptr) {
+        fn(ctd);
+      }
+    }
+  }
+
+  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
+  virtual MetaspaceObj::Type type() const { return MethodTrainingDataType; }
+
+#if INCLUDE_CDS
+  virtual void remove_unshareable_info();
+#endif
+
+  virtual int size() const {
+    return (int)align_metadata_size(align_up(sizeof(MethodTrainingData), BytesPerWord)/BytesPerWord);
+  }
+
+  virtual const char* internal_name() const {
+    return "{ method training data }";
+  };
+
+  void verify();
+
+  static MethodTrainingData* allocate(Method* m, KlassTrainingData* ktd) {
+    return TrainingData::allocate<MethodTrainingData>(m, ktd);
+  }
+};
+#endif // SHARE_OOPS_TRAININGDATA_HPP
diff --git a/src/hotspot/share/runtime/init.cpp b/src/hotspot/share/runtime/init.cpp
index 3756cdd4fd2d6..4805189a52300 100644
--- a/src/hotspot/share/runtime/init.cpp
+++ b/src/hotspot/share/runtime/init.cpp
@@ -31,6 +31,7 @@
 #include "logging/logAsyncWriter.hpp"
 #include "memory/universe.hpp"
 #include "nmt/memTracker.hpp"
+#include "oops/trainingData.hpp"
 #include "prims/downcallLinker.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/methodHandles.hpp"
@@ -185,6 +186,10 @@ jint init_globals2() {
   }
 #endif
 
+  if (TrainingData::have_data() || TrainingData::need_data()) {
+    TrainingData::initialize();
+  }
+
   if (!universe_post_init()) {
     return JNI_ERR;
   }
diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp
index faed0d5f95299..17d18b8a34558 100644
--- a/src/hotspot/share/runtime/mutexLocker.cpp
+++ b/src/hotspot/share/runtime/mutexLocker.cpp
@@ -89,6 +89,8 @@ Monitor* InitCompleted_lock = nullptr;
 Monitor* BeforeExit_lock = nullptr;
 Monitor* Notify_lock = nullptr;
 Mutex* ExceptionCache_lock = nullptr;
+Mutex* TrainingData_lock = nullptr;
+Monitor* TrainingReplayQueue_lock = nullptr;
 #ifndef PRODUCT
 Mutex* FullGCALot_lock = nullptr;
 #endif
@@ -254,6 +256,8 @@ void mutex_init() {
   MUTEX_DEFN(CompiledIC_lock , PaddedMutex , nosafepoint); // locks VtableStubs_lock
 
   MUTEX_DEFN(MethodCompileQueue_lock , PaddedMonitor, safepoint);
+  MUTEX_DEFL(TrainingData_lock , PaddedMutex , MethodCompileQueue_lock);
+  MUTEX_DEFN(TrainingReplayQueue_lock , PaddedMonitor, safepoint);
   MUTEX_DEFN(CompileStatistics_lock , PaddedMutex , safepoint);
   MUTEX_DEFN(DirectivesStack_lock , PaddedMutex , nosafepoint);
diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp
index 3bd01b575c4ea..2d6f0ee15952d 100644
--- a/src/hotspot/share/runtime/mutexLocker.hpp
+++ b/src/hotspot/share/runtime/mutexLocker.hpp
@@ -82,6 +82,8 @@ extern Mutex* Compile_lock; // a lock held when Compilation
 extern Monitor* MethodCompileQueue_lock; // a lock held when method compilations are enqueued, dequeued
 extern Monitor* CompileThread_lock; // a lock held by compile threads during compilation system initialization
 extern Monitor* Compilation_lock; // a lock used to pause compilation
+extern Mutex* TrainingData_lock; // a lock used when accessing training records
+extern Monitor* TrainingReplayQueue_lock; // a lock held when classes are added to or removed from the training replay queue
 extern Mutex* CompileTaskAlloc_lock; // a lock held when CompileTasks are allocated
 extern Mutex* CompileStatistics_lock; // a lock held when updating compilation statistics
 extern Mutex* DirectivesStack_lock; // a lock held when mutating the dirstack and ref counting directives
diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp
index 203062582a0e2..6d687376938ba 100644
--- a/src/hotspot/share/runtime/threads.cpp
+++ b/src/hotspot/share/runtime/threads.cpp
@@ -809,6 +809,11 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   // cache the system and platform class loaders
   SystemDictionary::compute_java_loaders(CHECK_JNI_ERR);
 
+  // Initiate replay training processing once preloading is over.
+  CompileBroker::init_training_replay();
+
+  AOTLinkedClassBulkLoader::replay_training_at_init_for_preloaded_classes(CHECK_JNI_ERR);
+
   if (Continuations::enabled()) {
     // Initialize Continuation class now so that failure to create enterSpecial/doYield
     // special nmethods due to limited CodeCache size can be treated as a fatal error at
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index f865380fdb7de..5850400fa6e75 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -1022,6 +1022,7 @@
   declare_type(ServiceThread, JavaThread) \
   declare_type(NotificationThread, JavaThread) \
   declare_type(CompilerThread, JavaThread) \
+  declare_type(TrainingReplayThread, JavaThread) \
   declare_type(StringDedupThread, JavaThread) \
   declare_type(AttachListenerThread, JavaThread) \
   DEBUG_ONLY(COMPILER2_OR_JVMCI_PRESENT( \
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/FileMapInfo.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/FileMapInfo.java
index e6a5d00ca1cb4..f8500276779e2 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/FileMapInfo.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/FileMapInfo.java
@@ -116,7 +116,8 @@ private static void initialize(TypeDataBase db) {
   }
 
   private static void populateMetadataTypeArray(TypeDataBase db) {
-    metadataTypeArray = new Type[9];
+    metadataTypeArray = new Type[11];
+    // The order needs to match up with CPP_VTABLE_TYPES_DO in src/hotspot/share/cds/cppVtables.cpp
     metadataTypeArray[0] = db.lookupType("ConstantPool");
     metadataTypeArray[1] = db.lookupType("InstanceKlass");
@@ -125,8 +126,10 @@ private static void populateMetadataTypeArray(TypeDataBase db) {
     metadataTypeArray[4] = db.lookupType("InstanceRefKlass");
     metadataTypeArray[5] = db.lookupType("InstanceStackChunkKlass");
     metadataTypeArray[6] = db.lookupType("Method");
-    metadataTypeArray[7] = db.lookupType("ObjArrayKlass");
-    metadataTypeArray[8] = db.lookupType("TypeArrayKlass");
+    metadataTypeArray[7] = db.lookupType("MethodData");
+    metadataTypeArray[8] = db.lookupType("MethodCounters");
+    metadataTypeArray[9] = db.lookupType("ObjArrayKlass");
+    metadataTypeArray[10] = db.lookupType("TypeArrayKlass");
   }
 
   public FileMapHeader getHeader() {
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java
index b6935963de9ef..352641b2c3c67 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java
@@ -151,6 +151,7 @@ private static synchronized void initialize(TypeDataBase db) {
 
     if (!VM.getVM().isCore()) {
       virtualConstructor.addMapping("CompilerThread", CompilerThread.class);
+      virtualConstructor.addMapping("TrainingReplayThread", TrainingReplayThread.class);
     }
 
     // These are all the visible JavaThread subclasses that execute java code.
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/TrainingReplayThread.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/TrainingReplayThread.java
new file mode 100644
index 0000000000000..4980d149feb38
--- /dev/null
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/TrainingReplayThread.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.runtime;
+
+import sun.jvm.hotspot.debugger.Address;
+
+public class TrainingReplayThread extends JavaThread {
+  public TrainingReplayThread(Address addr) {
+    super(addr);
+  }
+
+  public boolean isHiddenFromExternalView() { return true; }
+}
diff --git a/test/hotspot/jtreg/runtime/cds/appcds/aotProfile/AOTProfileFlags.java b/test/hotspot/jtreg/runtime/cds/appcds/aotProfile/AOTProfileFlags.java
new file mode 100644
index 0000000000000..f4de002071d04
--- /dev/null
+++ b/test/hotspot/jtreg/runtime/cds/appcds/aotProfile/AOTProfileFlags.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @summary Sanity test of combinations of the diagnostic flags [+-]AOTRecordTraining and [+-]AOTReplayTraining
+ * @requires vm.cds
+ * @comment work around JDK-8345635
+ * @requires !vm.jvmci.enabled
+ * @library /test/lib /test/setup_aot
+ * @build AOTProfileFlags JavacBenchApp
+ * @run driver jdk.test.lib.helpers.ClassFileInstaller -jar app.jar
+ *             JavacBenchApp
+ *             JavacBenchApp$ClassFile
+ *             JavacBenchApp$FileManager
+ *             JavacBenchApp$SourceFile
+ * @run driver AOTProfileFlags
+ */
+
+import jdk.test.lib.cds.SimpleCDSAppTester;
+
+public class AOTProfileFlags {
+    public static void main(String... args) throws Exception {
+        for (int i = 0; i < 2; i++) {
+            for (int j = 0; j < 2; j++) {
+                if (i == 1 && j == 1) {
+                    // They are both on by default. No need to test this combination.
+                    break;
+                }
+                SimpleCDSAppTester.of("AOTProfileFlags" + i + "" + j)
+                    .addVmArgs("-XX:+UnlockDiagnosticVMOptions",
+                               "-XX:" + (i == 0 ? "-" : "+") + "AOTRecordTraining",
+                               "-XX:" + (j == 0 ? "-" : "+") + "AOTReplayTraining")
+                    .classpath("app.jar")
+                    .appCommandLine("JavacBenchApp", "10")
+                    .runAOTWorkflow();
+            }
+        }
+    }
+}