diff --git a/CMakeLists.txt b/CMakeLists.txt index 0dba8dce28f18530fd505dc6f097f2f96cdb2770..08322d349f46c624085fa7966fb51be87b4569ae 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -338,3 +338,8 @@ add_subdirectory(extras) # ----- Quickening tool -------------------------------------------------------- add_subdirectory(quickener) + +# ----- Panda tools ----------------------------------------------- + +add_subdirectory(tools) + diff --git a/assembler/assembly-type.h b/assembler/assembly-type.h index ab001d56e31986caf06c6248a3bd496fae608b33..4df4bcde5df24fc0d9f4574797b8a680f8b6ea81 100644 --- a/assembler/assembly-type.h +++ b/assembler/assembly-type.h @@ -27,6 +27,7 @@ public: enum VerificationType { TYPE_ID_OBJECT, TYPE_ID_ARRAY, + TYPE_ID_CLASS, TYPE_ID_ANY_OBJECT, }; diff --git a/assembler/templates/ins_create_api.h.erb b/assembler/templates/ins_create_api.h.erb index 85b823c8e6283d6515fa6cee547b695b03dd6bf7..23988a9c9649bd029f9bb6acf87da3f4625fd0e9 100644 --- a/assembler/templates/ins_create_api.h.erb +++ b/assembler/templates/ins_create_api.h.erb @@ -24,8 +24,10 @@ namespace panda::pandasm { % insn = group.first % signature = assembler_signature(group, insn.jump?) 
% signature_str = signature.map { |o| "#{o.type} #{o.name}" }.join(', ') +// NOLINTNEXTLINE(readability-identifier-naming) inline Ins Create_<%= insn.asm_token %>(<%= signature_str %>) { + // NOLINTNEXTLINE(readability-identifier-naming) Ins <%=insn.emitter_name%>_; <%=group.first.emitter_name%>_.opcode = Opcode::<%= insn.asm_token %>; % format = format_ops(insn.format).select { |o| o.name != 'prof' } diff --git a/assembler/templates/ins_emit.h.erb b/assembler/templates/ins_emit.h.erb index c5aa20c1d9918136ff3309f6527ff68a9f3904cc..228b19beee20915413e10c97314da0c84b3f4632 100644 --- a/assembler/templates/ins_emit.h.erb +++ b/assembler/templates/ins_emit.h.erb @@ -75,7 +75,7 @@ bool Ins::Emit(BytecodeEmitter& emitter, panda_file::MethodItem *method, % elsif insn.properties.include?('string_id') % ops << 'strings.find(ids[0])->second->GetOffset()' % elsif insn.properties.include?('literalarray_id') -% ops << 'static_cast(literalarrays.find(ids[0])->second->GetOffset())' +% ops << 'static_cast(literalarrays.find(ids[0])->second->GetIndex())' % else % raise "Unexpected ID type" % end diff --git a/assembler/templates/opcode_parsing.h.erb b/assembler/templates/opcode_parsing.h.erb index 5d10367eb3aa4ed00c65d161372906fcce5b7cd3..28f2af9f8aa44aaab2d5b6502fafcaf4c69298c4 100644 --- a/assembler/templates/opcode_parsing.h.erb +++ b/assembler/templates/opcode_parsing.h.erb @@ -97,6 +97,8 @@ bool Parser::ParseOperands() ParseOperandType(Type::VerificationType::TYPE_ID_ARRAY); % elsif (verification.include?("type_id_object")) ParseOperandType(Type::VerificationType::TYPE_ID_OBJECT); +% elsif (verification.include?("type_id_class")) + ParseOperandType(Type::VerificationType::TYPE_ID_CLASS); % elsif (verification.include?("type_id_any_object")) ParseOperandType(Type::VerificationType::TYPE_ID_ANY_OBJECT); % end diff --git a/cmake/Definitions.cmake b/cmake/Definitions.cmake index b8f9ffa3288b69a63922f3bb56d80a8422e2bbc1..fedaaaffaabea8fde36889d00208dd26637e451f 100644 --- 
a/cmake/Definitions.cmake +++ b/cmake/Definitions.cmake @@ -276,9 +276,9 @@ panda_promote_to_definitions( option(PANDA_CROSS_COMPILER "Enable compiler cross-compilation support" ON) option(PANDA_COMPILER_TARGET_X86 "Build x86-backend") -option(PANDA_COMPILER_TARGET_X86_64 "Build x86_64-backend") -option(PANDA_COMPILER_TARGET_AARCH32 "Build aarch32-backend") -option(PANDA_COMPILER_TARGET_AARCH64 "Build aarch64-backend") +option(PANDA_COMPILER_TARGET_X86_64 "Build x86_64-backend" ON) +option(PANDA_COMPILER_TARGET_AARCH32 "Build aarch32-backend" ON) +option(PANDA_COMPILER_TARGET_AARCH64 "Build aarch64-backend" ON) # User-specified cross-toolchains: option(PANDA_CROSS_X86_64_TOOLCHAIN_FILE "Absolute path to X86_64 target toolchain" OFF) option(PANDA_CROSS_AARCH64_TOOLCHAIN_FILE "Absolute path to AARCH64 target toolchain" OFF) @@ -300,24 +300,28 @@ if (PANDA_TARGET_AMD64) find_program(GCC_AARCH64_CXX "aarch64-linux-gnu-g++") find_program(GCC_ARM_CXX "arm-linux-gnueabi-g++") - if (PANDA_CROSS_AARCH64_TOOLCHAIN_FILE) - set(PANDA_COMPILER_TARGET_AARCH64 ON) - message(STATUS "Specified AARCH64 toolchain: ${PANDA_CROSS_AARCH64_TOOLCHAIN_FILE}") - elseif (GCC_AARCH64_CXX) - set(PANDA_COMPILER_TARGET_AARCH64 ON) - message(STATUS "Detected default AARCH64 toolchain") - else() - message(STATUS "No AARCH64 toolchain found") + # The option is ON by default; this 'if' allows to check if the target wasn't turned off explicitly: + if (PANDA_COMPILER_TARGET_AARCH64) + if (PANDA_CROSS_AARCH64_TOOLCHAIN_FILE) + message(STATUS "Specified AARCH64 toolchain: ${PANDA_CROSS_AARCH64_TOOLCHAIN_FILE}") + elseif (GCC_AARCH64_CXX) + message(STATUS "Detected default AARCH64 toolchain") + else() + set(PANDA_COMPILER_TARGET_AARCH64 OFF) + message(STATUS "No AARCH64 toolchain found") + endif() endif() - if (PANDA_CROSS_AARCH32_TOOLCHAIN_FILE) - set(PANDA_COMPILER_TARGET_AARCH32 ON) - message(STATUS "Specified AARCH32 toolchain: ${PANDA_CROSS_AARCH32_TOOLCHAIN_FILE}") - elseif (GCC_ARM_CXX) - 
set(PANDA_COMPILER_TARGET_AARCH32 ON) - message(STATUS "Detected default AARCH32 toolchain") - else() - message(STATUS "No AARCH32 toolchain found") + # The option is ON by default; this 'if' allows to check if the target wasn't turned off explicitly: + if (PANDA_COMPILER_TARGET_AARCH32) + if (PANDA_CROSS_AARCH32_TOOLCHAIN_FILE) + message(STATUS "Specified AARCH32 toolchain: ${PANDA_CROSS_AARCH32_TOOLCHAIN_FILE}") + elseif (GCC_ARM_CXX) + message(STATUS "Detected default AARCH32 toolchain") + else() + set(PANDA_COMPILER_TARGET_AARCH32 OFF) + message(STATUS "No AARCH32 toolchain found") + endif() endif() # TODO(dkofanov): cross-values do not support x86 set(PANDA_COMPILER_TARGET_X86 OFF) @@ -328,19 +332,32 @@ if (PANDA_TARGET_AMD64) endif() if (PANDA_TARGET_X86) - set(PANDA_COMPILER_TARGET_X86 ON) + set(PANDA_COMPILER_TARGET_X86 ON) + set(PANDA_COMPILER_TARGET_X86_64 OFF) + set(PANDA_COMPILER_TARGET_AARCH32 OFF) + set(PANDA_COMPILER_TARGET_AARCH64 OFF) endif() if (PANDA_TARGET_ARM32) if(PANDA_TARGET_ARM32_ABI_SOFT) set(PANDA_COMPILER_ENABLE FALSE) + set(PANDA_COMPILER_TARGET_X86 OFF) + set(PANDA_COMPILER_TARGET_X86_64 OFF) + set(PANDA_COMPILER_TARGET_AARCH32 OFF) + set(PANDA_COMPILER_TARGET_AARCH64 OFF) else() - set(PANDA_COMPILER_TARGET_AARCH32 ON) + set(PANDA_COMPILER_TARGET_X86 OFF) + set(PANDA_COMPILER_TARGET_X86_64 OFF) + set(PANDA_COMPILER_TARGET_AARCH32 ON) + set(PANDA_COMPILER_TARGET_AARCH64 OFF) endif() endif() if (PANDA_TARGET_ARM64) - set(PANDA_COMPILER_TARGET_AARCH64 ON) + set(PANDA_COMPILER_TARGET_X86 OFF) + set(PANDA_COMPILER_TARGET_X86_64 OFF) + set(PANDA_COMPILER_TARGET_AARCH32 OFF) + set(PANDA_COMPILER_TARGET_AARCH64 ON) endif() option(PANDA_ENABLE_BYTECODE_PROFILING "Enable bytecode profiling" OFF) diff --git a/cmake/HostTools.cmake b/cmake/HostTools.cmake index 44ab9aa405b88eb47c7c6daa2acdf07ec61dfc90..59732e5b6ae3e726093b092662552c8faa2f6857 100644 --- a/cmake/HostTools.cmake +++ b/cmake/HostTools.cmake @@ -82,6 +82,7 @@ 
function(panda_configure_host_tools) ark_asm c_secshared zlib + arkquick ) add_custom_target(irtoc_fastpath) diff --git a/compiler/cmake/target.cmake b/compiler/cmake/target.cmake index 752bcc1ab536c26c7feed57fb23d25937034ca76..f8caa114ce3b361b24e1ef8215357e26a8f72238 100644 --- a/compiler/cmake/target.cmake +++ b/compiler/cmake/target.cmake @@ -34,10 +34,10 @@ panda_promote_to_definitions( if (PANDA_COMPILER_TARGET_AARCH32) if (PANDA_TARGET_ARM64) - message(ERROR "Unimplemented multi-build aarch32 on aarch64-target") + message(FATAL_ERROR "Unimplemented multi-build aarch32 on aarch64-target") endif() if (PANDA_TARGET_X86) - message(ERROR "Unimplemented multi-build aarch32 on x86-build-target") + message(FATAL_ERROR "Unimplemented multi-build aarch32 on x86-build-target") endif() # ABI must be defined for build on non-arm archs if (NOT(PANDA_TARGET_ARM32_ABI_SOFT OR PANDA_TARGET_ARM32_ABI_SOFTFP OR PANDA_TARGET_ARM32_ABI_HARD)) @@ -47,10 +47,10 @@ endif() if (PANDA_COMPILER_TARGET_AARCH64) if (PANDA_TARGET_ARM32) - message(ERROR "Unimplemented multi-build aarch64 on aarch32-target") + message(FATAL_ERROR "Unimplemented multi-build aarch64 on aarch32-target") endif() if (PANDA_TARGET_X86) - message(ERROR "Unimplemented multi-build aarch64 on x86-build-target") + message(FATAL_ERROR "Unimplemented multi-build aarch64 on x86-build-target") endif() endif() diff --git a/compiler/optimizer/code_generator/callconv.h b/compiler/optimizer/code_generator/callconv.h index edfd438c7b436a030eca2905cd34de672b9e6379..a111e6699b72d550e5d4669718683c803043926a 100644 --- a/compiler/optimizer/code_generator/callconv.h +++ b/compiler/optimizer/code_generator/callconv.h @@ -122,14 +122,14 @@ public: // Compile for osr (jit - otherwise) DECLARE_CALLCONV_MODE(Osr); // The method from dynamic language - DECLARE_CALLCONV_MODE(Dyn); + DECLARE_CALLCONV_MODE(DynCall); #undef DECLARE_CALLCONV_MODE private: using FlagPanda = BitField; using FlagOsr = FlagPanda::NextFlag; - using FlagDyn = 
FlagOsr::NextFlag; + using FlagDynCall = FlagOsr::NextFlag; uint32_t value_ {0}; @@ -141,6 +141,54 @@ inline CallConvMode operator|(CallConvMode a, CallConvMode b) return CallConvMode(a.value_ | b.value_); } +/** + * Holds specific information about dynamic call mode + */ +class CallConvDynInfo { +public: + // Fixed parameter regs + enum : uint8_t { + REG_METHOD = 0, + REG_NUM_ARGS, + REG_COUNT, + }; + + // Call frame slots + enum : uint8_t { + FIXED_SLOT_COUNT = 0, + SLOT_CALLEE = FIXED_SLOT_COUNT, + }; + + explicit CallConvDynInfo() = default; + + explicit CallConvDynInfo(uint32_t num_expected_args, uintptr_t expand_entrypoint_tls_offset) + : expand_entrypoint_tls_offset_(expand_entrypoint_tls_offset), + num_expected_args_(num_expected_args), + check_required_(true) + { + } + + auto GetExpandEntrypointTlsOffset() + { + return expand_entrypoint_tls_offset_; + } + + auto GetNumExpectedArgs() + { + return num_expected_args_; + } + + auto IsCheckRequired() + { + return check_required_; + } + +private: + uintptr_t expand_entrypoint_tls_offset_ {0}; + uint32_t num_expected_args_ {0}; + bool check_required_ {false}; +}; + /** * CallConv - just holds information about calling convention in current architecture. 
*/ @@ -192,6 +240,16 @@ public: return false; } + void SetDynInfo(CallConvDynInfo dyn_info) + { + dyn_info_ = dyn_info; + } + + CallConvDynInfo &GetDynInfo() + { + return dyn_info_; + } + CallConvMode GetMode() const { return mode_; @@ -207,9 +265,9 @@ public: return mode_.IsOsr(); } - bool IsDynMode() const + bool IsDynCallMode() const { - return mode_.IsDyn(); + return mode_.IsDynCall(); } #ifdef PANDA_COMPILER_DEBUG_INFO @@ -258,6 +316,7 @@ private: #ifdef PANDA_COMPILER_DEBUG_INFO CfiInfo cfi_info_; #endif + CallConvDynInfo dyn_info_ {}; CallConvMode mode_ {0}; }; } // namespace panda::compiler diff --git a/compiler/optimizer/code_generator/codegen.cpp b/compiler/optimizer/code_generator/codegen.cpp index 4a254f0e46c6fdf237667f5b580b90f9d87d1919..bca2a5919b357f854dc7aa6d1ccb409992c4d3eb 100644 --- a/compiler/optimizer/code_generator/codegen.cpp +++ b/compiler/optimizer/code_generator/codegen.cpp @@ -60,7 +60,7 @@ class OsrEntryStub { } else if (location.IsStack()) { auto slot = location.GetValue(); encoder->EncodeSti( - Imm(inst->CastToConstant()->GetRawValue()), + bit_cast(inst->CastToConstant()->GetRawValue()), DOUBLE_WORD_SIZE_BYTES, MemRef(codegen->SpReg(), codegen->GetFrameLayout().GetSpillOffsetFromSpInBytes(slot))); } else { ASSERT(location.IsConstant()); @@ -168,7 +168,7 @@ size_t Codegen::GetLanguageExtensionOffsetFromSpInBytes() { auto frame_layout = GetFrameLayout(); size_t lang_ext_slots_count = - GetGraph()->GetRuntime()->GetLanguageExtensionSize() / PointerSize(GetGraph()->GetArch()); + GetGraph()->GetRuntime()->GetLanguageExtensionSize(GetGraph()->GetArch()) / PointerSize(GetGraph()->GetArch()); return frame_layout.GetSpillOffsetFromSpInBytes(lang_ext_slots_count + GetGraph()->GetExtSlotsStart() - 1); } @@ -367,11 +367,10 @@ void Codegen::GeneratePrologue() GetCallingConvention()->GeneratePrologue(*frame_info_); if (!GetGraph()->GetMode().IsNative()) { - GetEncoder()->EncodeSti(Imm(static_cast(1)), - MemRef(ThreadReg(), 
GetRuntime()->GetTlsFrameKindOffset(GetArch()))); + GetEncoder()->EncodeSti(1, 1, MemRef(ThreadReg(), GetRuntime()->GetTlsFrameKindOffset(GetArch()))); } if (GetGraph()->IsDynamicMethod()) { - GenerateExtensionsForPrologue(); + GenerateExtensionsForPrologue(GetRuntime()->GetMethodSourceLanguage(GetGraph()->GetMethod())); } if (!GetGraph()->GetMode().IsNative()) { // Create stack overflow check @@ -386,12 +385,12 @@ void Codegen::GeneratePrologue() SCOPED_DISASM_STR(this, "LoadMethod for trace"); ScopedTmpReg method_reg(GetEncoder()); LoadMethod(method_reg); - InsertTrace({Imm(static_cast(TraceId::METHOD_ENTER)), method_reg, - Imm(static_cast(events::MethodEnterKind::COMPILED))}); + InsertTrace(Imm(static_cast(TraceId::METHOD_ENTER)), method_reg, + Imm(static_cast(events::MethodEnterKind::COMPILED))); } else { - InsertTrace({Imm(static_cast(TraceId::METHOD_ENTER)), - Imm(reinterpret_cast(GetGraph()->GetMethod())), - Imm(static_cast(events::MethodEnterKind::COMPILED))}); + InsertTrace(Imm(static_cast(TraceId::METHOD_ENTER)), + Imm(reinterpret_cast(GetGraph()->GetMethod())), + Imm(static_cast(events::MethodEnterKind::COMPILED))); } #endif } @@ -401,7 +400,7 @@ void Codegen::GenerateEpilogue() SCOPED_DISASM_STR(this, "Method Epilogue"); if (GetGraph()->IsDynamicMethod()) { - GenerateExtensionsForEpilogue(); + GenerateExtensionsForEpilogue(GetRuntime()->GetMethodSourceLanguage(GetGraph()->GetMethod())); } #if defined(EVENT_METHOD_EXIT_ENABLED) && EVENT_METHOD_EXIT_ENABLED != 0 @@ -409,12 +408,12 @@ void Codegen::GenerateEpilogue() if (GetGraph()->IsAotMode()) { ScopedTmpReg method_reg(GetEncoder()); LoadMethod(method_reg); - InsertTrace({Imm(static_cast(TraceId::METHOD_EXIT)), method_reg, - Imm(static_cast(events::MethodExitKind::COMPILED))}); + InsertTrace(Imm(static_cast(TraceId::METHOD_EXIT)), method_reg, + Imm(static_cast(events::MethodExitKind::COMPILED))); } else { - InsertTrace({Imm(static_cast(TraceId::METHOD_EXIT)), - 
Imm(reinterpret_cast(GetGraph()->GetMethod())), - Imm(static_cast(events::MethodExitKind::COMPILED))}); + InsertTrace(Imm(static_cast(TraceId::METHOD_EXIT)), + Imm(reinterpret_cast(GetGraph()->GetMethod())), + Imm(static_cast(events::MethodExitKind::COMPILED))); } }); #else @@ -559,6 +558,19 @@ bool Codegen::RunImpl() return false; } + if (GetCallingConvention()->IsDynCallMode()) { + auto num_expected_args = GetRuntime()->GetMethodTotalArgumentsCount(GetGraph()->GetMethod()); + if (num_expected_args > GetRuntime()->GetDynamicNumFixedArgs()) { + CallConvDynInfo dyn_info(num_expected_args, + GetRuntime()->GetEntrypointTlsOffset( + GetArch(), RuntimeInterface::EntrypointId::EXPAND_COMPILED_CODE_ARGS_DYN)); + GetCallingConvention()->SetDynInfo(dyn_info); + frame_info_->SetSaveFrameAndLinkRegs(true); + } else { + GetCallingConvention()->SetDynInfo(CallConvDynInfo()); + } + } + if (!GetEncoder()->GetResult()) { return false; } @@ -708,48 +720,6 @@ Reg Codegen::ConvertRegister(Register r, DataType::Type type) } } -Imm Codegen::ConvertImm(uint64_t imm, DataType::Type type) -{ - switch (type) { - // NOLINTNEXTLINE(bugprone-branch-clone) - case DataType::BOOL: - case DataType::UINT8: - return Imm(bit_cast(imm)); - case DataType::INT8: - return Imm(bit_cast(imm)); - // NOLINTNEXTLINE(bugprone-branch-clone) - case DataType::UINT16: - return Imm(bit_cast(imm)); - case DataType::INT16: - return Imm(bit_cast(imm)); - // NOLINTNEXTLINE(bugprone-branch-clone) - case DataType::UINT32: - return Imm(bit_cast(imm)); - case DataType::INT32: - return Imm(bit_cast(imm)); - case DataType::ANY: - return Imm(bit_cast(imm)); - // NOLINTNEXTLINE(bugprone-branch-clone) - case DataType::UINT64: - return Imm(bit_cast(imm)); - case DataType::INT64: - return Imm(bit_cast(imm)); - case DataType::FLOAT32: - return Imm(bit_cast(static_cast(imm))); - case DataType::FLOAT64: - return Imm(bit_cast(imm)); - case DataType::REFERENCE: - if (imm == 0) { - return Imm(0); - } - [[fallthrough]]; /* fall-through 
*/ - default: - // Invalid converted immediate - UNREACHABLE(); - } - return INVALID_IMM; -} - // Panda don't support types less then 32, so we need sign or zero extended to 32 Imm Codegen::ConvertImmWithExtend(uint64_t imm, DataType::Type type) { @@ -788,37 +758,7 @@ Imm Codegen::ConvertImmWithExtend(uint64_t imm, DataType::Type type) // Invalid converted immediate UNREACHABLE(); } - return INVALID_IMM; -} - -Imm Codegen::ConvertImm(ConstantInst *const_inst, DataType::Type type) -{ - switch (type) { - case DataType::BOOL: - case DataType::UINT8: - case DataType::INT8: - case DataType::UINT16: - case DataType::INT16: - case DataType::UINT32: - case DataType::INT32: - return Imm(static_cast(const_inst->GetIntValue())); - case DataType::UINT64: - case DataType::INT64: - if (const_inst->GetType() == DataType::ANY) { - return Imm(const_inst->GetRawValue()); - } - return Imm(static_cast(const_inst->GetIntValue())); - case DataType::FLOAT32: - return Imm(const_inst->GetFloatValue()); - case DataType::FLOAT64: - return Imm(const_inst->GetDoubleValue()); - case DataType::ANY: - return Imm(const_inst->GetRawValue()); - default: - // Invalid converted immediate - UNREACHABLE(); - } - return INVALID_IMM; + return Imm(0); } Condition Codegen::ConvertCc(ConditionCode cc) @@ -1069,28 +1009,6 @@ void Codegen::CreateOsrEntry(SaveStateInst *save_state) GetEncoder()->BindLabel(stub->GetLabel()); } -/** - * Insert tracing code to the generated code. See `Trace` method in the `runtime/entrypoints.cpp`. - * TODO(compiler): we should rework parameters assigning algorithm, that is duplicated here. - * @param params parameters to be passed to the TRACE entrypoint, first parameter must be TraceId value. 
- */ -void Codegen::InsertTrace(std::initializer_list> params) -{ - SCOPED_DISASM_STR(this, "Trace"); - [[maybe_unused]] constexpr size_t MAX_PARAM_NUM = 8; - ASSERT(params.size() <= MAX_PARAM_NUM); - auto regfile = GetRegfile(); - auto save_regs = regfile->GetCallerSavedRegMask(); - save_regs.set(GetTarget().GetReturnRegId()); - auto save_vregs = regfile->GetCallerSavedVRegMask(); - save_vregs.set(GetTarget().GetReturnFpRegId()); - - SaveCallerRegisters(save_regs, save_vregs, false); - FillCallParams(params); - EmitCallRuntimeCode(nullptr, EntrypointId::TRACE); - LoadCallerRegisters(save_regs, save_vregs, false); -} - void Codegen::CallIntrinsic(Inst *inst, RuntimeInterface::IntrinsicId id) { SCOPED_DISASM_STR(this, "CallIntrinsic"); @@ -1104,55 +1022,6 @@ void Codegen::CallIntrinsic(Inst *inst, RuntimeInterface::IntrinsicId id) } } -void Codegen::FillCallParams(const std::initializer_list> ¶ms) -{ - SCOPED_DISASM_STR(this, "FillCallParams"); - // Native call - do not add reserve parameters - auto param_info = GetCallingConvention()->GetParameterInfo(0); - ArenaVector> immediates(GetLocalAllocator()->Adapter()); - ArenaVector sp_moves(GetLocalAllocator()->Adapter()); - auto reg_moves = GetGraph()->CreateInstSpillFill(); - for (auto ¶m : params) { - Location dst; - if (std::holds_alternative(param)) { - auto reg = std::get(param); - auto type = reg.GetType().ToDataType(); - dst = param_info->GetNextLocation(type); - if (reg == SpReg()) { - // SP should be handled separately, since on the ARM64 target it has ID out of range - sp_moves.emplace_back(ConvertRegister(dst.GetValue(), type)); - } else { - reg_moves->AddSpillFill(Location::MakeRegister(reg.GetId(), type), dst, type); - } - } else { - auto imm = std::get(param); - auto type = imm.GetType().ToDataType(); - dst = param_info->GetNextLocation(type); - auto reg = ConvertRegister(dst.GetValue(), type); - immediates.emplace_back(reg, imm); - } - - if (dst.IsStackArgument()) { - GetEncoder()->SetFalseResult(); - 
UNREACHABLE(); // Move to BoundaryFrame - } - } - - // Resolve registers move order and encode - spill_fills_resolver_.ResolveIfRequired(reg_moves); - SpillFillEncoder(this, reg_moves).EncodeSpillFill(); - - // Encode immediates moves - for (auto &imm_values : immediates) { - GetEncoder()->EncodeMov(imm_values.first, imm_values.second); - } - - // Encode moves from SP reg - for (auto dst : sp_moves) { - GetEncoder()->EncodeMov(dst, SpReg()); - } -} - bool Codegen::EmitCallRuntimeCode(Inst *inst, EntrypointId id) { MemRef entry(ThreadReg(), GetRuntime()->GetEntrypointTlsOffset(GetArch(), id)); @@ -1177,88 +1046,6 @@ bool Codegen::EmitCallRuntimeCode(Inst *inst, EntrypointId id) return true; } -[[maybe_unused]] static bool EnsureParamsFitIn32Bit(std::initializer_list> params) -{ - for (auto ¶m : params) { - if (std::holds_alternative(param)) { - auto reg = std::get(param); - if (reg.GetSize() > WORD_SIZE) { - return false; - } - } else { - auto imm = std::get(param); - if (imm.GetSize() > WORD_SIZE) { - return false; - } - } - } - return true; -} - -void Codegen::CallRuntime(Inst *inst, EntrypointId id, Reg dst_reg, - std::initializer_list> params, RegMask preserved_regs) -{ - ASSERT(inst != nullptr); - CHECK_EQ(params.size(), GetRuntime()->GetEntrypointArgsNum(id)); - if (GetArch() == Arch::AARCH32) { - // There is a problem with 64-bit parameters: - // params number passed from entrypoints_gen.S.erb will be inconsistent with Aarch32 ABI. - // Thus, runtime bridges will have wrong params number (\paramsnum macro argument). 
- ASSERT(EnsureParamsFitIn32Bit(params)); - ASSERT(!dst_reg.IsValid() || dst_reg.GetSize() <= WORD_SIZE); - } - - SCOPED_DISASM_STR(this, std::string("CallRuntime: ") + GetRuntime()->GetEntrypointName(id)); - RegMask live_regs {preserved_regs | GetLiveRegisters(inst).first}; - RegMask params_mask; - if (inst->HasImplicitRuntimeCall() && !GetRuntime()->IsEntrypointNoreturn(id)) { - SaveRegistersForImplicitRuntime(inst, ¶ms_mask, &live_regs); - } - // parameter regs: their initial values must be stored by the caller - // Other caller regs stored in bridges - FillOnlyParameters(&live_regs, params.size()); - SaveCallerRegisters(live_regs, VRegMask(), true); - - if (params.size() != 0) { - FillCallParams(params); - } - - // Call Code - if (!EmitCallRuntimeCode(inst, id)) { - return; - } - if (dst_reg.IsValid()) { - ASSERT(dst_reg.IsScalar()); - Reg ret_val = GetTarget().GetReturnReg(dst_reg.GetType()); - if (dst_reg.GetId() != ret_val.GetId()) { - GetEncoder()->SetRegister(&live_regs, nullptr, ret_val, true); - } - - // We must: - // sign extended INT8 and INT16 to INT32 - // zero extended UINT8 and UINT16 to UINT32 - if (dst_reg.GetSize() < WORD_SIZE) { - bool is_signed = DataType::IsTypeSigned(inst->GetType()); - GetEncoder()->EncodeCast(dst_reg, is_signed, ret_val, is_signed); - } else { - GetEncoder()->EncodeMov(dst_reg, ret_val); - } - } - - GetEncoder()->SetRegister(&live_regs, nullptr, dst_reg, false); - LoadCallerRegisters(live_regs, VRegMask(), true); - - if (!inst->HasImplicitRuntimeCall()) { - return; - } - ASSERT(!GetRuntime()->IsEntrypointNoreturn(id)); - for (auto i = 0U; i < params_mask.size(); i++) { - if (params_mask.test(i)) { - inst->GetSaveState()->GetRootsRegsMask().reset(i); - } - } -} - void Codegen::SaveRegistersForImplicitRuntime(Inst *inst, RegMask *params_mask, RegMask *mask) { auto &roots_mask = inst->GetSaveState()->GetRootsRegsMask(); @@ -1300,15 +1087,16 @@ void Codegen::CreateDebugRuntimeCallsForNewObject(Inst *inst, [[maybe_unused]] R if 
(OPTIONS.IsCompilerEnableTlabEvents()) { static constexpr size_t TYPE_INDEX = 1; - InsertTrace({Imm(static_cast(TraceId::TLAB_EVENT)), Imm(TYPE_INDEX), reg_tlab_start, Imm(alloc_size)}); + InsertTrace(TypedImm(static_cast(TraceId::TLAB_EVENT)), TypedImm(TYPE_INDEX), reg_tlab_start, + TypedImm(alloc_size)); } #endif #if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) - CallRuntime(inst, EntrypointId::ANNOTATE_SANITIZERS, INVALID_REGISTER, {reg_tlab_start, Imm(alloc_size)}, - preserved); + CallRuntime(inst, EntrypointId::ANNOTATE_SANITIZERS, INVALID_REGISTER, preserved, reg_tlab_start, + TypedImm(alloc_size)); #endif if (GetRuntime()->IsTrackTlabAlloc()) { - CallRuntime(inst, EntrypointId::WRITE_TLAB_STATS, INVALID_REGISTER, {Imm(alloc_size)}, preserved); + CallRuntime(inst, EntrypointId::WRITE_TLAB_STATS, INVALID_REGISTER, preserved, TypedImm(alloc_size)); } } @@ -1324,12 +1112,12 @@ void Codegen::CreateNewObjCall(NewObjectInst *new_obj) auto max_tlab_size = runtime->GetTLABMaxSize(); if (max_tlab_size == 0 || init_class->GetOpcode() != Opcode::LoadAndInitClass) { - CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, {src}); + CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, RegMask::GetZeroMask(), src); return; } auto klass = init_class->CastToLoadAndInitClass()->GetClass(); if (klass == nullptr || !runtime->CanUseTlabForClass(klass)) { - CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, {src}); + CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, RegMask::GetZeroMask(), src); return; } auto class_size = runtime->GetClassSize(klass); @@ -1337,7 +1125,7 @@ void Codegen::CreateNewObjCall(NewObjectInst *new_obj) class_size = (class_size & ~(alignment - 1U)) + ((class_size % alignment) != 0U ? 
alignment : 0U); if (class_size > max_tlab_size) { - CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, {src}); + CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, RegMask::GetZeroMask(), src); return; } @@ -1346,7 +1134,7 @@ void Codegen::CreateNewObjCall(NewObjectInst *new_obj) SaveCallerRegisters(param_regs, VRegMask(), false); - FillCallParams({src_class, Imm(class_size)}); + FillCallParams(src_class, TypedImm(class_size)); MemRef entry(ThreadReg(), GetRuntime()->GetEntrypointTlsOffset(GetArch(), EntrypointId::ALLOCATE_OBJECT_TLAB)); encoder->MakeCall(entry); CreateStackMap(new_obj); @@ -1366,12 +1154,12 @@ void Codegen::CreateNewObjCallOld(NewObjectInst *new_obj) auto encoder = GetEncoder(); if (max_tlab_size == 0 || init_class->GetOpcode() != Opcode::LoadAndInitClass) { - CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, {src}); + CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, RegMask::GetZeroMask(), src); return; } auto klass = init_class->CastToLoadAndInitClass()->GetClass(); if (klass == nullptr || !runtime->CanUseTlabForClass(klass)) { - CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, {src}); + CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, RegMask::GetZeroMask(), src); return; } auto class_size = runtime->GetClassSize(klass); @@ -1379,7 +1167,7 @@ void Codegen::CreateNewObjCallOld(NewObjectInst *new_obj) class_size = (class_size & ~(alignment - 1U)) + ((class_size % alignment) != 0U ? 
alignment : 0U); if (class_size > max_tlab_size) { - CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, {src}); + CallRuntime(new_obj, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, RegMask::GetZeroMask(), src); return; } ScopedLiveTmpReg reg_tlab_start(encoder); @@ -1426,7 +1214,8 @@ void Codegen::CreateMultiArrayCall(CallInst *call_inst) ASSERT(location.IsFixedRegister() && location.IsRegisterValid()); GetEncoder()->EncodeMov(class_orig, ConvertRegister(location.GetValue(), DataType::INT32)); - CallRuntime(call_inst, EntrypointId::CREATE_MULTI_ARRAY, dst_reg, {class_reg, Imm(num_args), SpReg()}); + CallRuntime(call_inst, EntrypointId::CREATE_MULTI_ARRAY, dst_reg, RegMask::GetZeroMask(), class_reg, + TypedImm(num_args), SpReg()); } void Codegen::CreateJumpToClassResolverPltShared(Inst *inst, Reg tmp_reg, RuntimeInterface::EntrypointId id) @@ -1515,8 +1304,8 @@ void Codegen::EmitGetUnresolvedCalleeMethod(CallInst *call_inst) // by pointer to the method(LoadMethod below) ScopedTmpReg tmp(GetEncoder()); LoadMethod(tmp); - CallRuntime(call_inst, EntrypointId::GET_UNKNOWN_CALLEE_METHOD, tmp.GetReg(), - {tmp.GetReg(), Imm(call_inst->GetCallMethodId()), Imm(0)}); + CallRuntime(call_inst, EntrypointId::GET_UNKNOWN_CALLEE_METHOD, tmp.GetReg(), RegMask::GetZeroMask(), + tmp.GetReg(), TypedImm(call_inst->GetCallMethodId()), TypedImm(0)); GetEncoder()->EncodeMov(param_0, tmp.GetReg()); return; } @@ -1594,9 +1383,9 @@ void Codegen::CreateCall(CallInst *call_inst) if (call_inst->GetSaveState() != nullptr && call_inst->IsInlined()) { #if defined(EVENT_METHOD_ENTER_ENABLED) && EVENT_METHOD_ENTER_ENABLED != 0 if (!GetGraph()->IsAotMode()) { - InsertTrace({Imm(static_cast(TraceId::METHOD_ENTER)), - Imm(reinterpret_cast(call_inst->GetCallMethod())), - Imm(static_cast(events::MethodEnterKind::INLINED))}); + InsertTrace(Imm(static_cast(TraceId::METHOD_ENTER)), + Imm(reinterpret_cast(call_inst->GetCallMethod())), + Imm(static_cast(events::MethodEnterKind::INLINED))); } #endif 
return; @@ -1632,6 +1421,18 @@ void Codegen::CreateCallIntrinsic(IntrinsicInst *inst) if (inst->HasImms() && GetGraph()->SupportManagedCode()) { EncodeImms(inst->GetImms()); } + + size_t explicit_args; + if (IsStackRangeIntrinsic(inst->GetIntrinsicId(), &explicit_args)) { + auto param_info = GetCallingConvention()->GetParameterInfo(explicit_args); + auto range_ptr_reg = + ConvertRegister(param_info->GetNextLocation(DataType::POINTER).GetRegister(), DataType::POINTER); + if (inst->GetInputsCount() > explicit_args + (inst->RequireState() ? 1U : 0U)) { + auto range_sp_offs = GetStackOffset(inst->GetLocation(explicit_args)); + GetEncoder()->EncodeAdd(range_ptr_reg, GetTarget().GetStackReg(), Imm(range_sp_offs)); + } + } + CallIntrinsic(inst, inst->GetIntrinsicId()); if (inst->GetSaveState() != nullptr) { @@ -1677,8 +1478,8 @@ void Codegen::CreateUnresolvedVirtualMethodLoad(CallInst *vcall, Reg method) if (GetGraph()->IsAotMode()) { // TODO(zhroma): how to make more optimal? LoadMethod(method); - CallRuntime(vcall, EntrypointId::RESOLVE_VIRTUAL_CALL_AOT, method, - {method, obj_reg, Imm(vcall->GetCallMethodId()), Imm(0)}); + CallRuntime(vcall, EntrypointId::RESOLVE_VIRTUAL_CALL_AOT, method, {}, method, obj_reg, + TypedImm(vcall->GetCallMethodId()), TypedImm(0)); } else { auto runtime = GetRuntime(); auto utypes = runtime->GetUnresolvedTypes(); @@ -1779,12 +1580,12 @@ void Codegen::PrepareAndEmitCallVirtual(CallInst *call_inst) IntfInlineCachePass(call_inst, method_reg, tmp_reg, obj_reg); } else { LoadMethod(method_reg); - CallRuntime(call_inst, EntrypointId::RESOLVE_VIRTUAL_CALL_AOT, method_reg, - {method_reg, obj_reg, Imm(call_inst->GetCallMethodId()), Imm(0)}); + CallRuntime(call_inst, EntrypointId::RESOLVE_VIRTUAL_CALL_AOT, method_reg, {}, method_reg, obj_reg, + TypedImm(call_inst->GetCallMethodId()), TypedImm(0)); } } else { - CallRuntime(call_inst, EntrypointId::RESOLVE_VIRTUAL_CALL, method_reg, - {Imm(reinterpret_cast(call_inst->GetCallMethod())), obj_reg}); + 
CallRuntime(call_inst, EntrypointId::RESOLVE_VIRTUAL_CALL, method_reg, {}, + TypedImm(reinterpret_cast(call_inst->GetCallMethod())), obj_reg); } EmitCallVirtual(method_reg); } else if (GetGraph()->IsAotMode() && !GetGraph()->GetAotData()->GetUseCha()) { @@ -1798,6 +1599,24 @@ void Codegen::PrepareAndEmitCallVirtual(CallInst *call_inst) } } +[[maybe_unused]] bool Codegen::EnsureParamsFitIn32Bit(std::initializer_list> params) +{ + for (auto ¶m : params) { + if (std::holds_alternative(param)) { + auto reg = std::get(param); + if (reg.GetSize() > WORD_SIZE) { + return false; + } + } else { + auto imm_type = std::get(param).GetType(); + if (imm_type.GetSize() > WORD_SIZE) { + return false; + } + } + } + return true; +} + void Codegen::IntfInlineCachePass(CallInst *call_inst, Reg method_reg, Reg tmp_reg, Reg obj_reg) { // Cache structure:(offset addr)/(class addr) 32bit/32bit @@ -1826,8 +1645,8 @@ void Codegen::IntfInlineCachePass(CallInst *call_inst, Reg method_reg, Reg tmp_r aot_data->GetInfInlineCacheSlotOffset(GetEncoder()->GetCursorOffset(), intf_inline_cache_index); GetEncoder()->MakeLoadAotTableAddr(offset, reg_tmp_64, INVALID_REGISTER); LoadMethod(method_reg); - CallRuntime(call_inst, EntrypointId::INTF_INLINE_CACHE, method_reg, - {method_reg, obj_reg, Imm(call_inst->GetCallMethodId()), reg_tmp_64}); + CallRuntime(call_inst, EntrypointId::INTF_INLINE_CACHE, method_reg, {}, method_reg, obj_reg, + TypedImm(call_inst->GetCallMethodId()), reg_tmp_64); } else { // we don't have tmp reg here, so use x3 directly constexpr uint32_t REG_3 = 3; @@ -1838,8 +1657,8 @@ void Codegen::IntfInlineCachePass(CallInst *call_inst, Reg method_reg, Reg tmp_r aot_data->GetInfInlineCacheSlotOffset(GetEncoder()->GetCursorOffset(), intf_inline_cache_index); GetEncoder()->MakeLoadAotTableAddr(offset, reg_3, INVALID_REGISTER); LoadMethod(method_reg); - CallRuntime(call_inst, EntrypointId::INTF_INLINE_CACHE, method_reg, - {method_reg, obj_reg, Imm(call_inst->GetCallMethodId()), reg_3}); + 
CallRuntime(call_inst, EntrypointId::INTF_INLINE_CACHE, method_reg, {}, method_reg, obj_reg, + TypedImm(call_inst->GetCallMethodId()), reg_3); GetEncoder()->EncodeMov(reg_3, vtmp); } @@ -1857,89 +1676,15 @@ void Codegen::EmitCallVirtual(Reg method_reg) GetEncoder()->MakeCall(MemRef(param_0, entry_point_offset)); } -void Codegen::AddParamRegsInLiveMasks(RegMask *live_regs, VRegMask *live_vregs, - const std::initializer_list> ¶ms) -{ - auto enc = GetEncoder(); - auto callconv = GetCallingConvention(); - auto param_info = callconv->GetParameterInfo(0); - for (auto ¶m : params) { - if (std::holds_alternative(param)) { - auto reg = std::get(param); - auto curr_dst = param_info->GetNativeParam(reg.GetType()); - if (std::holds_alternative(curr_dst)) { - auto r = std::get(curr_dst); - if (r.IsScalar()) { - live_regs->set(r.GetId()); - } else { - live_vregs->set(r.GetId()); - } - } else { - enc->SetFalseResult(); - UNREACHABLE(); - } - } else { - auto imm = std::get(param); - auto curr_dst = param_info->GetNativeParam(imm.GetType()); - if (std::holds_alternative(curr_dst)) { - auto reg = std::get(curr_dst); - if (reg.IsScalar()) { - live_regs->set(reg.GetId()); - } else { - live_vregs->set(reg.GetId()); - } - } else { - enc->SetFalseResult(); - UNREACHABLE(); - } - } - } -} - -void Codegen::CreateStubCall(Inst *inst, RuntimeInterface::IntrinsicId intrinsic_id, Reg dst, - const std::initializer_list> ¶ms) -{ - VRegMask live_vregs; - RegMask live_regs; - AddParamRegsInLiveMasks(&live_regs, &live_vregs, params); - auto enc = GetEncoder(); - { - SCOPED_DISASM_STR(this, "Save caller saved regs"); - SaveCallerRegisters(live_regs, live_vregs, true); - } - - FillCallParams(params); - CallIntrinsic(inst, intrinsic_id); - - if (inst->GetSaveState() != nullptr) { - CreateStackMap(inst); - } - - if (dst.IsValid()) { - Reg ret_val = GetTarget().GetReturnReg(dst.GetType()); - if (dst.GetId() != ret_val.GetId()) { - enc->SetRegister(&live_regs, &live_vregs, ret_val, true); - } - 
ASSERT(dst.IsScalar()); - enc->EncodeMov(dst, ret_val); - } - - { - SCOPED_DISASM_STR(this, "Restore caller saved regs"); - enc->SetRegister(&live_regs, &live_vregs, dst, false); - LoadCallerRegisters(live_regs, live_vregs, true); - } -} - void Codegen::CreateVirtualCall(CallInst *call_inst) { SCOPED_DISASM_STR(this, "Create Call for virtual method"); if (call_inst->GetSaveState() != nullptr && call_inst->IsInlined()) { #if defined(EVENT_METHOD_ENTER_ENABLED) && EVENT_METHOD_ENTER_ENABLED != 0 if (!GetGraph()->IsAotMode()) { - InsertTrace({Imm(static_cast(TraceId::METHOD_ENTER)), - Imm(reinterpret_cast(call_inst->GetCallMethod())), - Imm(static_cast(events::MethodEnterKind::INLINED))}); + InsertTrace(Imm(static_cast(TraceId::METHOD_ENTER)), + Imm(reinterpret_cast(call_inst->GetCallMethod())), + Imm(static_cast(events::MethodEnterKind::INLINED))); } #endif return; @@ -1956,30 +1701,27 @@ void Codegen::CreateDynamicCall(CallInst *call_inst) Encoder *encoder = GetEncoder(); auto dst_reg = ConvertRegister(call_inst->GetDstReg(), call_inst->GetType()); - Reg param_0 = GetTarget().GetParamReg(0); - Reg native_ptr_reg = Reg(param_0.GetId(), TypeInfo::FromDataType(DataType::POINTER, GetArch())); - Reg param_num_args = GetTarget().GetParamReg(1); - Reg param_func_obj = GetTarget().GetParamReg(2); + Reg method_param_reg = GetTarget().GetParamReg(CallConvDynInfo::REG_METHOD).As(GetPtrRegType()); + Reg num_args_param_reg = GetTarget().GetParamReg(CallConvDynInfo::REG_NUM_ARGS); + auto param_func_obj_loc = Location::MakeStackArgument(CallConvDynInfo::SLOT_CALLEE); ASSERT(!HasLiveCallerSavedRegs(call_inst)); - // TODO(audovichenko): Check the following - // * func_obj's tag is object - // * func_obj's class is dynamic - // * target method is not null - // Load method into param0 as follow: - // param_0 = param_func_obj->GetMethod()->GetExternalPointer() - // native_ptr_reg is a (may be) smaller view of param_0 register. 
- encoder->EncodeLdr(native_ptr_reg, false, MemRef(param_func_obj, runtime->GetFunctionTargetOffset(GetArch()))); + // Load method from callee object + static_assert(coretypes::TaggedValue::TAG_OBJECT == 0); + encoder->EncodeLdr(method_param_reg, false, MemRef(SpReg(), GetStackOffset(param_func_obj_loc))); + encoder->EncodeLdr(method_param_reg, false, MemRef(method_param_reg, runtime->GetFunctionTargetOffset(GetArch()))); ASSERT(call_inst->GetInputsCount() > 1); auto num_args = static_cast(call_inst->GetInputsCount() - 1); // '-1' means 1 for spill fill input - encoder->EncodeMov(param_num_args, Imm(num_args)); + encoder->EncodeMov(num_args_param_reg, Imm(num_args)); size_t entry_point_offset = runtime->GetCompiledEntryPointOffset(GetArch()); - encoder->MakeCall(MemRef(param_0, entry_point_offset)); + encoder->MakeCall(MemRef(method_param_reg, entry_point_offset)); CreateStackMap(call_inst); + // Dynamic callee may have moved sp if there was insufficient num_actual_args + encoder->EncodeSub(SpReg(), FpReg(), Imm(GetFrameLayout().GetOffset(0))); if (dst_reg.IsValid()) { Reg ret_reg = GetTarget().GetReturnReg(dst_reg.GetType()); @@ -1994,15 +1736,6 @@ static T GetBarrierOperandValue(RuntimeInterface *runtime, panda::mem::BarrierPo return std::get(operand.GetValue()); } -void Codegen::CallBarrier(RegMask live_regs, VRegMask live_vregs, EntrypointId id, - const std::initializer_list> ¶ms) -{ - SaveCallerRegisters(live_regs, live_vregs, true); - FillCallParams(params); - EmitCallRuntimeCode(nullptr, id); - LoadCallerRegisters(live_regs, live_vregs, true); -} - void Codegen::CreatePreWRB(Inst *inst, MemRef mem, bool store_pair) { auto runtime = GetRuntime(); @@ -2041,7 +1774,7 @@ void Codegen::CreatePreWRB(Inst *inst, MemRef mem, bool store_pair) TryInsertImplicitNullCheck(inst, prev_offset); CheckObject(tmp_ref, label); auto [live_regs, live_vregs] = GetLiveRegisters(inst); - CallBarrier(live_regs, live_vregs, EntrypointId::PRE_WRB_FUNC_NO_BRIDGE, {tmp_ref}); + 
CallBarrier(live_regs, live_vregs, EntrypointId::PRE_WRB_FUNC_NO_BRIDGE, tmp_ref); if (store_pair) { // store pair doesn't support index and scalar @@ -2058,7 +1791,7 @@ void Codegen::CreatePreWRB(Inst *inst, MemRef mem, bool store_pair) enc->EncodeLdr(tmp_ref, false, MemRef(mem.GetBase(), second_offset)); } CheckObject(tmp_ref, label); - CallBarrier(live_regs, live_vregs, EntrypointId::PRE_WRB_FUNC_NO_BRIDGE, {tmp_ref}); + CallBarrier(live_regs, live_vregs, EntrypointId::PRE_WRB_FUNC_NO_BRIDGE, tmp_ref); } enc->BindLabel(label); } @@ -2177,7 +1910,7 @@ void Codegen::CreatePostInterRegionBarrier(Inst *inst, MemRef mem, Reg reg1, Reg tmp1.Release(); auto [live_regs, live_vregs] = GetLiveRegisters(inst); - CallBarrier(live_regs, live_vregs, EntrypointId::POST_WRB_UPDATE_CARD_FUNC_NO_BRIDGE, {mem_reg, reg1}); + CallBarrier(live_regs, live_vregs, EntrypointId::POST_WRB_UPDATE_CARD_FUNC_NO_BRIDGE, mem_reg, reg1); enc->BindLabel(label); if (reg2.IsValid() && reg1 != reg2) { @@ -2192,7 +1925,7 @@ void Codegen::CreatePostInterRegionBarrier(Inst *inst, MemRef mem, Reg reg1, Reg enc->EncodeShr(tmp2, tmp2, Imm(region_size_bit)); enc->EncodeJump(label1, tmp2, Condition::EQ); tmp2.Release(); - CallBarrier(live_regs, live_vregs, EntrypointId::POST_WRB_UPDATE_CARD_FUNC_NO_BRIDGE, {mem_reg, reg2}); + CallBarrier(live_regs, live_vregs, EntrypointId::POST_WRB_UPDATE_CARD_FUNC_NO_BRIDGE, mem_reg, reg2); enc->BindLabel(label1); } } @@ -2435,15 +2168,15 @@ ENCODE_INST_WITH_SHIFTED_OPERAND(INST_DEF) #undef INST_DEF // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define BinaryImmOperation(opc) \ - void EncodeVisitor::Visit##opc##I(GraphVisitor *visitor, Inst *inst) \ - { \ - auto binop = inst->CastTo##opc##I(); \ - EncodeVisitor *enc = static_cast(visitor); \ - auto type = inst->GetType(); \ - auto dst = enc->GetCodegen()->ConvertRegister(inst->GetDstReg(), type); \ - auto src0 = enc->GetCodegen()->ConvertRegister(inst->GetSrcReg(0), type); \ - enc->GetEncoder()->Encode##opc(dst, 
src0, enc->GetCodegen()->ConvertImm(binop->GetImm(), DataType::INT64)); \ +#define BinaryImmOperation(opc) \ + void EncodeVisitor::Visit##opc##I(GraphVisitor *visitor, Inst *inst) \ + { \ + auto binop = inst->CastTo##opc##I(); \ + EncodeVisitor *enc = static_cast(visitor); \ + auto type = inst->GetType(); \ + auto dst = enc->GetCodegen()->ConvertRegister(inst->GetDstReg(), type); \ + auto src0 = enc->GetCodegen()->ConvertRegister(inst->GetSrcReg(0), type); \ + enc->GetEncoder()->Encode##opc(dst, src0, Imm(binop->GetImm())); \ } // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) @@ -2508,7 +2241,7 @@ void EncodeVisitor::VisitMod(GraphVisitor *visitor, Inst *inst) auto [live_regs, live_vregs] = enc->GetCodegen()->GetLiveRegisters(inst); enc->GetCodegen()->SaveCallerRegisters(live_regs, live_vregs, true); - enc->GetCodegen()->FillCallParams({src0, src1}); + enc->GetCodegen()->FillCallParams(src0, src1); enc->GetCodegen()->CallIntrinsic(inst, entry); auto ret_val = enc->GetCodegen()->GetTarget().GetReturnFpReg(); @@ -2547,7 +2280,7 @@ void EncodeVisitor::VisitShrI(GraphVisitor *visitor, Inst *inst) auto type = inst->GetType(); auto dst = enc->GetCodegen()->ConvertRegister(inst->GetDstReg(), type); auto src0 = enc->GetCodegen()->ConvertRegister(inst->GetSrcReg(0), type); - enc->GetEncoder()->EncodeShr(dst, src0, enc->GetCodegen()->ConvertImm(binop->GetImm(), DataType::INT64)); + enc->GetEncoder()->EncodeShr(dst, src0, Imm(binop->GetImm())); } void EncodeVisitor::VisitMAdd(GraphVisitor *visitor, Inst *inst) @@ -2740,8 +2473,20 @@ void EncodeVisitor::VisitConstant(GraphVisitor *visitor, Inst *inst) } auto dst = enc->GetCodegen()->ConvertRegister(inst->GetDstReg(), type); - Imm imm = enc->GetCodegen()->ConvertImm(const_inst, type); - enc->GetEncoder()->EncodeMov(dst, imm); +#ifndef NDEBUG + switch (type) { + case DataType::FLOAT32: + enc->GetEncoder()->EncodeMov(dst, Imm(const_inst->GetFloatValue())); + break; + case DataType::FLOAT64: + enc->GetEncoder()->EncodeMov(dst, 
Imm(const_inst->GetDoubleValue())); + break; + default: + enc->GetEncoder()->EncodeMov(dst, Imm(const_inst->GetRawValue())); + } +#else + enc->GetEncoder()->EncodeMov(dst, Imm(const_inst->GetRawValue())); +#endif } void EncodeVisitor::VisitNullPtr(GraphVisitor *visitor, Inst *inst) @@ -2754,8 +2499,7 @@ void EncodeVisitor::VisitNullPtr(GraphVisitor *visitor, Inst *inst) } auto type = inst->GetType(); auto dst = enc->GetCodegen()->ConvertRegister(inst->GetDstReg(), type); - Imm imm = enc->GetCodegen()->ConvertImm(static_cast(0), type); - enc->GetEncoder()->EncodeMov(dst, imm); + enc->GetEncoder()->EncodeMov(dst, Imm(0)); } // Next visitors use calling convention @@ -2924,7 +2668,8 @@ void EncodeVisitor::VisitLoadConstArray(GraphVisitor *visitor, Inst *inst) auto dst = enc->GetCodegen()->ConvertRegister(inst->GetDstReg(), inst->GetType()); auto array_type = inst->CastToLoadConstArray()->GetTypeId(); - enc->GetCodegen()->CallRuntimeWithMethod(inst, method, EntrypointId::RESOLVE_LITERAL_ARRAY, dst, Imm(array_type)); + enc->GetCodegen()->CallRuntimeWithMethod(inst, method, EntrypointId::RESOLVE_LITERAL_ARRAY, dst, + TypedImm(array_type)); } void EncodeVisitor::VisitFillConstArray(GraphVisitor *visitor, Inst *inst) @@ -2962,7 +2707,7 @@ void EncodeVisitor::VisitFillConstArray(GraphVisitor *visitor, Inst *inst) auto [live_regs, live_vregs] = enc->GetCodegen()->GetLiveRegisters(inst); enc->GetCodegen()->SaveCallerRegisters(live_regs, live_vregs, true); - enc->GetCodegen()->FillCallParams({array_reg, method_reg, Imm(array_size)}); + enc->GetCodegen()->FillCallParams(array_reg, method_reg, TypedImm(array_size)); enc->GetCodegen()->CallIntrinsic(inst, entry); enc->GetCodegen()->LoadCallerRegisters(live_regs, live_vregs, true); } else { @@ -2972,7 +2717,7 @@ void EncodeVisitor::VisitFillConstArray(GraphVisitor *visitor, Inst *inst) auto [live_regs, live_vregs] = enc->GetCodegen()->GetLiveRegisters(inst); enc->GetCodegen()->SaveCallerRegisters(live_regs, live_vregs, true); - 
enc->GetCodegen()->FillCallParams({array_reg, Imm(data), Imm(array_size)}); + enc->GetCodegen()->FillCallParams(array_reg, TypedImm(data), TypedImm(array_size)); enc->GetCodegen()->CallIntrinsic(inst, entry); enc->GetCodegen()->LoadCallerRegisters(live_regs, live_vregs, true); } @@ -2992,7 +2737,7 @@ void Codegen::VisitNewArray(Inst *inst) // TODO(msherstennikov): support NewArray fast path for arm32 if (max_tlab_size == 0 || GetArch() == Arch::AARCH32) { - CallRuntime(inst, EntrypointId::CREATE_ARRAY, dst, {src_class, src_size}); + CallRuntime(inst, EntrypointId::CREATE_ARRAY, dst, RegMask::GetZeroMask(), src_class, src_size); if (inst->GetFlag(inst_flags::MEM_BARRIER)) { encoder->EncodeMemoryBarrier(memory_order::RELEASE); } @@ -3009,7 +2754,7 @@ void Codegen::VisitNewArray(Inst *inst) array_size = len_inst->CastToConstant()->GetIntValue() * element_size + class_array_size; array_size = (array_size & ~(alignment - 1U)) + ((array_size % alignment) != 0U ? alignment : 0U); if (array_size > max_tlab_size) { - CallRuntime(inst, EntrypointId::CREATE_ARRAY, dst, {src_class, src_size}); + CallRuntime(inst, EntrypointId::CREATE_ARRAY, dst, RegMask::GetZeroMask(), src_class, src_size); if (inst->GetFlag(inst_flags::MEM_BARRIER)) { encoder->EncodeMemoryBarrier(memory_order::RELEASE); } @@ -3040,7 +2785,7 @@ void Codegen::VisitNewArray(Inst *inst) UNREACHABLE(); } - FillCallParams({src_class, src_size}); + FillCallParams(src_class, src_size); MemRef entry(ThreadReg(), GetRuntime()->GetEntrypointTlsOffset(GetArch(), eid)); encoder->MakeCall(entry); CreateStackMap(inst); @@ -3071,18 +2816,6 @@ void EncodeVisitor::VisitParameter(GraphVisitor *visitor, Inst *inst) auto param_inst = inst->CastToParameter(); auto sf = param_inst->GetLocationData(); - if (codegen->GetGraph()->GetMode().SupportManagedCode() && codegen->GetGraph()->IsDynamicMethod() && - param_inst->GetArgNumber() > 0) { - // In dynamic methods only the first parameter is mandatory - // The rest parameters are 
optional. Actual number of passed parameters is known only - // in runtime and is located in the 'r1' register. - // All declared parameters which are not mapped to actual parameters must have - // special value 'undefined'. That is why we have special handling of parameters - // for dynamic methods. - VisitDynamicMethodParameter(visitor, inst); - return; - } - if (sf.GetSrc() == sf.GetDst()) { return; } @@ -3092,83 +2825,6 @@ void EncodeVisitor::VisitParameter(GraphVisitor *visitor, Inst *inst) SpillFillEncoder(codegen, tmp_sf).EncodeSpillFill(); } -void EncodeVisitor::VisitDynamicMethodParameter(GraphVisitor *visitor, Inst *inst) -{ - auto *enc = static_cast(visitor); - auto param_inst = inst->CastToParameter(); - ASSERT(param_inst->GetType() == DataType::ANY); - Codegen *codegen = enc->GetCodegen(); - Encoder *encoder = enc->GetEncoder(); - - SCOPED_DISASM_STR(codegen, "VisitParameter"); - - auto sf = param_inst->GetLocationData(); - ASSERT(sf.DstValue() != INVALID_REG); - Reg num_actual_args_reg = enc->GetCodegen()->GetTarget().GetParamReg(1); - - LabelHolder::LabelId else_label = encoder->CreateLabel(); - LabelHolder::LabelId end_label = encoder->CreateLabel(); - encoder->EncodeJump(else_label, num_actual_args_reg, Imm(param_inst->GetArgNumber()), Condition::LE); - HandleDynParamPassed(sf, enc); - encoder->EncodeJump(end_label); - encoder->BindLabel(else_label); - HandleDynParamNotPassed(sf, enc); - encoder->BindLabel(end_label); -} - -void EncodeVisitor::HandleDynParamPassed(const SpillFillData &sf, EncodeVisitor *enc) -{ - if (sf.GetSrc() == sf.GetDst()) { - return; - } - Codegen *codegen = enc->GetCodegen(); - Encoder *encoder = enc->GetEncoder(); - - if (sf.GetSrc().IsRegister()) { - auto src_reg = codegen->ConvertRegister(sf.SrcValue(), sf.GetType()); - if (sf.GetDst().IsRegister()) { // param_reg -> dst_reg - auto dst_reg = codegen->ConvertRegister(sf.DstValue(), DataType::ANY); - encoder->EncodeMov(dst_reg, src_reg); - } else { - 
ASSERT(sf.GetDst().IsAnyStack()); // param_reg -> push to slot - auto dst_mem = codegen->GetMemRefForSlot(sf.GetDst()); - encoder->EncodeStrz(src_reg, dst_mem); - } - return; - } - - ASSERT(sf.GetSrc().IsAnyStack()); - auto src_mem = codegen->GetMemRefForSlot(sf.GetSrc()); - if (sf.GetDst().IsRegister()) { // load from stack -> dst_reg - auto dst_reg = codegen->ConvertRegister(sf.DstValue(), sf.GetType()); - encoder->EncodeLdr(dst_reg, false, src_mem); - } else { - ASSERT(sf.GetDst().IsAnyStack()); // load from stack -> push to slot - auto dst_mem = codegen->GetMemRefForSlot(sf.GetDst()); - auto type_info = Codegen::ConvertDataType(sf.GetType(), codegen->GetArch()); - encoder->EncodeMemCopyz(src_mem, dst_mem, type_info.GetSize()); - } -} - -void EncodeVisitor::HandleDynParamNotPassed(const SpillFillData &sf, EncodeVisitor *enc) -{ - Codegen *codegen = enc->GetCodegen(); - Encoder *encoder = enc->GetEncoder(); - auto value = enc->cg_->GetGraph()->GetRuntime()->GetDynamicPrimitiveUndefined(); - - if (sf.GetDst().IsRegister()) { - Reg dst_val_reg = codegen->ConvertRegister(sf.DstValue(), DataType::INT64); - encoder->EncodeMov(dst_val_reg, Imm(value)); - return; - } - - ASSERT(sf.GetDst().IsAnyStack()); - auto dest_val = codegen->GetMemRefForSlot(sf.GetDst()); - ScopedTmpReg reg(encoder, INT64_TYPE); - encoder->EncodeMov(reg, Imm(value)); - encoder->EncodeStr(reg, dest_val); -} - void EncodeVisitor::VisitStoreArray(GraphVisitor *visitor, Inst *inst) { auto *enc = static_cast(visitor); @@ -3438,8 +3094,7 @@ void EncodeVisitor::VisitDeoptimizeCompareImm(GraphVisitor *visitor, Inst *inst) auto zreg = enc->GetRegfile()->GetZeroReg(); encoder->EncodeJump(slow_path->GetLabel(), src0, zreg, enc->GetCodegen()->ConvertCc(cc)); } else { - Imm imm = enc->GetCodegen()->ConvertImm(UINT64_C(0), type); - encoder->EncodeJump(slow_path->GetLabel(), src0, imm, enc->GetCodegen()->ConvertCc(cc)); + encoder->EncodeJump(slow_path->GetLabel(), src0, Imm(0), enc->GetCodegen()->ConvertCc(cc)); 
} return; } @@ -3483,7 +3138,8 @@ void EncodeVisitor::VisitLoadString(GraphVisitor *visitor, Inst *inst) encoder->EncodeMov(dst, Imm(string_ptr)); EVENT_JIT_USE_RESOLVED_STRING(graph->GetRuntime()->GetMethodName(method), string_type); } else { - enc->GetCodegen()->CallRuntimeWithMethod(inst, method, EntrypointId::RESOLVE_STRING, dst, Imm(string_type)); + enc->GetCodegen()->CallRuntimeWithMethod(inst, method, EntrypointId::RESOLVE_STRING, dst, + TypedImm(string_type)); } } @@ -3530,7 +3186,8 @@ void EncodeVisitor::VisitUnresolvedLoadObject(GraphVisitor *visitor, Inst *inst) ScopedTmpReg tmp_reg(enc->GetEncoder()); if (graph->IsAotMode()) { // TODO(zhroma): consider caching ? - enc->GetCodegen()->CallRuntimeWithMethod(inst, method, EntrypointId::GET_FIELD_OFFSET, tmp_reg, Imm(type_id)); + enc->GetCodegen()->CallRuntimeWithMethod(inst, method, EntrypointId::GET_FIELD_OFFSET, tmp_reg, + TypedImm(type_id)); enc->GetEncoder()->EncodeAdd(tmp_reg, src, tmp_reg); // Unknown load, assume it can be volatile enc->GetEncoder()->EncodeLdrAcquire(dst, IsTypeSigned(type), MemRef(tmp_reg)); @@ -3655,7 +3312,7 @@ void EncodeVisitor::VisitUnresolvedStoreObject(GraphVisitor *visitor, Inst *inst if (graph->IsAotMode()) { // TODO(zhroma): consider caching ? 
enc->GetCodegen()->CallRuntimeWithMethod(inst, method, RuntimeInterface::EntrypointId::GET_FIELD_OFFSET, - tmp_reg, Imm(type_id)); + tmp_reg, TypedImm(type_id)); } else { auto skind = UnresolvedTypesInterface::SlotKind::FIELD; auto field_offset_addr = graph->GetRuntime()->GetUnresolvedTypes()->GetTableSlot(method, type_id, skind); @@ -3830,13 +3487,13 @@ void EncodeVisitor::VisitGetGlobalVarAddress(GraphVisitor *visitor, Inst *inst) auto label = encoder->CreateLabel(); encoder->EncodeJump(label, dst, Condition::NE); - enc->GetCodegen()->CallRuntime(inst, entrypoint_id, dst, {Imm(id)}); + enc->GetCodegen()->CallRuntime(inst, entrypoint_id, dst, RegMask::GetZeroMask(), TypedImm(id)); encoder->EncodeStr(dst, MemRef(addr)); encoder->BindLabel(label); } else { auto address = runtime->GetGlobalVarAddress(graph->GetMethod(), id); if (address == 0) { - enc->GetCodegen()->CallRuntime(inst, entrypoint_id, dst, {Imm(id)}); + enc->GetCodegen()->CallRuntime(inst, entrypoint_id, dst, RegMask::GetZeroMask(), TypedImm(id)); } else { encoder->EncodeMov(dst, Imm(address)); } @@ -3996,12 +3653,12 @@ void EncodeVisitor::VisitUnresolvedLoadStatic(GraphVisitor *visitor, Inst *inst) // We can't use dst for Load (src - used for get Ref-type size) if ((dst.GetSize() < ref_reg.GetSize()) || !dst.IsScalar()) { ScopedTmpReg tmp_reg(enc->GetEncoder()); - enc->GetCodegen()->CallRuntimeWithMethod(inst, method, entrypoint, tmp_reg, Imm(type_id), Imm(0)); + enc->GetCodegen()->CallRuntimeWithMethod(inst, method, entrypoint, tmp_reg, TypedImm(type_id), TypedImm(0)); enc->GetEncoder()->EncodeLdrAcquire(dst, IsTypeSigned(type), MemRef(tmp_reg)); return; } // TODO(zhroma): consider caching ? 
- enc->GetCodegen()->CallRuntimeWithMethod(inst, method, entrypoint, dst, Imm(type_id), Imm(0)); + enc->GetCodegen()->CallRuntimeWithMethod(inst, method, entrypoint, dst, TypedImm(type_id), TypedImm(0)); } else { auto skind = UnresolvedTypesInterface::SlotKind::FIELD; auto field_addr = graph->GetRuntime()->GetUnresolvedTypes()->GetTableSlot(method, type_id, skind); @@ -4107,7 +3764,8 @@ static void GenUnresolvedStoreStaticBarrierAot(EncodeVisitor *enc, Inst *inst, G if (slow) { entrypoint = RuntimeInterface::EntrypointId::GET_UNKNOWN_STATIC_FIELD_MEMORY_ADDRESS; enc->GetCodegen()->LoadMethod(tmp_reg); - enc->GetCodegen()->CallRuntime(inst, entrypoint, tmp_reg, {tmp_reg, Imm(type_id), Imm(0)}); + enc->GetCodegen()->CallRuntime(inst, entrypoint, tmp_reg, RegMask::GetZeroMask(), tmp_reg, TypedImm(type_id), + TypedImm(0)); auto mem = MemRef(tmp_reg); enc->GetCodegen()->CreatePreWRB(inst, mem); @@ -4116,7 +3774,8 @@ static void GenUnresolvedStoreStaticBarrierAot(EncodeVisitor *enc, Inst *inst, G } // TODO(zhroma): consider caching ? enc->GetCodegen()->LoadMethod(tmp_reg); - enc->GetCodegen()->CallRuntime(inst, entrypoint, tmp_reg, {tmp_reg, Imm(type_id), Imm(0)}); + enc->GetCodegen()->CallRuntime(inst, entrypoint, tmp_reg, RegMask::GetZeroMask(), tmp_reg, TypedImm(type_id), + TypedImm(0)); FinishUnresolvedStoreStatic(enc, inst, graph, tmp_reg, !slow); } @@ -4226,7 +3885,7 @@ void EncodeVisitor::VisitUnresolvedStoreStatic(GraphVisitor *visitor, Inst *inst if (graph->IsAotMode()) { // TODO(zhroma): consider caching ? 
- enc->GetCodegen()->CallRuntimeWithMethod(inst, method, entrypoint, tmp_reg, Imm(type_id), Imm(0)); + enc->GetCodegen()->CallRuntimeWithMethod(inst, method, entrypoint, tmp_reg, TypedImm(type_id), TypedImm(0)); } else { auto skind = UnresolvedTypesInterface::SlotKind::FIELD; auto field_addr = graph->GetRuntime()->GetUnresolvedTypes()->GetTableSlot(method, type_id, skind); @@ -4792,7 +4451,7 @@ void Codegen::CreateMonitorCall(MonitorInst *inst) SaveCallerRegisters(param_regs, VRegMask(), false); - FillCallParams({src}); + FillCallParams(src); MemRef entry(ThreadReg(), GetRuntime()->GetEntrypointTlsOffset(GetArch(), id)); GetEncoder()->MakeCall(entry); CreateStackMap(inst); @@ -4804,7 +4463,7 @@ void Codegen::CreateMonitorCallOld(MonitorInst *inst) auto src = ConvertRegister(inst->GetSrcReg(0), DataType::REFERENCE); // obj auto dst = ConvertRegister(inst->GetDstReg(), inst->GetType()); auto id = inst->IsExit() ? EntrypointId::UNLOCK_OBJECT : EntrypointId::LOCK_OBJECT; - CallRuntime(inst, id, dst, {src}); + CallRuntime(inst, id, dst, RegMask::GetZeroMask(), src); } void Codegen::CreateCheckCastInterfaceCall(Inst *inst) @@ -4815,7 +4474,7 @@ void Codegen::CreateCheckCastInterfaceCall(Inst *inst) SaveCallerRegisters(param_regs, VRegMask(), false); auto obj = ConvertRegister(inst->GetSrcReg(0), DataType::REFERENCE); auto interface = ConvertRegister(inst->GetSrcReg(SECOND_OPERAND), DataType::REFERENCE); - FillCallParams({obj, interface}); + FillCallParams(obj, interface); MemRef entry(ThreadReg(), GetRuntime()->GetEntrypointTlsOffset(GetArch(), EntrypointId::CHECK_CAST_INTERFACE)); enc->MakeCall(entry); CreateStackMap(inst); @@ -4906,8 +4565,7 @@ void EncodeVisitor::VisitBoundsCheckI(GraphVisitor *visitor, Inst *inst) auto value = inst->CastToBoundsCheckI()->GetImm(); if (enc->GetEncoder()->CanEncodeImmAddSubCmp(value, WORD_SIZE, false)) { - auto imm = enc->GetCodegen()->ConvertImm(value, DataType::INT64); - enc->GetEncoder()->EncodeJump(slow_path->GetLabel(), len_reg, 
imm, Condition::LS); + enc->GetEncoder()->EncodeJump(slow_path->GetLabel(), len_reg, Imm(value), Condition::LS); } else { ScopedTmpReg tmp(enc->GetEncoder(), len_reg.GetType()); enc->GetEncoder()->EncodeMov(tmp, Imm(value)); @@ -5095,7 +4753,7 @@ void EncodeVisitor::VisitSelectImm(GraphVisitor *visitor, Inst *inst) auto src1 = enc->GetCodegen()->ConvertRegister(inst->GetSrcReg(1), dst_type); constexpr int32_t IMM_2 = 2; auto src2 = enc->GetCodegen()->ConvertRegister(inst->GetSrcReg(IMM_2), cmp_type); - auto imm = enc->GetCodegen()->ConvertImm(inst->CastToSelectImm()->GetImm(), cmp_type); + auto imm = Imm(inst->CastToSelectImm()->GetImm()); auto cc = enc->GetCodegen()->ConvertCc(inst->CastToSelectImm()->GetCc()); if (IsTestCc(cc)) { enc->GetEncoder()->EncodeSelectTest(dst, src0, src1, src2, imm, cc); @@ -5131,7 +4789,7 @@ void EncodeVisitor::VisitIfImm(GraphVisitor *visitor, Inst *inst) auto type = inst->CastToIfImm()->GetOperandsType(); auto src0 = enc->GetCodegen()->ConvertRegister(inst->GetSrcReg(0), type); - auto imm = enc->GetCodegen()->ConvertImm(inst->CastToIfImm()->GetImm(), DataType::INT64); + auto imm = Imm(inst->CastToIfImm()->GetImm()); auto cc = enc->GetCodegen()->ConvertCc(inst->CastToIfImm()->GetCc()); if (IsTestCc(cc)) { enc->GetEncoder()->EncodeJumpTest(label, src0, imm, cc); diff --git a/compiler/optimizer/code_generator/codegen.h b/compiler/optimizer/code_generator/codegen.h index 640ed8927f663f9fcbec122373746d6458c935b7..7f5a4c93273e3b8e7de81c94d88b8f6047fb9461 100644 --- a/compiler/optimizer/code_generator/codegen.h +++ b/compiler/optimizer/code_generator/codegen.h @@ -134,12 +134,8 @@ public: Reg ConvertRegister(Register ref, DataType::Type type = DataType::Type::INT64); - Imm ConvertImm(uint64_t imm, DataType::Type type); - Imm ConvertImmWithExtend(uint64_t imm, DataType::Type type); - Imm ConvertImm(ConstantInst *const_inst, DataType::Type type); - Condition ConvertCc(ConditionCode cc); Condition ConvertCcOverflow(ConditionCode cc); @@ 
-228,14 +224,96 @@ public: void EmitSlowPaths(); - void InsertTrace(std::initializer_list> params); + /** + * Insert tracing code to the generated code. See `Trace` method in the `runtime/entrypoints.cpp`. + * TODO(compiler): we should rework parameters assigning algorithm, that is duplicated here. + * @param params parameters to be passed to the TRACE entrypoint, first parameter must be TraceId value. + */ + template + void InsertTrace(Args &&...params) + { + SCOPED_DISASM_STR(this, "Trace"); + [[maybe_unused]] constexpr size_t MAX_PARAM_NUM = 8; + static_assert(sizeof...(Args) <= MAX_PARAM_NUM); + auto regfile = GetRegfile(); + auto save_regs = regfile->GetCallerSavedRegMask(); + save_regs.set(GetTarget().GetReturnRegId()); + auto save_vregs = regfile->GetCallerSavedVRegMask(); + save_vregs.set(GetTarget().GetReturnFpRegId()); + + SaveCallerRegisters(save_regs, save_vregs, false); + FillCallParams(std::forward(params)...); + EmitCallRuntimeCode(nullptr, EntrypointId::TRACE); + LoadCallerRegisters(save_regs, save_vregs, false); + } void CallIntrinsic(Inst *inst, RuntimeInterface::IntrinsicId id); // The function is used for calling runtime functions through special bridges. // !NOTE Don't use the function for calling runtime without bridges(it save only parameters on stack) - void CallRuntime(Inst *inst, EntrypointId id, Reg dst_reg, std::initializer_list> params, - RegMask preserved_regs = {}); + template + void CallRuntime(Inst *inst, EntrypointId id, Reg dst_reg, RegMask preserved_regs, Args &&...params) + { + ASSERT(inst != nullptr); + CHECK_EQ(sizeof...(Args), GetRuntime()->GetEntrypointArgsNum(id)); + if (GetArch() == Arch::AARCH32) { + // There is a problem with 64-bit parameters: + // params number passed from entrypoints_gen.S.erb will be inconsistent with Aarch32 ABI. + // Thus, runtime bridges will have wrong params number (\paramsnum macro argument). 
+ ASSERT(EnsureParamsFitIn32Bit({params...})); + ASSERT(!dst_reg.IsValid() || dst_reg.GetSize() <= WORD_SIZE); + } + + SCOPED_DISASM_STR(this, std::string("CallRuntime: ") + GetRuntime()->GetEntrypointName(id)); + RegMask live_regs {preserved_regs | GetLiveRegisters(inst).first}; + RegMask params_mask; + if (inst->HasImplicitRuntimeCall() && !GetRuntime()->IsEntrypointNoreturn(id)) { + SaveRegistersForImplicitRuntime(inst, ¶ms_mask, &live_regs); + } + // parameter regs: their initial values must be stored by the caller + // Other caller regs stored in bridges + FillOnlyParameters(&live_regs, sizeof...(Args)); + SaveCallerRegisters(live_regs, VRegMask(), true); + + if (sizeof...(Args) != 0) { + FillCallParams(std::forward(params)...); + } + + // Call Code + if (!EmitCallRuntimeCode(inst, id)) { + return; + } + if (dst_reg.IsValid()) { + ASSERT(dst_reg.IsScalar()); + Reg ret_val = GetTarget().GetReturnReg(dst_reg.GetType()); + if (dst_reg.GetId() != ret_val.GetId()) { + GetEncoder()->SetRegister(&live_regs, nullptr, ret_val, true); + } + + // We must: + // sign extended INT8 and INT16 to INT32 + // zero extended UINT8 and UINT16 to UINT32 + if (dst_reg.GetSize() < WORD_SIZE) { + bool is_signed = DataType::IsTypeSigned(inst->GetType()); + GetEncoder()->EncodeCast(dst_reg, is_signed, ret_val, is_signed); + } else { + GetEncoder()->EncodeMov(dst_reg, ret_val); + } + } + + GetEncoder()->SetRegister(&live_regs, nullptr, dst_reg, false); + LoadCallerRegisters(live_regs, VRegMask(), true); + + if (!inst->HasImplicitRuntimeCall()) { + return; + } + ASSERT(!GetRuntime()->IsEntrypointNoreturn(id)); + for (auto i = 0U; i < params_mask.size(); i++) { + if (params_mask.test(i)) { + inst->GetSaveState()->GetRootsRegsMask().reset(i); + } + } + } template void CallRuntimeWithMethod(Inst *inst, void *method, EntrypointId eid, Reg dst_reg, Args &&...params) @@ -243,13 +321,15 @@ public: if (GetGraph()->IsAotMode()) { ScopedTmpReg method_reg(GetEncoder()); LoadMethod(method_reg); - 
CallRuntime(inst, eid, dst_reg, {method_reg, params...}); + CallRuntime(inst, eid, dst_reg, RegMask::GetZeroMask(), method_reg, std::forward(params)...); } else { if (Is64BitsArch(GetArch())) { - CallRuntime(inst, eid, dst_reg, {Imm(reinterpret_cast(method)), params...}); + CallRuntime(inst, eid, dst_reg, RegMask::GetZeroMask(), TypedImm(reinterpret_cast(method)), + std::forward(params)...); } else { // uintptr_t causes problems on host cross-jit compilation - CallRuntime(inst, eid, dst_reg, {Imm(down_cast(method)), params...}); + CallRuntime(inst, eid, dst_reg, RegMask::GetZeroMask(), TypedImm(down_cast(method)), + std::forward(params)...); } } } @@ -279,8 +359,15 @@ public: void EncodePostWRB(Inst *inst, MemRef mem, Reg reg1, Reg reg2, bool check_object = true); void CreatePostInterRegionBarrier(Inst *inst, MemRef mem, Reg reg1, Reg reg2, bool check_object); void CreatePostInterGenerationalBarrier(MemRef mem); - void CallBarrier(RegMask live_regs, VRegMask live_vregs, EntrypointId id, - const std::initializer_list> ¶ms); + template + void CallBarrier(RegMask live_regs, VRegMask live_vregs, EntrypointId id, Args &&...params) + { + SaveCallerRegisters(live_regs, live_vregs, true); + FillCallParams(std::forward(params)...); + EmitCallRuntimeCode(nullptr, id); + LoadCallerRegisters(live_regs, live_vregs, true); + } + void CreateLoadClassFromPLT(Inst *inst, Reg tmp_reg, Reg dst, size_t class_id); void CreateJumpToClassResolverPltShared(Inst *inst, Reg tmp_reg, RuntimeInterface::EntrypointId id); void CreateLoadTLABInformation(Reg reg_tlab_start, Reg reg_tlab_size); @@ -396,7 +483,14 @@ protected: return used_vregs_; } - void FillCallParams(const std::initializer_list> ¶ms); + template + void FillCallParams(Args &&...params); + + template + ALWAYS_INLINE inline void FillCallParamsHandleOperands( + ParameterInfo *param_info, SpillFillInst *reg_moves, ArenaVector *sp_moves, + [[maybe_unused]] typename std::array, IMM_ARRAY_SIZE>::iterator imms_iter, Arg &&arg, + Args 
&&...params); void EmitJump(const BasicBlock *bb); @@ -432,11 +526,69 @@ protected: // implementation is generated with compiler/optimizer/templates/intrinsics/intrinsics_codegen.inl.erb void FillBuiltin(IntrinsicInst *inst, SRCREGS src, Reg dst); - void AddParamRegsInLiveMasks(RegMask *live_regs, VRegMask *live_vregs, - const std::initializer_list> ¶ms); + template + ALWAYS_INLINE inline void AddParamRegsInLiveMasksHandleArgs(ParameterInfo *param_info, RegMask *live_regs, + VRegMask *live_vregs, Arg param, Args &&...params) + { + auto curr_dst = param_info->GetNativeParam(param.GetType()); + if (std::holds_alternative(curr_dst)) { + auto reg = std::get(curr_dst); + if (reg.IsScalar()) { + live_regs->set(reg.GetId()); + } else { + live_vregs->set(reg.GetId()); + } + } else { + GetEncoder()->SetFalseResult(); + UNREACHABLE(); + } + if constexpr (sizeof...(Args) != 0) { + AddParamRegsInLiveMasksHandleArgs(param_info, live_regs, live_vregs, std::forward(params)...); + } + } + + template + void AddParamRegsInLiveMasks(RegMask *live_regs, VRegMask *live_vregs, Args &&...params) + { + auto callconv = GetCallingConvention(); + auto param_info = callconv->GetParameterInfo(0); + AddParamRegsInLiveMasksHandleArgs(param_info, live_regs, live_vregs, std::forward(params)...); + } - void CreateStubCall(Inst *inst, RuntimeInterface::IntrinsicId intrinsic_id, Reg dst, - const std::initializer_list> ¶ms); + template + void CreateStubCall(Inst *inst, RuntimeInterface::IntrinsicId intrinsic_id, Reg dst, Args &&...params) + { + VRegMask live_vregs; + RegMask live_regs; + AddParamRegsInLiveMasks(&live_regs, &live_vregs, params...); + auto enc = GetEncoder(); + { + SCOPED_DISASM_STR(this, "Save caller saved regs"); + SaveCallerRegisters(live_regs, live_vregs, true); + } + + FillCallParams(std::forward(params)...); + CallIntrinsic(inst, intrinsic_id); + + if (inst->GetSaveState() != nullptr) { + CreateStackMap(inst); + } + + if (dst.IsValid()) { + Reg ret_val = 
GetTarget().GetReturnReg(dst.GetType()); + if (dst.GetId() != ret_val.GetId()) { + enc->SetRegister(&live_regs, &live_vregs, ret_val, true); + } + ASSERT(dst.IsScalar()); + enc->EncodeMov(dst, ret_val); + } + + { + SCOPED_DISASM_STR(this, "Restore caller saved regs"); + enc->SetRegister(&live_regs, &live_vregs, dst, false); + LoadCallerRegisters(live_regs, live_vregs, true); + } + } ScopedTmpReg CalculatePreviousTLABAllocSize(Reg reg, LabelHolder::LabelId label); friend class IntrinsicCodegenTest; @@ -479,6 +631,8 @@ private: } } + [[maybe_unused]] bool EnsureParamsFitIn32Bit(std::initializer_list> params); + private: ArenaAllocator *allocator_; ArenaAllocator *local_allocator_; @@ -531,9 +685,6 @@ private: friend class EncodeVisitor; friend class BaselineCodegen; friend class SlowPathJsCastDoubleToInt32; - - void CreateStubCall(RuntimeInterface::IntrinsicId intrinsic_id, Reg dst, - const std::initializer_list> ¶ms); }; // Codegen class EncodeVisitor : public GraphVisitor { @@ -874,9 +1025,6 @@ protected: #include "optimizer/ir/visitor.inc" private: - static void VisitDynamicMethodParameter(GraphVisitor *visitor, Inst *inst); - static void HandleDynParamPassed(const SpillFillData &sf, EncodeVisitor *enc); - static void HandleDynParamNotPassed(const SpillFillData &sf, EncodeVisitor *enc); static void CastToAny(GraphVisitor *visitor, Inst *inst); private: @@ -885,6 +1033,90 @@ private: bool success_ {true}; }; // EncodeVisitor +template +ALWAYS_INLINE inline void Codegen::FillCallParamsHandleOperands( + ParameterInfo *param_info, SpillFillInst *reg_moves, ArenaVector *sp_moves, + [[maybe_unused]] typename std::array, IMM_ARRAY_SIZE>::iterator imms_iter, Arg &&arg, + Args &&...params) +{ + Location dst; + auto type = arg.GetType().ToDataType(); + dst = param_info->GetNextLocation(type); + if (dst.IsStackArgument()) { + GetEncoder()->SetFalseResult(); + UNREACHABLE(); // Move to BoundaryFrame + } + + static_assert(std::is_same_v, TypedImm> || std::is_convertible_v); + 
if constexpr (std::is_same_v, TypedImm>) { + auto reg = ConvertRegister(dst.GetValue(), type); + *imms_iter = {reg, arg.GetImm()}; + imms_iter++; + } else { + Reg reg(std::forward(arg)); + if (reg == SpReg()) { + // SP should be handled separately, since on the ARM64 target it has ID out of range + sp_moves->emplace_back(ConvertRegister(dst.GetValue(), type)); + } else { + reg_moves->AddSpillFill(Location::MakeRegister(reg.GetId(), type), dst, type); + } + } + if constexpr (sizeof...(Args) != 0) { + FillCallParamsHandleOperands(param_info, reg_moves, sp_moves, imms_iter, + std::forward(params)...); + } +} + +template +constexpr std::pair CountParameters() +{ + static_assert(std::is_same_v, TypedImm> != std::is_convertible_v); + if constexpr (sizeof...(Args) != 0) { + constexpr auto IMM_REG_COUNT = CountParameters(); + + if constexpr (std::is_same_v, TypedImm>) { + return {IMM_REG_COUNT.first + 1, IMM_REG_COUNT.second}; + } else if constexpr (std::is_convertible_v) { + return {IMM_REG_COUNT.first, IMM_REG_COUNT.second + 1}; + } + } + return {std::is_same_v, TypedImm>, std::is_convertible_v}; +} + +template +void Codegen::FillCallParams(Args &&...params) +{ + SCOPED_DISASM_STR(this, "FillCallParams"); + if constexpr (sizeof...(Args) != 0) { + constexpr size_t IMMEDIATES_COUNT = CountParameters().first; + constexpr size_t REGS_COUNT = CountParameters().second; + // Native call - do not add reserve parameters + auto param_info = GetCallingConvention()->GetParameterInfo(0); + std::array, IMMEDIATES_COUNT> immediates {}; + ArenaVector sp_moves(GetLocalAllocator()->Adapter()); + auto reg_moves = GetGraph()->CreateInstSpillFill(); + sp_moves.reserve(REGS_COUNT); + reg_moves->GetSpillFills().reserve(REGS_COUNT); + + FillCallParamsHandleOperands(param_info, reg_moves, &sp_moves, immediates.begin(), + std::forward(params)...); + + // Resolve registers move order and encode + spill_fills_resolver_.ResolveIfRequired(reg_moves); + SpillFillEncoder(this, 
reg_moves).EncodeSpillFill(); + + // Encode immediates moves + for (auto &imm_values : immediates) { + GetEncoder()->EncodeMov(imm_values.first, imm_values.second); + } + + // Encode moves from SP reg + for (auto dst : sp_moves) { + GetEncoder()->EncodeMov(dst, SpReg()); + } + } +} + } // namespace panda::compiler #endif // COMPILER_OPTIMIZER_CODEGEN_CODEGEN_H_ diff --git a/compiler/optimizer/code_generator/codegen_native.cpp b/compiler/optimizer/code_generator/codegen_native.cpp index 446878ad8dbdf4e611cf5cd53a6c12ff17a9c3c9..0795bd3c17b4d3798e7ac4adb0b5f2dd25425031 100644 --- a/compiler/optimizer/code_generator/codegen_native.cpp +++ b/compiler/optimizer/code_generator/codegen_native.cpp @@ -58,7 +58,7 @@ void CodegenNative::GeneratePrologue() GetCallingConvention()->GenerateNativePrologue(*GetFrameInfo()); if (GetGraph()->IsDynamicMethod()) { - GenerateExtensionsForPrologue(); + GenerateExtensionsForPrologue(GetRuntime()->GetMethodSourceLanguage(GetGraph()->GetMethod())); } #if defined(EVENT_METHOD_ENTER_ENABLED) && EVENT_METHOD_ENTER_ENABLED != 0 @@ -66,12 +66,12 @@ void CodegenNative::GeneratePrologue() SCOPED_DISASM_STR(this, "LoadMethod for trace"); ScopedTmpReg method_reg(GetEncoder()); LoadMethod(method_reg); - InsertTrace({Imm(static_cast(TraceId::METHOD_ENTER)), method_reg, - Imm(static_cast(events::MethodEnterKind::COMPILED))}); + InsertTrace(Imm(static_cast(TraceId::METHOD_ENTER)), method_reg, + Imm(static_cast(events::MethodEnterKind::COMPILED))); } else { - InsertTrace({Imm(static_cast(TraceId::METHOD_ENTER)), - Imm(reinterpret_cast(GetGraph()->GetMethod())), - Imm(static_cast(events::MethodEnterKind::COMPILED))}); + InsertTrace(Imm(static_cast(TraceId::METHOD_ENTER)), + Imm(reinterpret_cast(GetGraph()->GetMethod())), + Imm(static_cast(events::MethodEnterKind::COMPILED))); } #endif } @@ -82,7 +82,7 @@ void CodegenNative::GenerateEpilogue() SCOPED_DISASM_STR(this, "Method Epilogue"); if (GetGraph()->IsDynamicMethod()) { - 
GenerateExtensionsForEpilogue(); + GenerateExtensionsForEpilogue(GetRuntime()->GetMethodSourceLanguage(GetGraph()->GetMethod())); } #if defined(EVENT_METHOD_EXIT_ENABLED) && EVENT_METHOD_EXIT_ENABLED != 0 @@ -90,12 +90,12 @@ void CodegenNative::GenerateEpilogue() if (GetGraph()->IsAotMode()) { ScopedTmpReg method_reg(GetEncoder()); LoadMethod(method_reg); - InsertTrace({Imm(static_cast(TraceId::METHOD_EXIT)), method_reg, - Imm(static_cast(events::MethodExitKind::COMPILED))}); + InsertTrace(Imm(static_cast(TraceId::METHOD_EXIT)), method_reg, + Imm(static_cast(events::MethodExitKind::COMPILED))); } else { - InsertTrace({Imm(static_cast(TraceId::METHOD_EXIT)), - Imm(reinterpret_cast(GetGraph()->GetMethod())), - Imm(static_cast(events::MethodExitKind::COMPILED))}); + InsertTrace(Imm(static_cast(TraceId::METHOD_EXIT)), + Imm(reinterpret_cast(GetGraph()->GetMethod())), + Imm(static_cast(events::MethodExitKind::COMPILED))); } }); #else diff --git a/compiler/optimizer/code_generator/encode.h b/compiler/optimizer/code_generator/encode.h index 4e67dd4bda264ef1efd6d34686314cf7a2132e36..be783044ce8219a8c12a375d3f48036b48468536 100644 --- a/compiler/optimizer/code_generator/encode.h +++ b/compiler/optimizer/code_generator/encode.h @@ -283,7 +283,16 @@ public: { SetFalseResult(); } - virtual void EncodeSti([[maybe_unused]] Imm src, [[maybe_unused]] MemRef mem) + virtual void EncodeSti([[maybe_unused]] int64_t src, [[maybe_unused]] uint8_t src_size_bytes, + [[maybe_unused]] MemRef mem) + { + SetFalseResult(); + } + virtual void EncodeSti([[maybe_unused]] double src, [[maybe_unused]] MemRef mem) + { + SetFalseResult(); + } + virtual void EncodeSti([[maybe_unused]] float src, [[maybe_unused]] MemRef mem) { SetFalseResult(); } @@ -1112,6 +1121,11 @@ public: return reg_; } + TypeInfo GetType() const + { + return reg_.GetType(); + } + // NOLINTNEXTLINE(google-explicit-constructor) operator Reg() const { diff --git a/compiler/optimizer/code_generator/encoder.md 
b/compiler/optimizer/code_generator/encoder.md index acb1da455f11294dc5dafdd5b2479f98a33873d7..33e99fecc584e22ac9b78ad1d7ee3af6bbba05bc 100644 --- a/compiler/optimizer/code_generator/encoder.md +++ b/compiler/optimizer/code_generator/encoder.md @@ -101,15 +101,25 @@ Example: ### Immediate -Class **Imm** contains value of the following types: int8_t, int16_t, int32_t, int64_t, float, double -You can get next information about immediate: type, value, size. +Class **Imm** contains 64-bit storage for the immediate, it doesn't contain any information about it's type. It is intended to store both float and integer types. + +A user is responsible for correct type deduction. Commonly, the type can be deduced by a register that accompanies the immediate in an instruction. Example: ``` - double value = 123.456; - auto imm = Imm(value); // double immediate - ASSERT(imm.GetValue() == value && imm->GetType() == TypeInfo(FLOAT64) && imm->GetSize() == 64 && - !imm->IsScalar()); + EncodeInst(Reg src, Imm imm) + { + if (src.GetType() == FLOAT32_TYPE) { + float val = imm.GetAsFloat(); + ... + } else if (src.GetType() == FLOAT64_TYPE) { + double val = imm.GetAsDouble(); + ... + } else { + int64_t val = imm.GetAsInt(); + ... 
+ } + } ``` ### Memory @@ -126,7 +136,7 @@ Example: auto mem_disp = MemRef(base_reg, disp); // base_reg + disp ASSERT(mem_disp.HasBase() && !mem_disp.HasIndex() && !mem_disp.HasScale() && mem_disp.HasDisp()); ASSERT(mem_disp.GetBase() == base_reg && mem_disp.GetDisp() == disp); - ASSERT(mem_disp.GetIndex() == INVALID_REGISTER && mem_disp.GetScale() == INVALID_IMM); + ASSERT(mem_disp.GetIndex() == INVALID_REGISTER && mem_disp.GetScale() == 0); // memory with base , index registers and scale auto base_reg = Reg(5, TypeInfo(INT64)); @@ -135,7 +145,7 @@ Example: auto mem_scale = MemRef(base_reg, index_reg, scale); // base_reg + (index_reg << scale) ASSERT(mem_scale.HasBase() && mem_scale.HasIndex() && mem_scale.HasScale() && !mem_scale.HasDisp()); ASSERT(mem_scale.GetBase() == base_reg && mem_scale.GetIndex() == index_reg && mem_scale.GetScale() == scale); - ASSERT(mem_scale.GetDisp() == INVALID_IMM); + ASSERT(mem_scale.GetDisp() == 0); ``` ## Code Example (WIP) diff --git a/compiler/optimizer/code_generator/operands.h b/compiler/optimizer/code_generator/operands.h index 903a221de10ea521ed4c60d06ce0d449366c64b5..63ae27a384bd7b1a7bd8e73ff5e7f029fc74c6bd 100644 --- a/compiler/optimizer/code_generator/operands.h +++ b/compiler/optimizer/code_generator/operands.h @@ -21,6 +21,7 @@ Arch-feature definitions */ #include #include +#include #include "utils/arch.h" #include "utils/arena_containers.h" @@ -42,9 +43,10 @@ constexpr uint8_t BYTE_SIZE = 8; constexpr uint8_t HALF_SIZE = 16; constexpr uint8_t WORD_SIZE = 32; constexpr uint8_t DOUBLE_WORD_SIZE = 64; -constexpr uint8_t WORD_SIZE_BYTE = 4; -constexpr uint8_t DOUBLE_WORD_SIZE_BYTE = 8; -constexpr uint8_t QUAD_WORD_SIZE_BYTE = 16; +constexpr uint8_t HALF_WORD_SIZE_BYTES = 2; +constexpr uint8_t WORD_SIZE_BYTES = 4; +constexpr uint8_t DOUBLE_WORD_SIZE_BYTES = 8; +constexpr uint8_t QUAD_WORD_SIZE_BYTES = 16; /// Maximum possible registers count (for scalar and for vector): constexpr uint8_t MAX_NUM_REGS = 32; constexpr uint8_t 
MAX_NUM_VREGS = 32; @@ -124,6 +126,7 @@ public: constexpr explicit TypeInfo(T /* unused */) { #ifndef __clang_analyzer__ + static_assert(std::is_arithmetic_v); if constexpr (std::is_same()) { type_id_ = INT8; } else if constexpr (std::is_same()) { @@ -315,14 +318,14 @@ constexpr TypeInfo FLOAT32_TYPE {TypeInfo::FLOAT32}; constexpr TypeInfo FLOAT64_TYPE {TypeInfo::FLOAT64}; constexpr TypeInfo INVALID_TYPE; -constexpr TypeInfo TypeInfo::GetScalarTypeBySize(size_t size) +constexpr TypeInfo TypeInfo::GetScalarTypeBySize(size_t size_bits) { auto type = INT64_TYPE; - if (size == BYTE_SIZE) { + if (size_bits == BYTE_SIZE) { type = INT8_TYPE; - } else if (size == HALF_SIZE) { + } else if (size_bits == HALF_SIZE) { type = INT16_TYPE; - } else if (size == WORD_SIZE) { + } else if (size_bits == WORD_SIZE) { type = INT32_TYPE; } return type; @@ -429,23 +432,94 @@ static_assert(sizeof(Reg) <= sizeof(uintptr_t)); * It knows nothing about pointers and bools (bools maybe be in future). */ class Imm final { - static inline constexpr uint8_t BITS_PER_BYTE = 8; static constexpr size_t UNDEFINED_SIZE = 0; - static constexpr size_t INT8_SIZE = 8; - static constexpr size_t INT16_SIZE = 16; - static constexpr size_t INT32_SIZE = 32; static constexpr size_t INT64_SIZE = 64; static constexpr size_t FLOAT32_SIZE = 32; static constexpr size_t FLOAT64_SIZE = 64; +public: + constexpr Imm() = default; + + template + constexpr explicit Imm(T value) : value_(static_cast(value)) + { + using Type = std::decay_t; + static_assert(std::is_integral_v || std::is_enum_v); + } + + // Partial template specialization + constexpr explicit Imm(int64_t value) : value_(value) {}; +#ifndef NDEBUG + constexpr explicit Imm(double value) : value_(value) {}; + constexpr explicit Imm(float value) : value_(value) {}; +#else + explicit Imm(double value) : value_(bit_cast(value)) {}; + explicit Imm(float value) : value_(bit_cast(value)) {}; +#endif // !NDEBUG + + DEFAULT_MOVE_SEMANTIC(Imm); + DEFAULT_COPY_SEMANTIC(Imm); 
+ ~Imm() = default; + +#ifdef NDEBUG + constexpr int64_t GetAsInt() const + { + return value_; + } + + float GetAsFloat() const + { + return bit_cast(static_cast(value_)); + } + + double GetAsDouble() const + { + return bit_cast(value_); + } + + constexpr int64_t GetRawValue() const + { + return value_; + } + +#else + constexpr int64_t GetAsInt() const + { + ASSERT(std::holds_alternative(value_)); + return std::get(value_); + } + + float GetAsFloat() const + { + ASSERT(std::holds_alternative(value_)); + return std::get(value_); + } + + double GetAsDouble() const + { + ASSERT(std::holds_alternative(value_)); + return std::get(value_); + } + + constexpr int64_t GetRawValue() const + { + if (value_.index() == 0) { + UNREACHABLE(); + } else if (value_.index() == 1) { + return std::get(value_); + } else if (value_.index() == 2) { + return static_cast(bit_cast(std::get(value_))); + } else if (value_.index() == 3) { + return bit_cast(std::get(value_)); + } + UNREACHABLE(); + } + enum VariantID { - // Pointer used for invalidate variants - V_INT8 = 1, - V_INT16 = 2, - V_INT32 = 3, - V_INT64 = 4, - V_FLOAT32 = 5, - V_FLOAT64 = 6, + V_INVALID = 0, // Pointer used for invalidate variants + V_INT64 = 1, + V_FLOAT32 = 2, + V_FLOAT64 = 3 }; template @@ -454,16 +528,7 @@ class Imm final { #ifndef __clang_analyzer__ // Immediate could be only signed (int/float) // look at value_-type. 
- static_assert(std::is_signed::value); - if constexpr (std::is_same()) { - return value_.index() == V_INT8; - } - if constexpr (std::is_same()) { - return value_.index() == V_INT16; - } - if constexpr (std::is_same()) { - return value_.index() == V_INT32; - } + static_assert(std::is_signed_v); if constexpr (std::is_same()) { return value_.index() == V_INT64; } @@ -476,111 +541,17 @@ class Imm final { return false; #else return true; -#endif - } - -public: - // Invalid constructor - constexpr Imm() = default; - - // Special type constructor - template - constexpr explicit Imm(T value) : value_(value) - { +#endif // !__clang_analyzer__ } - // Partial template specialization - constexpr explicit Imm(uint8_t value) : value_(static_cast(value)) {}; - - constexpr explicit Imm(uint16_t value) : value_(static_cast(value)) {}; - - constexpr explicit Imm(uint32_t value) : value_(static_cast(value)) {}; - - constexpr explicit Imm(uint64_t value) : value_(static_cast(value)) {}; - -#if (PANDA_TARGET_MACOS) - constexpr explicit Imm(size_t value) : value_(static_cast(value)) {}; - - constexpr explicit Imm(long value) : value_(static_cast(value)) {}; -#endif - - DEFAULT_MOVE_SEMANTIC(Imm); - DEFAULT_COPY_SEMANTIC(Imm); - ~Imm() = default; - - template - T GetValue() const - { - ASSERT(CheckVariantID()); - ASSERT(sizeof(T) * BITS_PER_BYTE == GetSize()); - return std::get(value_); - } - - void Inc(size_t value) - { - switch (value_.index()) { - case V_INT8: - value_ = static_cast(std::get(value_) + value); - break; - case V_INT16: - value_ = static_cast(std::get(value_) + value); - break; - case V_INT32: - value_ = static_cast(std::get(value_) + value); - break; - case V_INT64: - value_ = static_cast(std::get(value_) + value); - break; - case V_FLOAT32: - value_ = static_cast(std::get(value_) + value); - break; - case V_FLOAT64: - value_ = static_cast(std::get(value_) + value); - break; - default: - // Check before increment - UNREACHABLE(); - break; - } - } - - void Dec(size_t 
value) + constexpr bool IsValid() const { - switch (value_.index()) { - case V_INT8: - value_ = static_cast(std::get(value_) - value); - break; - case V_INT16: - value_ = static_cast(std::get(value_) - value); - break; - case V_INT32: - value_ = static_cast(std::get(value_) - value); - break; - case V_INT64: - value_ = static_cast(std::get(value_) - value); - break; - case V_FLOAT32: - value_ = static_cast(std::get(value_) - value); - break; - case V_FLOAT64: - value_ = static_cast(std::get(value_) - value); - break; - default: - // Check before decrement - UNREACHABLE(); - break; - } + return !std::holds_alternative(value_); } TypeInfo GetType() const { switch (value_.index()) { - case V_INT8: - return INT8_TYPE; - case V_INT16: - return INT16_TYPE; - case V_INT32: - return INT32_TYPE; case V_INT64: return INT64_TYPE; case V_FLOAT32: @@ -596,12 +567,6 @@ public: constexpr size_t GetSize() const { switch (value_.index()) { - case V_INT8: - return INT8_SIZE; - case V_INT16: - return INT16_SIZE; - case V_INT32: - return INT32_SIZE; case V_INT64: return INT64_SIZE; case V_FLOAT32: @@ -609,85 +574,51 @@ public: case V_FLOAT64: return FLOAT64_SIZE; default: + UNREACHABLE(); return UNDEFINED_SIZE; } } +#endif // NDEBUG - bool IsZero() const - { - if (std::holds_alternative(value_)) { - return std::get(value_) == 0.0; - } - if (std::holds_alternative(value_)) { - return std::get(value_) == 0.0; - } - if (std::holds_alternative(value_)) { - return std::get(value_) == 0; - } - if (std::holds_alternative(value_)) { - return std::get(value_) == 0; - } - if (std::holds_alternative(value_)) { - return std::get(value_) == 0; - } - if (std::holds_alternative(value_)) { - return std::get(value_) == 0; - } - return true; - } - - bool IsFloat() const + bool operator==(Imm other) const { - return std::holds_alternative(value_) || std::holds_alternative(value_); + return value_ == other.value_; } - bool IsScalar() const + bool operator!=(Imm other) const { - return 
std::holds_alternative(value_) || std::holds_alternative(value_) || - std::holds_alternative(value_) || std::holds_alternative(value_); + return !(operator==(other)); } - bool IsValid() const - { - bool hold_data = std::holds_alternative(value_) || std::holds_alternative(value_) || - std::holds_alternative(value_) || std::holds_alternative(value_) || - std::holds_alternative(value_) || std::holds_alternative(value_); - return (GetSize() != 0) && hold_data; - } +private: +#ifndef NDEBUG + std::variant value_ {nullptr}; +#else + int64_t value_ {0}; +#endif // NDEBUG +}; // Imm - unsigned GetShift() +class TypedImm final { +public: + template + constexpr explicit TypedImm(T imm) : type_(imm), imm_(imm) { - if (GetType() == INT64_TYPE) { - return GetValue(); - } - if (GetType() == INT32_TYPE) { - return GetValue(); - } - if (GetType() == INT16_TYPE) { - return GetValue(); - } - if (GetType() == INT8_TYPE) { - return GetValue(); - } - UNREACHABLE(); - return 0; } - bool operator==(Imm other) const + TypeInfo GetType() const { - return value_ == other.value_; + return type_; } - bool operator!=(Imm other) const + Imm GetImm() const { - return !(operator==(other)); + return imm_; } private: - std::variant value_ {nullptr}; -}; // Imm - -constexpr Imm INVALID_IMM = Imm(); + TypeInfo type_ {INVALID_TYPE}; + Imm imm_ {0}; +}; // Why memory ref - because you may create one link for one encode-session // And when you see this one - you can easy understand, what type of memory @@ -919,23 +850,5 @@ private: ShiftType type_ {INVALID_SHIFT}; }; -inline int64_t GetIntValue(Imm imm) -{ - int64_t value {0}; - auto type = imm.GetType(); - if (type == INT32_TYPE) { - value = imm.GetValue(); - } else if (type == INT64_TYPE) { - value = imm.GetValue(); - } else if (type == INT16_TYPE) { - value = imm.GetValue(); - } else if (type == INT8_TYPE) { - value = imm.GetValue(); - } else { - // Inconsistent int-type - UNREACHABLE(); - } - return value; -} } // namespace panda::compiler #endif // 
COMPILER_OPTIMIZER_CODEGEN_REGISTERS_H_ diff --git a/compiler/optimizer/code_generator/slow_path.cpp b/compiler/optimizer/code_generator/slow_path.cpp index f5fbc912aca4fddab912b65f1bff2f0293c6ba9a..324ef417962395f636c8339a250326c4b006b77f 100644 --- a/compiler/optimizer/code_generator/slow_path.cpp +++ b/compiler/optimizer/code_generator/slow_path.cpp @@ -45,11 +45,11 @@ bool SlowPathEntrypoint::GenerateThrowOutOfBoundsException(Codegen *codegen) if (GetInst()->GetOpcode() == Opcode::BoundsCheckI) { ScopedTmpReg index_reg(codegen->GetEncoder()); codegen->GetEncoder()->EncodeMov(index_reg, Imm(GetInst()->CastToBoundsCheckI()->GetImm())); - codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {index_reg, len_reg}); + codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, RegMask::GetZeroMask(), index_reg, len_reg); } else { ASSERT(GetInst()->GetOpcode() == Opcode::BoundsCheck); auto index_reg = codegen->ConvertRegister(GetInst()->GetSrcReg(1), GetInst()->GetInputType(1)); - codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {index_reg, len_reg}); + codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, RegMask::GetZeroMask(), index_reg, len_reg); } return true; } @@ -63,12 +63,13 @@ bool SlowPathEntrypoint::GenerateInitializeClass(Codegen *codegen) Reg klass_reg {codegen->ConvertRegister(GetInst()->GetDstReg(), DataType::REFERENCE)}; RegMask preserved_regs; codegen->GetEncoder()->SetRegister(&preserved_regs, nullptr, klass_reg); - codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {klass_reg}, preserved_regs); + codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, preserved_regs, klass_reg); } else { ASSERT(inst->GetOpcode() == Opcode::InitClass); ASSERT(!codegen->GetGraph()->IsAotMode()); + // check uintptr_t for cross: auto klass = reinterpret_cast(inst->CastToInitClass()->GetClass()); - codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {Imm(klass)}); + 
codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, RegMask::GetZeroMask(), TypedImm(klass)); } return true; } @@ -79,7 +80,7 @@ bool SlowPathEntrypoint::GenerateIsInstance(Codegen *codegen) auto src = codegen->ConvertRegister(GetInst()->GetSrcReg(0), DataType::REFERENCE); // obj auto klass = codegen->ConvertRegister(GetInst()->GetSrcReg(1), DataType::REFERENCE); auto dst = codegen->ConvertRegister(GetInst()->GetDstReg(), GetInst()->GetType()); - codegen->CallRuntime(GetInst(), EntrypointId::IS_INSTANCE, dst, {src, klass}); + codegen->CallRuntime(GetInst(), EntrypointId::IS_INSTANCE, dst, RegMask::GetZeroMask(), src, klass); return true; } @@ -88,7 +89,7 @@ bool SlowPathEntrypoint::GenerateCheckCast(Codegen *codegen) { auto src = codegen->ConvertRegister(GetInst()->GetSrcReg(0), DataType::REFERENCE); // obj auto klass = codegen->ConvertRegister(GetInst()->GetSrcReg(1), DataType::REFERENCE); - codegen->CallRuntime(GetInst(), EntrypointId::CHECK_CAST, INVALID_REGISTER, {src, klass}); + codegen->CallRuntime(GetInst(), EntrypointId::CHECK_CAST, INVALID_REGISTER, RegMask::GetZeroMask(), src, klass); return true; } @@ -118,7 +119,7 @@ bool SlowPathEntrypoint::GenerateDeoptimize(Codegen *codegen) UNREACHABLE(); } uintptr_t value = helpers::ToUnderlying(type) | (GetInst()->GetId() << MinimumBitsToStore(DeoptimizeType::COUNT)); - codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {Imm(value)}); + codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, RegMask::GetZeroMask(), TypedImm(value)); return true; } @@ -129,7 +130,7 @@ bool SlowPathEntrypoint::GenerateCreateObject(Codegen *codegen) auto dst = codegen->ConvertRegister(inst->GetDstReg(), inst->GetType()); auto src = codegen->ConvertRegister(inst->GetSrcReg(0), inst->GetInputType(0)); - codegen->CallRuntime(inst, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, {src}); + codegen->CallRuntime(inst, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, RegMask::GetZeroMask(), src); return true; } 
@@ -139,7 +140,7 @@ bool SlowPathEntrypoint::GenerateByEntry(Codegen *codegen) switch (GetEntrypoint()) { case EntrypointId::THROW_EXCEPTION: { auto src = codegen->ConvertRegister(GetInst()->GetSrcReg(0), DataType::Type::REFERENCE); - codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {src}); + codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, RegMask::GetZeroMask(), src); return true; } case EntrypointId::NULL_POINTER_EXCEPTION: @@ -151,7 +152,7 @@ bool SlowPathEntrypoint::GenerateByEntry(Codegen *codegen) return GenerateThrowOutOfBoundsException(codegen); case EntrypointId::NEGATIVE_ARRAY_SIZE_EXCEPTION: { auto size = codegen->ConvertRegister(GetInst()->GetSrcReg(0), GetInst()->GetInputType(0)); - codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {size}); + codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, RegMask::GetZeroMask(), size); return true; } case EntrypointId::INITIALIZE_CLASS: @@ -233,14 +234,14 @@ void SlowPathResolveStringAot::GenerateImpl(Codegen *codegen) if (tmp_addr_reg.GetReg() != addr_reg_) { codegen->GetEncoder()->EncodeMov(tmp_addr_reg, addr_reg_); } - codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), dst_reg_, Imm(string_id_), tmp_addr_reg); + codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), dst_reg_, TypedImm(string_id_), tmp_addr_reg); } void SlowPathRefCheck::GenerateImpl(Codegen *codegen) { ASSERT(array_reg_ != INVALID_REGISTER); ASSERT(ref_reg_ != INVALID_REGISTER); - codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {array_reg_, ref_reg_}); + codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, RegMask::GetZeroMask(), array_reg_, ref_reg_); } void SlowPathAbstract::GenerateImpl(Codegen *codegen) @@ -249,7 +250,7 @@ void SlowPathAbstract::GenerateImpl(Codegen *codegen) ASSERT(method_reg_ != INVALID_REGISTER); ScopedTmpReg method_reg(codegen->GetEncoder(), method_reg_); ASSERT(method_reg.GetReg().GetId() 
== method_reg_.GetId()); - codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {method_reg.GetReg()}); + codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, RegMask::GetZeroMask(), method_reg.GetReg()); } void SlowPathCheckCast::GenerateImpl(Codegen *codegen) @@ -258,7 +259,7 @@ void SlowPathCheckCast::GenerateImpl(Codegen *codegen) auto inst = GetInst(); auto src = codegen->ConvertRegister(inst->GetSrcReg(0), inst->GetInputType(0)); - codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {class_reg_, src}); + codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, RegMask::GetZeroMask(), class_reg_, src); } void SlowPathUnresolved::GenerateImpl(Codegen *codegen) @@ -271,14 +272,15 @@ void SlowPathUnresolved::GenerateImpl(Codegen *codegen) ScopedTmpReg value_reg(codegen->GetEncoder()); if (GetInst()->GetOpcode() == Opcode::UnresolvedCallVirtual) { - codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), value_reg, arg_reg_, Imm(type_id_), - Imm(slot_addr_)); + codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), value_reg, arg_reg_, TypedImm(type_id_), + TypedImm(slot_addr_)); } else if (GetEntrypoint() == EntrypointId::GET_UNKNOWN_CALLEE_METHOD || GetEntrypoint() == EntrypointId::GET_UNKNOWN_STATIC_FIELD_MEMORY_ADDRESS || GetEntrypoint() == EntrypointId::GET_UNKNOWN_STATIC_FIELD_PTR) { - codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), value_reg, Imm(type_id_), Imm(slot_addr_)); + codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), value_reg, TypedImm(type_id_), + TypedImm(slot_addr_)); } else { - codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), value_reg, Imm(type_id_)); + codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), value_reg, TypedImm(type_id_)); ScopedTmpReg addr_reg(codegen->GetEncoder()); codegen->GetEncoder()->EncodeMov(addr_reg, Imm(slot_addr_)); @@ -300,7 +302,7 @@ void 
SlowPathJsCastDoubleToInt32::GenerateImpl(Codegen *codegen) if (!mode.IsInterpreter() && !mode.IsInterpreterEntry()) { ScopedTmpRegU64 tmp(enc); enc->EncodeMov(tmp, src_reg_); - codegen->CallRuntime(GetInst(), EntrypointId::JS_CAST_DOUBLE_TO_INT32, dst_reg_, {tmp}); + codegen->CallRuntime(GetInst(), EntrypointId::JS_CAST_DOUBLE_TO_INT32, dst_reg_, RegMask::GetZeroMask(), tmp); return; } @@ -308,7 +310,7 @@ void SlowPathJsCastDoubleToInt32::GenerateImpl(Codegen *codegen) live_regs.Reset(dst_reg_.GetId()); codegen->SaveCallerRegisters(live_regs, live_vregs, true); - codegen->FillCallParams({src_reg_}); + codegen->FillCallParams(src_reg_); codegen->EmitCallRuntimeCode(nullptr, EntrypointId::JS_CAST_DOUBLE_TO_INT32_NO_BRIDGE); auto ret_reg {codegen->GetTarget().GetReturnReg(dst_reg_.GetType())}; diff --git a/compiler/optimizer/code_generator/spill_fill_encoder.cpp b/compiler/optimizer/code_generator/spill_fill_encoder.cpp index 8488cd9210ef76a7a7ba4b12b4f385c5c6a53e6b..bd835d1b2e969a28b2d46d6430d89bd5cd437a89 100644 --- a/compiler/optimizer/code_generator/spill_fill_encoder.cpp +++ b/compiler/optimizer/code_generator/spill_fill_encoder.cpp @@ -37,6 +37,13 @@ bool SpillFillEncoder::AreConsecutiveOps(const SpillFillData &pred, const SpillF if (pred.DstType() == LocationType::STACK && pred.DstValue() != succ.DstValue() + 1U) { return false; } + // Parameter slots have another direction + if (pred.SrcType() == LocationType::STACK_PARAMETER && pred.SrcValue() != succ.SrcValue() - 1U) { + return false; + } + if (pred.DstType() == LocationType::STACK_PARAMETER && pred.DstValue() != succ.DstValue() - 1U) { + return false; + } return true; } @@ -159,16 +166,51 @@ size_t SpillFillEncoder::EncodeImmToX(const SpillFillData &sf) if (graph_->IsDynamicMethod() && const_inst->GetType() == DataType::INT64) { type = DataType::UINT32; } - auto imm = codegen_->ConvertImm(const_inst, type); - auto dst_reg = GetDstReg(sf.GetDst(), imm.GetType()); + + Imm imm; +#ifndef NDEBUG + switch (type) 
{ + case DataType::FLOAT32: + imm = Imm(const_inst->GetFloatValue()); + break; + case DataType::FLOAT64: + imm = Imm(const_inst->GetDoubleValue()); + break; + default: + ASSERT(DataType::IsTypeNumeric(type)); + imm = Imm(const_inst->GetRawValue()); + break; + } +#else + imm = Imm(const_inst->GetRawValue()); +#endif + auto dst_reg = GetDstReg(sf.GetDst(), Codegen::ConvertDataType(type, codegen_->GetArch())); encoder_->EncodeMov(dst_reg, imm); return 1U; } ASSERT(sf.GetDst().IsAnyStack()); // imm -> stack auto dst_mem = codegen_->GetMemRefForSlot(sf.GetDst()); - auto imm = codegen_->ConvertImm(const_inst, sf.GetCommonType()); - encoder_->EncodeSti(imm, dst_mem); + auto sf_type = sf.GetCommonType(); + ASSERT(DataType::IsTypeNumeric(sf_type)); + switch (sf_type) { + case DataType::Type::FLOAT32: { + auto imm = const_inst->GetFloatValue(); + encoder_->EncodeSti(imm, dst_mem); + break; + } + case DataType::Type::FLOAT64: { + auto imm = const_inst->GetDoubleValue(); + encoder_->EncodeSti(imm, dst_mem); + break; + } + default: { + auto imm = const_inst->GetRawValue(); + auto store_size = Codegen::ConvertDataType(sf_type, codegen_->GetArch()).GetSize() / BYTE_SIZE; + encoder_->EncodeSti(imm, store_size, dst_mem); + break; + } + } return 1U; } @@ -202,7 +244,7 @@ size_t SpillFillEncoder::EncodeRegisterToX(const SpillFillData &sf, const SpillF // If address is no qword aligned and current group consist of even number of consecutive slots // then we can skip current operation. 
constexpr int COALESCE_OPS_LIMIT = 2; - auto skip_coalescing = (consecutive_ops_hint % COALESCE_OPS_LIMIT == 1) && (offset % QUAD_WORD_SIZE_BYTE != 0); + auto skip_coalescing = (consecutive_ops_hint % COALESCE_OPS_LIMIT == 1) && (offset % QUAD_WORD_SIZE_BYTES != 0); if (next != nullptr && CanCombineSpillFills(sf, *next, graph_) && !skip_coalescing) { auto next_reg = codegen_->ConvertRegister(next->SrcValue(), next->GetCommonType()); encoder_->EncodeStp(src_reg, next_reg, mem_ref); @@ -222,7 +264,7 @@ size_t SpillFillEncoder::EncodeStackToX(const SpillFillData &sf, const SpillFill // If address is no qword aligned and current group consist of even number of consecutive slots // then we can skip current operation. constexpr int COALESCE_OPS_LIMIT = 2; - auto skip_coalescing = (consecutive_ops_hint % COALESCE_OPS_LIMIT == 1) && (offset % QUAD_WORD_SIZE_BYTE != 0); + auto skip_coalescing = (consecutive_ops_hint % COALESCE_OPS_LIMIT == 1) && (offset % QUAD_WORD_SIZE_BYTES != 0); if (next != nullptr && CanCombineSpillFills(sf, *next, graph_) && !skip_coalescing) { auto cur_reg = codegen_->ConvertRegister(sf.DstValue(), sf.GetCommonType()); auto next_reg = codegen_->ConvertRegister(next->DstValue(), next->GetCommonType()); diff --git a/compiler/optimizer/code_generator/target/aarch32/callconv.cpp b/compiler/optimizer/code_generator/target/aarch32/callconv.cpp index 74e7d781da7dbf9cb069d75522d0ea10deca88ed..f4e2b05dde60461e278a5ab449880511b5e51da1 100644 --- a/compiler/optimizer/code_generator/target/aarch32/callconv.cpp +++ b/compiler/optimizer/code_generator/target/aarch32/callconv.cpp @@ -260,14 +260,16 @@ void Aarch32CallingConvention::GeneratePrologue([[maybe_unused]] const FrameInfo GetMasm()->Push(RegisterList(vixl::aarch32::r11, vixl::aarch32::lr)); SET_CFI_OFFSET(push_fplr, encoder->GetCursorOffset()); + ASSERT(!IsDynCallMode()); + encoder->EncodeMov(fp_reg, sp_reg); SET_CFI_OFFSET(set_fp, encoder->GetCursorOffset()); constexpr auto IMM_2 = 2; - 
encoder->EncodeSub(sp_reg, sp_reg, Imm(WORD_SIZE_BYTE * IMM_2)); - encoder->EncodeStr(GetTarget().GetParamReg(0), MemRef(sp_reg, WORD_SIZE_BYTE)); + encoder->EncodeSub(sp_reg, sp_reg, Imm(WORD_SIZE_BYTES * IMM_2)); + encoder->EncodeStr(GetTarget().GetParamReg(0), MemRef(sp_reg, WORD_SIZE_BYTES)); // Allocate space for locals - auto locals_size = (CFrameSlots::Start() - CFrameData::Start()) * WORD_SIZE_BYTE; + auto locals_size = (CFrameSlots::Start() - CFrameData::Start()) * WORD_SIZE_BYTES; encoder->EncodeSub(sp_reg, sp_reg, Imm(locals_size)); SET_CFI_CALLEE_REGS(GetCalleeRegsMask(Arch::AARCH32, false)); @@ -279,14 +281,14 @@ void Aarch32CallingConvention::GeneratePrologue([[maybe_unused]] const FrameInfo // Reset OSR flag and set HasFloatRegsFlag auto callee_regs_size = - (GetCalleeRegsCount(Arch::AARCH32, true) + GetCalleeRegsCount(Arch::AARCH32, false)) * WORD_SIZE_BYTE; + (GetCalleeRegsCount(Arch::AARCH32, true) + GetCalleeRegsCount(Arch::AARCH32, false)) * WORD_SIZE_BYTES; auto flags {static_cast(frame_info.GetHasFloatRegs()) << CFrameLayout::HasFloatRegsFlag::START_BIT}; - encoder->EncodeSti(Imm(flags), MemRef(sp_reg, callee_regs_size + locals_size)); + encoder->EncodeSti(flags, sizeof(flags), MemRef(sp_reg, callee_regs_size + locals_size)); encoder->EncodeSub( sp_reg, sp_reg, Imm((fl.GetSpillsCount() + fl.GetCallerRegistersCount(false) + fl.GetCallerRegistersCount(true)) * - WORD_SIZE_BYTE)); + WORD_SIZE_BYTES)); } void Aarch32CallingConvention::GenerateEpilogue([[maybe_unused]] const FrameInfo &frame_info, @@ -299,7 +301,7 @@ void Aarch32CallingConvention::GenerateEpilogue([[maybe_unused]] const FrameInfo encoder->EncodeAdd( sp_reg, sp_reg, Imm((fl.GetSpillsCount() + fl.GetCallerRegistersCount(false) + fl.GetCallerRegistersCount(true)) * - WORD_SIZE_BYTE)); + WORD_SIZE_BYTES)); GetMasm()->Vpop( SRegisterList(SRegister(GetFirstCalleeReg(Arch::AARCH32, true)), GetCalleeRegsCount(Arch::AARCH32, true))); @@ -311,8 +313,8 @@ void 
Aarch32CallingConvention::GenerateEpilogue([[maybe_unused]] const FrameInfo // Support restoring of LR and FP registers once OSR is supported in arm32 static_assert(!ArchTraits::SUPPORT_OSR); constexpr auto IMM_2 = 2; - encoder->EncodeAdd(sp_reg, sp_reg, Imm(WORD_SIZE_BYTE * IMM_2)); - encoder->EncodeAdd(sp_reg, sp_reg, Imm(WORD_SIZE_BYTE * (CFrameSlots::Start() - CFrameData::Start()))); + encoder->EncodeAdd(sp_reg, sp_reg, Imm(WORD_SIZE_BYTES * IMM_2)); + encoder->EncodeAdd(sp_reg, sp_reg, Imm(WORD_SIZE_BYTES * (CFrameSlots::Start() - CFrameData::Start()))); GetMasm()->Pop(RegisterList(vixl::aarch32::r11, vixl::aarch32::lr)); SET_CFI_OFFSET(pop_fplr, encoder->GetCursorOffset()); diff --git a/compiler/optimizer/code_generator/target/aarch32/encode.cpp b/compiler/optimizer/code_generator/target/aarch32/encode.cpp index fe8d1c2706b0aa018314f42d62f464263461ca4c..862ab349e2874448fe2aa0b0883f17f5269390b9 100644 --- a/compiler/optimizer/code_generator/target/aarch32/encode.cpp +++ b/compiler/optimizer/code_generator/target/aarch32/encode.cpp @@ -128,20 +128,19 @@ void Aarch32Encoder::EncodeBitTestAndBranch(LabelHolder::LabelId id, Reg reg, ui } } -bool Aarch32Encoder::CompareImmHelper(Reg src, Imm imm, Condition *cc) +bool Aarch32Encoder::CompareImmHelper(Reg src, int64_t imm, Condition *cc) { - auto value = GetIntValue(imm); ASSERT(src.IsScalar()); - ASSERT(value != 0); - ASSERT(-static_cast(UINT32_MAX) <= value && value <= UINT32_MAX); - ASSERT(CanEncodeImmAddSubCmp(value, src.GetSize(), IsConditionSigned(*cc))); + ASSERT(imm != 0); + ASSERT(-static_cast(UINT32_MAX) <= imm && imm <= UINT32_MAX); + ASSERT(CanEncodeImmAddSubCmp(imm, src.GetSize(), IsConditionSigned(*cc))); - return value < 0 ? CompareNegImmHelper(src, value, cc) : ComparePosImmHelper(src, value, cc); + return imm < 0 ? 
CompareNegImmHelper(src, imm, cc) : ComparePosImmHelper(src, imm, cc); } void Aarch32Encoder::TestImmHelper(Reg src, Imm imm, [[maybe_unused]] Condition cc) { - auto value = GetIntValue(imm); + auto value = imm.GetAsInt(); ASSERT(src.IsScalar()); ASSERT(cc == Condition::TST_EQ || cc == Condition::TST_NE); ASSERT(CanEncodeImmLogical(value, src.GetSize())); @@ -214,13 +213,13 @@ bool Aarch32Encoder::ComparePosImmHelper(Reg src, int64_t value, Condition *cc) void Aarch32Encoder::EncodeJump(LabelHolder::LabelId id, Reg src, Imm imm, Condition cc) { - auto value = GetIntValue(imm); + auto value = imm.GetAsInt(); if (value == 0) { EncodeJump(id, src, cc); return; } - if (!CompareImmHelper(src, imm, &cc)) { + if (!CompareImmHelper(src, value, &cc)) { return; } @@ -1947,7 +1946,7 @@ void Aarch32Encoder::EncodeSub(Reg dst, Reg src, Imm imm) void Aarch32Encoder::EncodeShl(Reg dst, Reg src, Imm imm) { - auto value = static_cast(GetIntValue(imm)); + auto value = static_cast(imm.GetAsInt()); int32_t imm_value = value & (dst.GetSize() - 1); ASSERT(dst.IsScalar() && "Invalid operand type"); @@ -1973,7 +1972,7 @@ void Aarch32Encoder::EncodeShl(Reg dst, Reg src, Imm imm) void Aarch32Encoder::EncodeShr(Reg dst, Reg src, Imm imm) { - auto value = static_cast(GetIntValue(imm)); + auto value = static_cast(imm.GetAsInt()); int32_t imm_value = value & (dst.GetSize() - 1); ASSERT(dst.IsScalar() && "Invalid operand type"); @@ -2001,7 +2000,7 @@ void Aarch32Encoder::EncodeAShr(Reg dst, Reg src, Imm imm) { ASSERT(dst.IsScalar() && "Invalid operand type"); - auto value = static_cast(GetIntValue(imm)); + auto value = static_cast(imm.GetAsInt()); int32_t imm_value = value & (dst.GetSize() - 1); if (dst.GetSize() <= WORD_SIZE) { @@ -2054,9 +2053,9 @@ void Aarch32Encoder::EncodeMov(Reg dst, Imm src) { if (dst.IsFloat()) { if (dst.GetSize() == WORD_SIZE) { - GetMasm()->Vmov(Convert(dst.GetType()), VixlVReg(dst).S(), VixlNeonImm(src.GetValue())); + GetMasm()->Vmov(Convert(dst.GetType()), 
VixlVReg(dst).S(), VixlNeonImm(src.GetAsFloat())); } else { - GetMasm()->Vmov(Convert(dst.GetType()), VixlVReg(dst).D(), VixlNeonImm(src.GetValue())); + GetMasm()->Vmov(Convert(dst.GetType()), VixlVReg(dst).D(), VixlNeonImm(src.GetAsDouble())); } return; } @@ -2185,7 +2184,7 @@ void Aarch32Encoder::EncodeStrRelease(Reg src, MemRef mem) void Aarch32Encoder::EncodeStrz(Reg src, MemRef mem) { if (src.GetSize() <= WORD_SIZE) { - EncodeSti(Imm(static_cast(0)), mem); + EncodeSti(0, DOUBLE_WORD_SIZE_BYTES, mem); } EncodeStr(src, mem); } @@ -2195,7 +2194,7 @@ void Aarch32Encoder::EncodeStp(Reg src0, Reg src1, MemRef mem) ASSERT(src0.IsFloat() == src1.IsFloat()); ASSERT(src0.GetSize() == src1.GetSize()); EncodeStr(src0, mem); - EncodeStr(src1, MemRef(mem.GetBase(), mem.GetIndex(), mem.GetScale(), mem.GetDisp() + WORD_SIZE_BYTE)); + EncodeStr(src1, MemRef(mem.GetBase(), mem.GetIndex(), mem.GetScale(), mem.GetDisp() + WORD_SIZE_BYTES)); } void Aarch32Encoder::EncodeLdrExclusive(Reg dst, Reg addr, bool acquire) @@ -2297,27 +2296,23 @@ inline static int32_t FindRegForMem(vixl::aarch32::MemOperand mem) return -1; } -void Aarch32Encoder::EncodeSti(Imm src, MemRef mem) +void Aarch32Encoder::EncodeSti(int64_t src, uint8_t src_size_bytes, MemRef mem) { - if (src.GetType().IsFloat()) { - EncodeFloatSti(src, mem); - return; - } ScopedTmpRegU32 tmp_reg(this); auto tmp = VixlReg(tmp_reg); - auto type = src.GetType(); - if (src.GetSize() <= WORD_SIZE) { + auto type = TypeInfo::GetScalarTypeBySize(src_size_bytes * BITS_PER_BYTE); + if (src_size_bytes <= WORD_SIZE_BYTES) { auto vixl_mem = PrepareMemLdS(mem, type, tmp, false); if (vixl_mem.GetBaseRegister().GetCode() == tmp.GetCode()) { ScopedTmpRegU32 tmp1_reg(this); tmp = VixlReg(tmp1_reg); } GetMasm()->Mov(tmp, VixlImm(src)); - if (src.GetSize() == BYTE_SIZE) { + if (src_size_bytes == 1) { GetMasm()->Strb(tmp, vixl_mem); return; } - if (src.GetSize() == HALF_SIZE) { + if (src_size_bytes == HALF_WORD_SIZE_BYTES) { GetMasm()->Strh(tmp, 
vixl_mem); return; } @@ -2326,7 +2321,7 @@ void Aarch32Encoder::EncodeSti(Imm src, MemRef mem) } auto vixl_mem = PrepareMemLdS(mem, type, tmp, false, true); - ASSERT(src.GetSize() == DOUBLE_WORD_SIZE); + ASSERT(src_size_bytes == DOUBLE_WORD_SIZE_BYTES); vixl::aarch32::Register tmp_imm1; vixl::aarch32::Register tmp_imm2; // if tmp isn't base reg and tmp is even and tmp+1 isn't SP we can use tmp and tmp + 1 @@ -2344,8 +2339,8 @@ void Aarch32Encoder::EncodeSti(Imm src, MemRef mem) ASSERT(tmp_imm1.IsValid() && tmp_imm2.IsValid()); GetMasm()->Push(tmp_imm2); - GetMasm()->Mov(tmp_imm1, VixlImm(src)); - GetMasm()->Mov(tmp_imm2, VixlImmU(src)); + GetMasm()->Mov(tmp_imm1, VixlImm(Imm(src))); + GetMasm()->Mov(tmp_imm2, VixlImmU(Imm(src))); GetMasm()->Strd(tmp_imm1, tmp_imm2, vixl_mem); GetMasm()->Pop(tmp_imm2); if (tmp_imm1.GetCode() != tmp.GetCode()) { @@ -2353,18 +2348,18 @@ void Aarch32Encoder::EncodeSti(Imm src, MemRef mem) } } -void Aarch32Encoder::EncodeFloatSti(Imm src, MemRef mem) +void Aarch32Encoder::EncodeSti(float src, MemRef mem) { - ASSERT(src.GetType().IsFloat()); - if (src.GetSize() == WORD_SIZE) { - ScopedTmpRegF32 tmp_reg(this); - GetMasm()->Vmov(VixlVReg(tmp_reg).S(), src.GetValue()); - EncodeStr(tmp_reg, mem); - } else { - ScopedTmpRegF64 tmp_reg(this); - GetMasm()->Vmov(VixlVReg(tmp_reg).D(), src.GetValue()); - EncodeStr(tmp_reg, mem); - } + ScopedTmpRegF32 tmp_reg(this); + GetMasm()->Vmov(VixlVReg(tmp_reg).S(), src); + EncodeStr(tmp_reg, mem); +} + +void Aarch32Encoder::EncodeSti(double src, MemRef mem) +{ + ScopedTmpRegF64 tmp_reg(this); + GetMasm()->Vmov(VixlVReg(tmp_reg).D(), src); + EncodeStr(tmp_reg, mem); } void Aarch32Encoder::EncodeMemCopy(MemRef mem_from, MemRef mem_to, size_t size) @@ -2588,7 +2583,7 @@ void Aarch32Encoder::EncodeSelect(Reg dst, Reg src0, Reg src1, Reg src2, Reg src void Aarch32Encoder::EncodeSelect(Reg dst, Reg src0, Reg src1, Reg src2, Imm imm, Condition cc) { ASSERT(!src0.IsFloat() && !src1.IsFloat() && !src2.IsFloat()); - 
auto value = GetIntValue(imm); + auto value = imm.GetAsInt(); if (value == 0) { switch (cc) { case Condition::LO: @@ -2618,7 +2613,7 @@ void Aarch32Encoder::EncodeSelect(Reg dst, Reg src0, Reg src1, Reg src2, Imm imm } CompareZeroHelper(src2, &cc); } else { // value != 0 - if (!CompareImmHelper(src2, imm, &cc)) { + if (!CompareImmHelper(src2, value, &cc)) { return; } } @@ -2688,6 +2683,12 @@ bool Aarch32Encoder::CanEncodeImmLogical(uint64_t imm, uint32_t size) return false; } } +#ifndef NDEBUG + if (size < DOUBLE_WORD_SIZE) { + // Test if the highest part is consistent: + ASSERT((imm >> size == 0) || ((~imm) >> size == 0)); + } +#endif // NDEBUG return vixl::aarch32::ImmediateA32::IsImmediateA32(imm); } @@ -2701,20 +2702,20 @@ void Aarch32Encoder::LoadStoreRegisters(RegMask registers, bool is_fp, int32_t s } vixl::aarch32::Register base_reg = VixlReg(base); - int32_t max_offset = (slot + helpers::ToSigned(registers.GetMaxRegister())) * WORD_SIZE_BYTE; + int32_t max_offset = (slot + helpers::ToSigned(registers.GetMaxRegister())) * WORD_SIZE_BYTES; ScopedTmpRegU32 tmp_reg(this); auto tmp = VixlReg(tmp_reg); // Construct single add for big offset if (is_fp) { if ((max_offset < -VMEM_OFFSET) || (max_offset > VMEM_OFFSET)) { - GetMasm()->Add(tmp, base_reg, VixlImm(slot * WORD_SIZE_BYTE)); + GetMasm()->Add(tmp, base_reg, VixlImm(slot * WORD_SIZE_BYTES)); slot = 0; base_reg = tmp; } } else { if ((max_offset < -MEM_BIG_OFFSET) || (max_offset > MEM_BIG_OFFSET)) { - GetMasm()->Add(tmp, base_reg, VixlImm(slot * WORD_SIZE_BYTE)); + GetMasm()->Add(tmp, base_reg, VixlImm(slot * WORD_SIZE_BYTES)); slot = 0; base_reg = tmp; } @@ -2736,7 +2737,7 @@ void Aarch32Encoder::LoadStoreRegisters(RegMask registers, bool is_fp, int32_t s if (!has_mask) { index++; } - auto mem = MemOperand(base_reg, (slot + index - 1) * WORD_SIZE_BYTE); + auto mem = MemOperand(base_reg, (slot + index - 1) * WORD_SIZE_BYTES); if (is_fp) { auto reg = vixl::aarch32::SRegister(i); if constexpr (IS_STORE) { // 
NOLINT @@ -2768,19 +2769,19 @@ void Aarch32Encoder::LoadStoreRegisters(RegMask registers, ssize_t slot, size_t } } vixl::aarch32::Register base_reg = vixl::aarch32::sp; - auto max_offset = (slot + last_reg) * WORD_SIZE_BYTE; + auto max_offset = (slot + last_reg) * WORD_SIZE_BYTES; ScopedTmpRegU32 tmp_reg(this); auto tmp = VixlReg(tmp_reg); // Construct single add for big offset if (is_fp) { if ((max_offset < -VMEM_OFFSET) || (max_offset > VMEM_OFFSET)) { - GetMasm()->Add(tmp, base_reg, VixlImm(slot * WORD_SIZE_BYTE)); + GetMasm()->Add(tmp, base_reg, VixlImm(slot * WORD_SIZE_BYTES)); slot = 0; base_reg = tmp; } } else { if ((max_offset < -MEM_BIG_OFFSET) || (max_offset > MEM_BIG_OFFSET)) { - GetMasm()->Add(tmp, base_reg, VixlImm(slot * WORD_SIZE_BYTE)); + GetMasm()->Add(tmp, base_reg, VixlImm(slot * WORD_SIZE_BYTES)); slot = 0; base_reg = tmp; } @@ -2789,7 +2790,7 @@ void Aarch32Encoder::LoadStoreRegisters(RegMask registers, ssize_t slot, size_t if (!registers.test(i)) { continue; } - auto mem = MemOperand(base_reg, (slot + i - start_reg) * WORD_SIZE_BYTE); + auto mem = MemOperand(base_reg, (slot + i - start_reg) * WORD_SIZE_BYTES); if (is_fp) { auto reg = vixl::aarch32::SRegister(i); if constexpr (IS_STORE) { // NOLINT diff --git a/compiler/optimizer/code_generator/target/aarch32/target.h b/compiler/optimizer/code_generator/target/aarch32/target.h index 4dd63d00935898cd99a95d1b5a420d20aa696de3..d8f5a821f6cfdcdb0b69f99e302b143af3cdbc7a 100644 --- a/compiler/optimizer/code_generator/target/aarch32/target.h +++ b/compiler/optimizer/code_generator/target/aarch32/target.h @@ -218,73 +218,17 @@ static inline vixl::aarch32::DataType Convert(const TypeInfo info, const bool is static inline vixl::aarch32::Operand VixlImm(Imm imm) { - ASSERT(imm.IsValid()); // Unsupported 64-bit values - force cast - if (imm.GetType() == INT64_TYPE) { - auto data = static_cast(imm.GetValue()); - return vixl::aarch32::Operand(data); - } - if (imm.GetType() == INT32_TYPE) { - return 
vixl::aarch32::Operand(imm.GetValue()); - } - if (imm.GetType() == INT16_TYPE) { - return vixl::aarch32::Operand(imm.GetValue()); - } - if (imm.GetType() == INT8_TYPE) { - return vixl::aarch32::Operand(imm.GetValue()); - } - if (imm.GetType() == FLOAT32_TYPE) { - auto data = bit_cast(imm.GetValue()); - return vixl::aarch32::Operand(data); - } - if (imm.GetType() == FLOAT64_TYPE) { - auto data = static_cast(bit_cast(imm.GetValue())); - return vixl::aarch32::Operand(data); - } - // Invalid converted register - UNREACHABLE(); - return vixl::aarch32::Operand(imm.GetValue()); + return vixl::aarch32::Operand(static_cast(imm.GetRawValue())); } // Upper half for immediate static inline vixl::aarch32::Operand VixlImmU(Imm imm) { - ASSERT(imm.IsValid()); // Unsupported 64-bit values - force cast - if (imm.GetType() == INT64_TYPE) { - // NOLINTNEXTLINE(hicpp-signed-bitwise) - auto data = static_cast(imm.GetValue() >> WORD_SIZE); - return vixl::aarch32::Operand(data); - } - if (imm.GetType() == FLOAT64_TYPE) { - auto val = bit_cast(imm.GetValue()); - // NOLINTNEXTLINE(hicpp-signed-bitwise) - auto data = static_cast(val >> WORD_SIZE); - return vixl::aarch32::Operand(data); - } - - return vixl::aarch32::Operand(0x0); -} - -static inline vixl::aarch32::DOperand VixlDImm(Imm imm) -{ - ASSERT(imm.IsValid()); - if (imm.GetType() == INT64_TYPE) { - auto data = imm.GetValue(); - return vixl::aarch32::DOperand(data); - } - if (imm.GetType() == INT32_TYPE) { - return vixl::aarch32::DOperand(imm.GetValue()); - } - if (imm.GetType() == INT16_TYPE) { - return vixl::aarch32::DOperand(imm.GetValue()); - } - if (imm.GetType() == INT8_TYPE) { - return vixl::aarch32::DOperand(imm.GetValue()); - } - // Invalid converted register - UNREACHABLE(); - return vixl::aarch32::DOperand(imm.GetValue()); + // NOLINTNEXTLINE(hicpp-signed-bitwise) + auto data = static_cast(imm.GetRawValue() >> WORD_SIZE); + return vixl::aarch32::Operand(data); } class Aarch32RegisterDescription final : public 
RegistersDescription { @@ -581,7 +525,9 @@ public: // zerod high part: [reg.size, 64) void EncodeStrz(Reg src, MemRef mem) override; - void EncodeSti(Imm src, MemRef mem) override; + void EncodeSti(int64_t src, uint8_t src_size_bytes, MemRef mem) override; + void EncodeSti(double src, MemRef mem) override; + void EncodeSti(float src, MemRef mem) override; // size must be 8, 16,32 or 64 void EncodeMemCopy(MemRef mem_from, MemRef mem_to, size_t size) override; // size must be 8, 16,32 or 64 @@ -892,12 +838,11 @@ private: void MakeLibCallWithInt64Result(Reg dst, Reg src0, Reg src1, void *entry_point, bool second_value); void CompareHelper(Reg src0, Reg src1, Condition *cc); void TestHelper(Reg src0, Reg src1, Condition cc); - bool CompareImmHelper(Reg src, Imm imm, Condition *cc); + bool CompareImmHelper(Reg src, int64_t imm, Condition *cc); void TestImmHelper(Reg src, Imm imm, Condition cc); bool CompareNegImmHelper(Reg src, int64_t value, const Condition *cc); bool ComparePosImmHelper(Reg src, int64_t value, Condition *cc); void CompareZeroHelper(Reg src, Condition *cc); - void EncodeFloatSti(Imm src, MemRef mem); static inline constexpr int32_t MEM_BIG_OFFSET = 4095; static inline constexpr int32_t MEM_SMALL_OFFSET = 255; static inline constexpr int32_t VMEM_OFFSET = 1020; diff --git a/compiler/optimizer/code_generator/target/aarch64/callconv.cpp b/compiler/optimizer/code_generator/target/aarch64/callconv.cpp index 939552da5fa1cd5d41b745ff482f6b6e81399ce8..595b80b660f395e860ca902929ff20a15196578a 100644 --- a/compiler/optimizer/code_generator/target/aarch64/callconv.cpp +++ b/compiler/optimizer/code_generator/target/aarch64/callconv.cpp @@ -157,6 +157,26 @@ void Aarch64CallingConvention::GeneratePrologue(const FrameInfo &frame_info) SET_CFI_OFFSET(set_fp, encoder->GetCursorOffset()); } + if (IsDynCallMode() && GetDynInfo().IsCheckRequired()) { + static_assert(CallConvDynInfo::REG_NUM_ARGS == 1); + static_assert(CallConvDynInfo::REG_COUNT == 
CallConvDynInfo::REG_NUM_ARGS + 1); + + ASSERT(frame_info.GetSaveFrameAndLinkRegs()); + + constexpr auto NUM_ACTUAL_REG = GetTarget().GetParamReg(CallConvDynInfo::REG_NUM_ARGS); + constexpr auto NUM_EXPECTED_REG = GetTarget().GetParamReg(CallConvDynInfo::REG_COUNT); + auto num_expected = GetDynInfo().GetNumExpectedArgs(); + + auto expand_done = encoder->CreateLabel(); + encoder->EncodeJump(expand_done, NUM_ACTUAL_REG, Imm(num_expected), Condition::GE); + encoder->EncodeMov(NUM_EXPECTED_REG, Imm(num_expected)); + + MemRef expand_entrypoint(Reg(GetThreadReg(Arch::AARCH64), GetTarget().GetPtrRegType()), + GetDynInfo().GetExpandEntrypointTlsOffset()); + GetEncoder()->MakeCall(expand_entrypoint); + encoder->BindLabel(expand_done); + } + // Reset flags and setup method if (frame_info.GetSetupFrame()) { static_assert(CFrameMethod::End() == CFrameFlags::Start()); @@ -281,6 +301,26 @@ void Aarch64CallingConvention::GenerateNativePrologue(const FrameInfo &frame_inf SET_CFI_OFFSET(set_fp, encoder->GetCursorOffset()); } + if (IsDynCallMode() && GetDynInfo().IsCheckRequired()) { + static_assert(CallConvDynInfo::REG_NUM_ARGS == 1); + static_assert(CallConvDynInfo::REG_COUNT == CallConvDynInfo::REG_NUM_ARGS + 1); + + ASSERT(frame_info.GetSaveFrameAndLinkRegs()); + + constexpr auto NUM_ACTUAL_REG = GetTarget().GetParamReg(CallConvDynInfo::REG_NUM_ARGS); + constexpr auto NUM_EXPECTED_REG = GetTarget().GetParamReg(CallConvDynInfo::REG_COUNT); + auto num_expected = GetDynInfo().GetNumExpectedArgs(); + + auto expand_done = encoder->CreateLabel(); + encoder->EncodeJump(expand_done, NUM_ACTUAL_REG, Imm(num_expected), Condition::GE); + encoder->EncodeMov(NUM_EXPECTED_REG, Imm(num_expected)); + + MemRef expand_entrypoint(Reg(GetThreadReg(Arch::AARCH64), GetTarget().GetPtrRegType()), + GetDynInfo().GetExpandEntrypointTlsOffset()); + GetEncoder()->MakeCall(expand_entrypoint); + encoder->BindLabel(expand_done); + } + // Save callee-saved registers RegMask callee_regs_mask; VRegMask 
callee_vregs_mask; diff --git a/compiler/optimizer/code_generator/target/aarch64/encode.cpp b/compiler/optimizer/code_generator/target/aarch64/encode.cpp index a85a87b8295a75e81f5178a741de7dfee58b4b41..1365c615c313f7f04853a8b6fd48c09767b394bd 100644 --- a/compiler/optimizer/code_generator/target/aarch64/encode.cpp +++ b/compiler/optimizer/code_generator/target/aarch64/encode.cpp @@ -119,7 +119,7 @@ void Aarch64Encoder::EncodeJump(LabelHolder::LabelId id, Reg src0, Reg src1, Con void Aarch64Encoder::EncodeJump(LabelHolder::LabelId id, Reg src, Imm imm, Condition cc) { - auto value = GetIntValue(imm); + auto value = imm.GetAsInt(); if (value == 0) { EncodeJump(id, src, cc); return; @@ -148,8 +148,8 @@ void Aarch64Encoder::EncodeJumpTest(LabelHolder::LabelId id, Reg src, Imm imm, C { ASSERT(src.IsScalar()); - auto value = GetIntValue(imm); - if (CanEncodeImmLogical(value, imm.GetSize() > WORD_SIZE ? DOUBLE_WORD_SIZE : WORD_SIZE)) { + auto value = imm.GetAsInt(); + if (CanEncodeImmLogical(value, src.GetSize() > WORD_SIZE ? DOUBLE_WORD_SIZE : WORD_SIZE)) { GetMasm()->Tst(VixlReg(src), VixlImm(value)); auto label = static_cast(GetLabels())->GetLabel(id); GetMasm()->B(label, ConvertTest(cc)); @@ -691,13 +691,13 @@ void Aarch64Encoder::EncodeStringEquals(Reg dst, Reg str1, Reg str2, bool compre // code without additional operations wins. int tmp3 = str1.length() * ; // data size in bytes - tmp3 = tmp3 + DATA_OFFSET - DOUBLE_WORD_SIZE_BYTE; // offset of last 8 data bytes (last octet) + tmp3 = tmp3 + DATA_OFFSET - DOUBLE_WORD_SIZE_BYTES; // offset of last 8 data bytes (last octet) while (tmp3 >= DATA_OFFSET) { if ((str1 + tmp3) != (str2 + tmp3)) return false; tmp3 -= 8; } // less than 8 bytes left to load and check. possibly 0. - if (tmp3 == DATA_OFFSET - DOUBLE_WORD_SIZE_BYTE) return true; // 0 bytes left + if (tmp3 == DATA_OFFSET - DOUBLE_WORD_SIZE_BYTES) return true; // 0 bytes left // 1..7 bytes left. Read whole octet (8 bytes) including few bytes from object header. 
Shift off header bytes tmp1 = (str1 + tmp3); tmp2 = (str2 + tmp3); @@ -754,7 +754,7 @@ void Aarch64Encoder::EncodeStringEqualsMainLoop(Reg dst, Reg str1, Reg str2, Reg auto label_end = static_cast(GetLabels())->GetLabel(CreateLabel()); auto label_loop = static_cast(GetLabels())->GetLabel(CreateLabel()); // Now tmp3 is byte-counter. Use it as offset register as well. - GetMasm()->Add(tmp3, tmp3, data_offset - DOUBLE_WORD_SIZE_BYTE); + GetMasm()->Add(tmp3, tmp3, data_offset - DOUBLE_WORD_SIZE_BYTES); GetMasm()->B(label_loop_begin); GetMasm()->Bind(label_false); EncodeMov(dst, Imm(0)); @@ -770,14 +770,14 @@ void Aarch64Encoder::EncodeStringEqualsMainLoop(Reg dst, Reg str1, Reg str2, Reg EncodeLdr(tmp2_scoped, false, str2_last_word_mem); GetMasm()->Cmp(tmp1, tmp2); GetMasm()->B(label_cset, vixl::aarch64::Condition::ne); - GetMasm()->Sub(tmp3, tmp3, DOUBLE_WORD_SIZE_BYTE); + GetMasm()->Sub(tmp3, tmp3, DOUBLE_WORD_SIZE_BYTES); GetMasm()->Bind(label_loop_begin); GetMasm()->Cmp(tmp3, data_offset); GetMasm()->B(label_loop, vixl::aarch64::Condition::ge); } // case: 0..7 bytes left (tmp3 is DATA_OFFSET + -8..0) - GetMasm()->Cmp(tmp3, data_offset - DOUBLE_WORD_SIZE_BYTE); + GetMasm()->Cmp(tmp3, data_offset - DOUBLE_WORD_SIZE_BYTES); GetMasm()->B(label_cset, vixl::aarch64::Condition::eq); EncodeLdr(tmp1_scoped, false, str1_last_word_mem); EncodeLdr(tmp2_scoped, false, str2_last_word_mem); @@ -851,7 +851,7 @@ void Aarch64Encoder::IndexOfHandleLatin1Case(Reg str, Reg character, Reg idx, Re GetMasm()->Sub(tmp2.X(), tmp2.X(), tmp1.X()); } } - GetMasm()->Cmp(tmp2.X(), -DOUBLE_WORD_SIZE_BYTE); + GetMasm()->Cmp(tmp2.X(), -DOUBLE_WORD_SIZE_BYTES); auto label_small_loop = static_cast(GetLabels())->GetLabel(CreateLabel()); GetMasm()->B(label_small_loop, vixl::aarch64::Condition::gt); @@ -898,12 +898,12 @@ void Aarch64Encoder::IndexOfHandleLatin1CaseMainLoop( GetMasm()->Orr(tmp1.X(), tmp1.X(), LATIN1_MASK); GetMasm()->Bics(tmp1.X(), tmp3.X(), tmp1.X()); GetMasm()->B(label_has_zero, 
vixl::aarch64::Condition::ne); - GetMasm()->Add(tmp2.X(), tmp2.X(), DOUBLE_WORD_SIZE_BYTE); - GetMasm()->Cmp(tmp2.X(), -DOUBLE_WORD_SIZE_BYTE); // has enough bytes left to read whole register? + GetMasm()->Add(tmp2.X(), tmp2.X(), DOUBLE_WORD_SIZE_BYTES); + GetMasm()->Cmp(tmp2.X(), -DOUBLE_WORD_SIZE_BYTES); // has enough bytes left to read whole register? GetMasm()->B(label_loop, vixl::aarch64::Condition::lt); // yes. time to loop } GetMasm()->Cbz(tmp2.X(), character_is_zero ? label_not_found : label_not_found_restore_char); // done - GetMasm()->Mov(tmp2.X(), -DOUBLE_WORD_SIZE_BYTE); // setup data to read last 8 bytes. One more loop + GetMasm()->Mov(tmp2.X(), -DOUBLE_WORD_SIZE_BYTES); // setup data to read last 8 bytes. One more loop GetMasm()->B(label_loop); GetMasm()->Bind(label_small_loop); { @@ -985,7 +985,7 @@ void Aarch64Encoder::IndexOfHandleUtf16NormalCase(Reg str, Reg character, Reg id GetMasm()->Sub(tmp2.X(), tmp1.X(), vixl::aarch64::Operand(tmp2.X(), lsl, UTF16_IDX2OFFSET_SHIFT)); GetMasm()->Neg(tmp2.X(), tmp2.X()); } - GetMasm()->Cmp(tmp2.X(), -DOUBLE_WORD_SIZE_BYTE); + GetMasm()->Cmp(tmp2.X(), -DOUBLE_WORD_SIZE_BYTES); GetMasm()->B(label_small_loop, vixl::aarch64::Condition::gt); // clone character to the size of register (i.e. 4 x 16-bit characters) if (!character_is_zero) { @@ -1023,12 +1023,12 @@ void Aarch64Encoder::IndexOfHandleUtf16NormalCaseMainLoop( GetMasm()->Orr(tmp1.X(), tmp1.X(), UTF16_MASK); GetMasm()->Bics(tmp1.X(), tmp3.X(), tmp1.X()); GetMasm()->B(label_has_zero, vixl::aarch64::Condition::ne); - GetMasm()->Add(tmp2.X(), tmp2.X(), DOUBLE_WORD_SIZE_BYTE); - GetMasm()->Cmp(tmp2.X(), -DOUBLE_WORD_SIZE_BYTE); // has enough bytes left to read whole register? + GetMasm()->Add(tmp2.X(), tmp2.X(), DOUBLE_WORD_SIZE_BYTES); + GetMasm()->Cmp(tmp2.X(), -DOUBLE_WORD_SIZE_BYTES); // has enough bytes left to read whole register? GetMasm()->B(label_loop, vixl::aarch64::Condition::lt); // yes. time to loop } GetMasm()->Cbz(tmp2.X(), character_is_zero ? 
label_not_found : label_not_found_restore_char); // done - GetMasm()->Mov(tmp2.X(), -DOUBLE_WORD_SIZE_BYTE); // setup data to read last 8 bytes. One more loop + GetMasm()->Mov(tmp2.X(), -DOUBLE_WORD_SIZE_BYTES); // setup data to read last 8 bytes. One more loop GetMasm()->B(label_loop); GetMasm()->Bind(label_small_loop); { @@ -2104,12 +2104,12 @@ void Aarch64Encoder::EncodeShl(Reg dst, Reg src, Imm imm) return; } - GetMasm()->Lsl(VixlReg(dst), VixlReg(src), GetIntValue(imm)); + GetMasm()->Lsl(VixlReg(dst), VixlReg(src), imm.GetAsInt()); } void Aarch64Encoder::EncodeShr(Reg dst, Reg src, Imm imm) { - int64_t imm_value = static_cast(GetIntValue(imm)) & (dst.GetSize() - 1); + int64_t imm_value = static_cast(imm.GetAsInt()) & (dst.GetSize() - 1); ASSERT(dst.IsScalar() && "Invalid operand type"); auto rzero = GetRegfile()->GetZeroReg().GetId(); @@ -2125,7 +2125,7 @@ void Aarch64Encoder::EncodeShr(Reg dst, Reg src, Imm imm) void Aarch64Encoder::EncodeAShr(Reg dst, Reg src, Imm imm) { ASSERT(dst.IsScalar() && "Invalid operand type"); - GetMasm()->Asr(VixlReg(dst), VixlReg(src), GetIntValue(imm)); + GetMasm()->Asr(VixlReg(dst), VixlReg(src), imm.GetAsInt()); } void Aarch64Encoder::EncodeAnd(Reg dst, Reg src, Imm imm) @@ -2150,13 +2150,17 @@ void Aarch64Encoder::EncodeMov(Reg dst, Imm src) { if (dst.IsFloat()) { if (dst.GetSize() == WORD_SIZE) { - GetMasm()->Fmov(VixlVReg(dst), src.GetValue()); + GetMasm()->Fmov(VixlVReg(dst), src.GetAsFloat()); } else { - GetMasm()->Fmov(VixlVReg(dst), src.GetValue()); + GetMasm()->Fmov(VixlVReg(dst), src.GetAsDouble()); } return; } - GetMasm()->Mov(VixlReg(dst), VixlImm(src)); + if (dst.GetSize() > WORD_SIZE) { + GetMasm()->Mov(VixlReg(dst), VixlImm(src)); + } else { + GetMasm()->Mov(VixlReg(dst), VixlImm(static_cast(src.GetAsInt()))); + } } void Aarch64Encoder::EncodeLdr(Reg dst, bool dst_signed, MemRef mem) @@ -2527,7 +2531,7 @@ void Aarch64Encoder::EncodeStrz(Reg src, MemRef mem) GetMasm()->Str(VixlReg(src.As(INT64_TYPE)), 
ConvertMem(mem)); } -void Aarch64Encoder::EncodeSti(Imm src, MemRef mem) +void Aarch64Encoder::EncodeSti(int64_t src, uint8_t src_size_bytes, MemRef mem) { if (!ConvertMem(mem).IsValid()) { auto rzero = GetRegfile()->GetZeroReg(); @@ -2535,33 +2539,45 @@ void Aarch64Encoder::EncodeSti(Imm src, MemRef mem) return; } - if (src.GetType().IsFloat()) { - if (src.GetSize() == WORD_SIZE) { - ScopedTmpRegF32 tmp_reg(this); - GetMasm()->Fmov(VixlVReg(tmp_reg).S(), src.GetValue()); - EncodeStr(tmp_reg, mem); - } else { - ScopedTmpRegF64 tmp_reg(this); - GetMasm()->Fmov(VixlVReg(tmp_reg).D(), src.GetValue()); - EncodeStr(tmp_reg, mem); - } - return; - } - ScopedTmpRegU64 tmp_reg(this); auto tmp = VixlReg(tmp_reg); GetMasm()->Mov(tmp, VixlImm(src)); - if (src.GetSize() == BYTE_SIZE) { + if (src_size_bytes == 1U) { GetMasm()->Strb(tmp, ConvertMem(mem)); return; } - if (src.GetSize() == HALF_SIZE) { + if (src_size_bytes == HALF_WORD_SIZE_BYTES) { GetMasm()->Strh(tmp, ConvertMem(mem)); return; } + ASSERT((src_size_bytes == WORD_SIZE_BYTES) || (src_size_bytes == DOUBLE_WORD_SIZE_BYTES)); GetMasm()->Str(tmp, ConvertMem(mem)); } +void Aarch64Encoder::EncodeSti(float src, MemRef mem) +{ + if (!ConvertMem(mem).IsValid()) { + auto rzero = GetRegfile()->GetZeroReg(); + EncodeStr(rzero, mem); + return; + } + ScopedTmpRegF32 tmp_reg(this); + GetMasm()->Fmov(VixlVReg(tmp_reg).S(), src); + EncodeStr(tmp_reg, mem); +} + +void Aarch64Encoder::EncodeSti(double src, MemRef mem) +{ + if (!ConvertMem(mem).IsValid()) { + auto rzero = GetRegfile()->GetZeroReg(); + EncodeStr(rzero, mem); + return; + } + ScopedTmpRegF64 tmp_reg(this); + GetMasm()->Fmov(VixlVReg(tmp_reg).D(), src); + EncodeStr(tmp_reg, mem); +} + void Aarch64Encoder::EncodeMemCopy(MemRef mem_from, MemRef mem_to, size_t size) { if (!ConvertMem(mem_from).IsValid() || !ConvertMem(mem_to).IsValid()) { @@ -2691,7 +2707,7 @@ void Aarch64Encoder::EncodeSelectTest(Reg dst, Reg src0, Reg src1, Reg src2, Reg void 
Aarch64Encoder::EncodeSelectTest(Reg dst, Reg src0, Reg src1, Reg src2, Imm imm, Condition cc) { ASSERT(!src0.IsFloat() && !src1.IsFloat() && !src2.IsFloat()); - ASSERT(CanEncodeImmLogical(GetIntValue(imm), imm.GetSize() > WORD_SIZE ? DOUBLE_WORD_SIZE : WORD_SIZE)); + ASSERT(CanEncodeImmLogical(imm.GetAsInt(), src2.GetSize() > WORD_SIZE ? DOUBLE_WORD_SIZE : WORD_SIZE)); GetMasm()->Tst(VixlReg(src2), VixlImm(imm)); GetMasm()->Csel(VixlReg(dst), VixlReg(src0), VixlReg(src1), ConvertTest(cc)); } @@ -2817,7 +2833,7 @@ void Aarch64Encoder::EncodeOrNot(Reg dst, Reg src0, Shift src1) void Aarch64Encoder::EncodeExtractBits(Reg dst, Reg src0, Imm imm1, Imm imm2) { - GetMasm()->Ubfx(VixlReg(dst), VixlReg(src0), GetIntValue(imm1), GetIntValue(imm2)); + GetMasm()->Ubfx(VixlReg(dst), VixlReg(src0), imm1.GetAsInt(), imm2.GetAsInt()); } void Aarch64Encoder::EncodeAndNot(Reg dst, Reg src0, Reg src1) @@ -2876,6 +2892,12 @@ bool Aarch64Encoder::CanEncodeImmAddSubCmp(int64_t imm, [[maybe_unused]] uint32_ bool Aarch64Encoder::CanEncodeImmLogical(uint64_t imm, uint32_t size) { +#ifndef NDEBUG + if (size < DOUBLE_WORD_SIZE) { + // Test if the highest part is consistent: + ASSERT((imm >> size == 0) || ((~imm) >> size == 0)); + } +#endif // NDEBUG return vixl::aarch64::Assembler::IsImmLogical(imm, size); } @@ -3046,12 +3068,12 @@ void Aarch64Encoder::LoadStoreRegisters(RegMask registers, ssize_t slot, size_t } // Construct single add for big offset size_t sp_offset = 0; - auto last_offset = (slot + last_reg - start_reg) * DOUBLE_WORD_SIZE_BYTE; + auto last_offset = (slot + last_reg - start_reg) * DOUBLE_WORD_SIZE_BYTES; if (!vixl::aarch64::Assembler::IsImmLSPair(last_offset, vixl::aarch64::kXRegSizeInBytesLog2)) { ScopedTmpReg lr_reg(this, true); auto tmp = VixlReg(lr_reg); - sp_offset = slot * DOUBLE_WORD_SIZE_BYTE; + sp_offset = slot * DOUBLE_WORD_SIZE_BYTES; slot = 0; if (vixl::aarch64::Assembler::IsImmAddSub(sp_offset)) { GetMasm()->Add(tmp, vixl::aarch64::sp, VixlImm(sp_offset)); @@ 
-3072,15 +3094,15 @@ void Aarch64Encoder::LoadStoreRegisters(RegMask registers, bool is_fp, int32_t s return; } - int32_t max_offset = (slot + helpers::ToSigned(registers.GetMaxRegister())) * DOUBLE_WORD_SIZE_BYTE; - int32_t min_offset = (slot + helpers::ToSigned(registers.GetMinRegister())) * DOUBLE_WORD_SIZE_BYTE; + int32_t max_offset = (slot + helpers::ToSigned(registers.GetMaxRegister())) * DOUBLE_WORD_SIZE_BYTES; + int32_t min_offset = (slot + helpers::ToSigned(registers.GetMinRegister())) * DOUBLE_WORD_SIZE_BYTES; ScopedTmpReg tmp_reg(this, true); // Construct single add for big offset if (!vixl::aarch64::Assembler::IsImmLSPair(min_offset, vixl::aarch64::kXRegSizeInBytesLog2) || !vixl::aarch64::Assembler::IsImmLSPair(max_offset, vixl::aarch64::kXRegSizeInBytesLog2)) { auto lr_reg = VixlReg(tmp_reg); - ssize_t sp_offset = slot * DOUBLE_WORD_SIZE_BYTE; + ssize_t sp_offset = slot * DOUBLE_WORD_SIZE_BYTES; if (vixl::aarch64::Assembler::IsImmAddSub(sp_offset)) { GetMasm()->Add(lr_reg, VixlReg(base), VixlImm(sp_offset)); } else { @@ -3121,17 +3143,17 @@ void Aarch64Encoder::LoadStoreRegisters(RegMask registers, bool is_fp, int32_t s static constexpr ssize_t OFFSET = 2; if constexpr (IS_STORE) { // NOLINT GetMasm()->Stp(last_reg, reg, - MemOperand(base_reg, (slot + index - OFFSET) * DOUBLE_WORD_SIZE_BYTE)); + MemOperand(base_reg, (slot + index - OFFSET) * DOUBLE_WORD_SIZE_BYTES)); } else { // NOLINT GetMasm()->Ldp(last_reg, reg, - MemOperand(base_reg, (slot + index - OFFSET) * DOUBLE_WORD_SIZE_BYTE)); + MemOperand(base_reg, (slot + index - OFFSET) * DOUBLE_WORD_SIZE_BYTES)); } last_id = -1; } else { if constexpr (IS_STORE) { // NOLINT - GetMasm()->Str(last_reg, MemOperand(base_reg, (slot + last_index - 1) * DOUBLE_WORD_SIZE_BYTE)); + GetMasm()->Str(last_reg, MemOperand(base_reg, (slot + last_index - 1) * DOUBLE_WORD_SIZE_BYTES)); } else { // NOLINT - GetMasm()->Ldr(last_reg, MemOperand(base_reg, (slot + last_index - 1) * DOUBLE_WORD_SIZE_BYTE)); + 
GetMasm()->Ldr(last_reg, MemOperand(base_reg, (slot + last_index - 1) * DOUBLE_WORD_SIZE_BYTES)); } last_id = id; last_index = index; @@ -3145,9 +3167,9 @@ void Aarch64Encoder::LoadStoreRegisters(RegMask registers, bool is_fp, int32_t s auto last_reg = CPURegister(last_id, vixl::aarch64::kXRegSize, is_fp ? CPURegister::kVRegister : CPURegister::kRegister); if constexpr (IS_STORE) { // NOLINT - GetMasm()->Str(last_reg, MemOperand(base_reg, (slot + last_index - 1) * DOUBLE_WORD_SIZE_BYTE)); + GetMasm()->Str(last_reg, MemOperand(base_reg, (slot + last_index - 1) * DOUBLE_WORD_SIZE_BYTES)); } else { // NOLINT - GetMasm()->Ldr(last_reg, MemOperand(base_reg, (slot + last_index - 1) * DOUBLE_WORD_SIZE_BYTE)); + GetMasm()->Ldr(last_reg, MemOperand(base_reg, (slot + last_index - 1) * DOUBLE_WORD_SIZE_BYTES)); } } } @@ -3173,19 +3195,19 @@ void Aarch64Encoder::LoadStoreRegistersLoop(RegMask registers, ssize_t slot, siz if (next_reg.IsValid() && (next_reg.GetCode() - 1 == curr_reg.GetCode())) { if constexpr (IS_STORE) { // NOLINT GetMasm()->Stp(curr_reg, next_reg, - MemOperand(base_reg, (slot + curr_reg.GetCode() - start_reg) * DOUBLE_WORD_SIZE_BYTE)); + MemOperand(base_reg, (slot + curr_reg.GetCode() - start_reg) * DOUBLE_WORD_SIZE_BYTES)); } else { // NOLINT GetMasm()->Ldp(curr_reg, next_reg, - MemOperand(base_reg, (slot + curr_reg.GetCode() - start_reg) * DOUBLE_WORD_SIZE_BYTE)); + MemOperand(base_reg, (slot + curr_reg.GetCode() - start_reg) * DOUBLE_WORD_SIZE_BYTES)); } next_reg = get_next_reg(); } else { if constexpr (IS_STORE) { // NOLINT GetMasm()->Str(curr_reg, - MemOperand(base_reg, (slot + curr_reg.GetCode() - start_reg) * DOUBLE_WORD_SIZE_BYTE)); + MemOperand(base_reg, (slot + curr_reg.GetCode() - start_reg) * DOUBLE_WORD_SIZE_BYTES)); } else { // NOLINT GetMasm()->Ldr(curr_reg, - MemOperand(base_reg, (slot + curr_reg.GetCode() - start_reg) * DOUBLE_WORD_SIZE_BYTE)); + MemOperand(base_reg, (slot + curr_reg.GetCode() - start_reg) * DOUBLE_WORD_SIZE_BYTES)); } } } @@ 
-3193,7 +3215,7 @@ void Aarch64Encoder::LoadStoreRegistersLoop(RegMask registers, ssize_t slot, siz void Aarch64Encoder::PushRegisters(RegMask registers, bool is_fp, [[maybe_unused]] bool align) { - static constexpr size_t PAIR_OFFSET = 2 * DOUBLE_WORD_SIZE_BYTE; + static constexpr size_t PAIR_OFFSET = 2 * DOUBLE_WORD_SIZE_BYTES; Register last_reg = INVALID_REG; for (size_t i = 0; i < registers.size(); i++) { if (registers[i]) { @@ -3216,11 +3238,11 @@ void Aarch64Encoder::PushRegisters(RegMask registers, bool is_fp, [[maybe_unused if (last_reg != INVALID_REG) { if (is_fp) { GetMasm()->str(vixl::aarch64::VRegister(last_reg, DOUBLE_WORD_SIZE), - MemOperand(vixl::aarch64::sp, align ? -PAIR_OFFSET : -DOUBLE_WORD_SIZE_BYTE, + MemOperand(vixl::aarch64::sp, align ? -PAIR_OFFSET : -DOUBLE_WORD_SIZE_BYTES, vixl::aarch64::AddrMode::PreIndex)); } else { GetMasm()->str(vixl::aarch64::Register(last_reg, DOUBLE_WORD_SIZE), - MemOperand(vixl::aarch64::sp, align ? -PAIR_OFFSET : -DOUBLE_WORD_SIZE_BYTE, + MemOperand(vixl::aarch64::sp, align ? -PAIR_OFFSET : -DOUBLE_WORD_SIZE_BYTES, vixl::aarch64::AddrMode::PreIndex)); } } @@ -3228,17 +3250,17 @@ void Aarch64Encoder::PushRegisters(RegMask registers, bool is_fp, [[maybe_unused void Aarch64Encoder::PopRegisters(RegMask registers, bool is_fp, [[maybe_unused]] bool align) { - static constexpr size_t PAIR_OFFSET = 2 * DOUBLE_WORD_SIZE_BYTE; + static constexpr size_t PAIR_OFFSET = 2 * DOUBLE_WORD_SIZE_BYTES; Register last_reg; if ((registers.count() & 1U) != 0) { last_reg = registers.GetMaxRegister(); if (is_fp) { GetMasm()->ldr(vixl::aarch64::VRegister(last_reg, DOUBLE_WORD_SIZE), - MemOperand(vixl::aarch64::sp, align ? PAIR_OFFSET : DOUBLE_WORD_SIZE_BYTE, + MemOperand(vixl::aarch64::sp, align ? PAIR_OFFSET : DOUBLE_WORD_SIZE_BYTES, vixl::aarch64::AddrMode::PostIndex)); } else { GetMasm()->ldr(vixl::aarch64::Register(last_reg, DOUBLE_WORD_SIZE), - MemOperand(vixl::aarch64::sp, align ? 
PAIR_OFFSET : DOUBLE_WORD_SIZE_BYTE, + MemOperand(vixl::aarch64::sp, align ? PAIR_OFFSET : DOUBLE_WORD_SIZE_BYTES, vixl::aarch64::AddrMode::PostIndex)); } registers.reset(last_reg); diff --git a/compiler/optimizer/code_generator/target/aarch64/target.h b/compiler/optimizer/code_generator/target/aarch64/target.h index 3cc73e8afc138d84a327cd3c688a2340493d5348..ce7902500c913d281f9964bd1d92349080a5ee7b 100644 --- a/compiler/optimizer/code_generator/target/aarch64/target.h +++ b/compiler/optimizer/code_generator/target/aarch64/target.h @@ -209,22 +209,7 @@ static inline vixl::aarch64::Operand VixlImm(const int64_t imm) static inline vixl::aarch64::Operand VixlImm(Imm imm) { - ASSERT(imm.IsValid()); - if (imm.GetType() == INT64_TYPE) { - return vixl::aarch64::Operand(imm.GetValue()); - } - if (imm.GetType() == INT32_TYPE) { - return vixl::aarch64::Operand(imm.GetValue()); - } - if (imm.GetType() == INT16_TYPE) { - return vixl::aarch64::Operand(imm.GetValue()); - } - if (imm.GetType() == INT8_TYPE) { - return vixl::aarch64::Operand(imm.GetValue()); - } - // Invalid converted register - UNREACHABLE(); - return vixl::aarch64::Operand(imm.GetValue()); + return vixl::aarch64::Operand(imm.GetAsInt()); } static inline vixl::aarch64::MemOperand ConvertMem(MemRef mem) @@ -532,7 +517,9 @@ public: // zerod high part: [reg.size, 64) void EncodeStrz(Reg src, MemRef mem) override; - void EncodeSti(Imm src, MemRef mem) override; + void EncodeSti(int64_t src, uint8_t src_size_bytes, MemRef mem) override; + void EncodeSti(double src, MemRef mem) override; + void EncodeSti(float src, MemRef mem) override; // size must be 8, 16,32 or 64 void EncodeMemCopy(MemRef mem_from, MemRef mem_to, size_t size) override; // size must be 8, 16,32 or 64 diff --git a/compiler/optimizer/code_generator/target/amd64/callconv.cpp b/compiler/optimizer/code_generator/target/amd64/callconv.cpp index aadfe1ff1c5ad457c55a0ca876309d8ddc5018dc..ce0f71ee3726548a96fd99ea2223844081b4d54f 100644 --- 
a/compiler/optimizer/code_generator/target/amd64/callconv.cpp +++ b/compiler/optimizer/code_generator/target/amd64/callconv.cpp @@ -56,7 +56,7 @@ size_t Amd64CallingConvention::PushRegs(RegList regs, RegList vregs) uint32_t ii {MAX_NUM_REGS - i - 1}; if (vregs.Has(ii)) { ++vregs_count; - GetMasm()->sub(asmjit::x86::rsp, asmjit::imm(DOUBLE_WORD_SIZE_BYTE)); + GetMasm()->sub(asmjit::x86::rsp, asmjit::imm(DOUBLE_WORD_SIZE_BYTES)); GetMasm()->movsd(asmjit::x86::ptr(asmjit::x86::rsp), asmjit::x86::xmm(ii)); } } @@ -88,7 +88,7 @@ size_t Amd64CallingConvention::PopRegs(RegList regs, RegList vregs) if (vregs.Has(i)) { ++vregs_count; GetMasm()->movsd(asmjit::x86::xmm(i), asmjit::x86::ptr(asmjit::x86::rsp)); - GetMasm()->add(asmjit::x86::rsp, asmjit::imm(DOUBLE_WORD_SIZE_BYTE)); + GetMasm()->add(asmjit::x86::rsp, asmjit::imm(DOUBLE_WORD_SIZE_BYTES)); } } @@ -138,15 +138,33 @@ void Amd64CallingConvention::GeneratePrologue([[maybe_unused]] const FrameInfo & encoder->EncodeMov(fp_reg, sp_reg); SET_CFI_OFFSET(set_fp, encoder->GetCursorOffset()); - encoder->EncodeSub(sp_reg, sp_reg, Imm(2U * DOUBLE_WORD_SIZE_BYTE)); - encoder->EncodeStr(GetTarget().GetParamReg(0), MemRef(sp_reg, DOUBLE_WORD_SIZE_BYTE)); + if (IsDynCallMode() && GetDynInfo().IsCheckRequired()) { + static_assert(CallConvDynInfo::REG_NUM_ARGS == 1); + static_assert(CallConvDynInfo::REG_COUNT == CallConvDynInfo::REG_NUM_ARGS + 1); + + constexpr auto NUM_ACTUAL_REG = GetTarget().GetParamReg(CallConvDynInfo::REG_NUM_ARGS); + constexpr auto NUM_EXPECTED_REG = GetTarget().GetParamReg(CallConvDynInfo::REG_COUNT); + auto num_expected = GetDynInfo().GetNumExpectedArgs(); + + auto expand_done = encoder->CreateLabel(); + encoder->EncodeJump(expand_done, NUM_ACTUAL_REG, Imm(num_expected), Condition::GE); + encoder->EncodeMov(NUM_EXPECTED_REG, Imm(num_expected)); + + MemRef expand_entrypoint(Reg(GetThreadReg(Arch::X86_64), GetTarget().GetPtrRegType()), + GetDynInfo().GetExpandEntrypointTlsOffset()); + 
GetEncoder()->MakeCall(expand_entrypoint); + encoder->BindLabel(expand_done); + } + + encoder->EncodeSub(sp_reg, sp_reg, Imm(2U * DOUBLE_WORD_SIZE_BYTES)); + encoder->EncodeStr(GetTarget().GetParamReg(0), MemRef(sp_reg, DOUBLE_WORD_SIZE_BYTES)); // Reset OSR flag and set HasFloatRegsFlag auto flags {static_cast(frame_info.GetHasFloatRegs()) << CFrameLayout::HasFloatRegsFlag::START_BIT}; - encoder->EncodeSti(Imm(flags), MemRef(sp_reg)); + encoder->EncodeSti(flags, sizeof(flags), MemRef(sp_reg)); // Allocate space for locals - encoder->EncodeSub(sp_reg, sp_reg, Imm(DOUBLE_WORD_SIZE_BYTE * (CFrameSlots::Start() - CFrameData::Start()))); + encoder->EncodeSub(sp_reg, sp_reg, Imm(DOUBLE_WORD_SIZE_BYTES * (CFrameSlots::Start() - CFrameData::Start()))); static_assert((CFrameLayout::GetLocalsCount() & 1U) == 0); RegList callee_regs {GetCalleeRegsMask(Arch::X86_64, false).GetValue()}; @@ -159,7 +177,7 @@ void Amd64CallingConvention::GeneratePrologue([[maybe_unused]] const FrameInfo & encoder->EncodeSub( sp_reg, sp_reg, Imm((fl.GetSpillsCount() + fl.GetCallerRegistersCount(false) + fl.GetCallerRegistersCount(true)) * - DOUBLE_WORD_SIZE_BYTE)); + DOUBLE_WORD_SIZE_BYTES)); } void Amd64CallingConvention::GenerateEpilogue([[maybe_unused]] const FrameInfo &frame_info, @@ -176,7 +194,7 @@ void Amd64CallingConvention::GenerateEpilogue([[maybe_unused]] const FrameInfo & encoder->EncodeAdd( sp_reg, sp_reg, Imm((fl.GetSpillsCount() + fl.GetCallerRegistersCount(false) + fl.GetCallerRegistersCount(true)) * - DOUBLE_WORD_SIZE_BYTE)); + DOUBLE_WORD_SIZE_BYTES)); PopRegs(RegList(GetCalleeRegsMask(Arch::X86_64, false).GetValue()), RegList(GetCalleeRegsMask(Arch::X86_64, true).GetValue())); @@ -186,7 +204,7 @@ void Amd64CallingConvention::GenerateEpilogue([[maybe_unused]] const FrameInfo & ASSERT(!IsOsrMode()); // Support restoring of LR and FP registers once OSR is supported in x86_64 static_assert(!ArchTraits::SUPPORT_OSR); - constexpr auto SHIFT = DOUBLE_WORD_SIZE_BYTE * (2 + 
CFrameSlots::Start() - CFrameData::Start()); + constexpr auto SHIFT = DOUBLE_WORD_SIZE_BYTES * (2 + CFrameSlots::Start() - CFrameData::Start()); encoder->EncodeAdd(sp_reg, sp_reg, Imm(SHIFT)); GetMasm()->pop(asmjit::x86::rbp); // frame pointer diff --git a/compiler/optimizer/code_generator/target/amd64/encode.cpp b/compiler/optimizer/code_generator/target/amd64/encode.cpp index 3f9b569adc25e132d75e08c3e31b662ba15d3e59..a03c727c049a06c4426a79f463b4d5321e0e585e 100644 --- a/compiler/optimizer/code_generator/target/amd64/encode.cpp +++ b/compiler/optimizer/code_generator/target/amd64/encode.cpp @@ -177,7 +177,7 @@ void Amd64Encoder::EncodeJump(LabelHolder::LabelId id, Reg src, Imm imm, Conditi { ASSERT(src.IsScalar()); - auto imm_val = ImmToSignedInt(imm); + auto imm_val = imm.GetAsInt(); if (imm_val == 0) { EncodeJump(id, src, cc); return; @@ -218,7 +218,7 @@ void Amd64Encoder::EncodeJumpTest(LabelHolder::LabelId id, Reg src, Imm imm, Con { ASSERT(src.IsScalar()); - auto imm_val = ImmToSignedInt(imm); + auto imm_val = imm.GetAsInt(); if (ImmFitsSize(imm_val, src.GetSize())) { auto label = static_cast(GetLabels())->GetLabel(id); @@ -1129,8 +1129,10 @@ void Amd64Encoder::EncodeDiv(Reg dst, bool dst_signed, Reg src0, Reg src1) auto neg_path = GetMasm()->newLabel(); auto crossroad = GetMasm()->newLabel(); - GetMasm()->cmp(ArchReg(src1), asmjit::imm(-1)); - GetMasm()->je(neg_path); + if (dst_signed) { + GetMasm()->cmp(ArchReg(src1), asmjit::imm(-1)); + GetMasm()->je(neg_path); + } if (dst.GetId() != ConvertRegNumber(asmjit::x86::rdx.id())) { GetMasm()->push(asmjit::x86::rdx); @@ -1204,8 +1206,10 @@ void Amd64Encoder::EncodeMod(Reg dst, bool dst_signed, Reg src0, Reg src1) auto zero_path = GetMasm()->newLabel(); auto crossroad = GetMasm()->newLabel(); - GetMasm()->cmp(ArchReg(src1), asmjit::imm(-1)); - GetMasm()->je(zero_path); + if (dst_signed) { + GetMasm()->cmp(ArchReg(src1), asmjit::imm(-1)); + GetMasm()->je(zero_path); + } if (dst.GetId() != 
ConvertRegNumber(asmjit::x86::rax.id())) { GetMasm()->push(asmjit::x86::rax); @@ -1448,7 +1452,7 @@ void Amd64Encoder::EncodeAdd(Reg dst, Reg src, Imm imm) return; } - auto imm_val = ImmToSignedInt(imm); + auto imm_val = imm.GetAsInt(); auto size = std::max(WORD_SIZE, dst.GetSize()); if (ImmFitsSize(imm_val, size)) { GetMasm()->lea(ArchReg(dst, size), asmjit::x86::ptr(ArchReg(src, size), imm_val)); @@ -1471,7 +1475,7 @@ void Amd64Encoder::EncodeSub(Reg dst, Reg src, Imm imm) return; } - auto imm_val = -ImmToSignedInt(imm); + auto imm_val = -imm.GetAsInt(); auto size = std::max(WORD_SIZE, dst.GetSize()); if (ImmFitsSize(imm_val, size)) { GetMasm()->lea(ArchReg(dst, size), asmjit::x86::ptr(ArchReg(src, size), imm_val)); @@ -1514,7 +1518,7 @@ void Amd64Encoder::EncodeAnd(Reg dst, Reg src, Imm imm) ASSERT(dst.IsScalar()); auto imm_val = ImmToUnsignedInt(imm); - switch (imm.GetSize()) { + switch (src.GetSize()) { case BYTE_SIZE: imm_val |= ~uint64_t(0xFF); // NOLINT break; @@ -1600,12 +1604,12 @@ void Amd64Encoder::EncodeMov(Reg dst, Imm src) if (dst.GetType() == FLOAT32_TYPE) { ScopedTmpRegU32 tmp_reg(this); - auto val = bit_cast(src.GetValue()); + auto val = bit_cast(src.GetAsFloat()); GetMasm()->mov(ArchReg(tmp_reg), asmjit::imm(val)); GetMasm()->movd(ArchVReg(dst), ArchReg(tmp_reg)); } else { ScopedTmpRegU64 tmp_reg(this); - auto val = bit_cast(src.GetValue()); + auto val = bit_cast(src.GetAsDouble()); GetMasm()->mov(ArchReg(tmp_reg), asmjit::imm(val)); GetMasm()->movq(ArchVReg(dst), ArchReg(tmp_reg)); } @@ -1697,35 +1701,36 @@ void Amd64Encoder::EncodeStrz(Reg src, MemRef mem) } } -void Amd64Encoder::EncodeSti(Imm src, MemRef mem) +void Amd64Encoder::EncodeSti(int64_t src, uint8_t src_size_bytes, MemRef mem) { - if (src.IsFloat()) { - if (src.GetType() == FLOAT32_TYPE) { - EncodeSti(Imm(bit_cast(src.GetValue())), mem); - } else { - EncodeSti(Imm(bit_cast(src.GetValue())), mem); - } - return; - } - + ASSERT(src_size_bytes <= 8U); auto m = 
ArchMem(mem).Prepare(GetMasm()); - if (src.GetSize() <= HALF_SIZE) { - m.setSize(src.GetSize() / BITS_PER_BYTE); - GetMasm()->mov(m, ArchImm(src)); + if (src_size_bytes <= HALF_WORD_SIZE_BYTES) { + m.setSize(src_size_bytes); + GetMasm()->mov(m, asmjit::imm(src)); } else { - m.setSize(DOUBLE_WORD_SIZE_BYTE); + m.setSize(DOUBLE_WORD_SIZE_BYTES); - auto imm_val = ImmToSignedInt(src); - if (ImmFitsSize(imm_val, DOUBLE_WORD_SIZE)) { - GetMasm()->mov(m, asmjit::imm(imm_val)); + if (ImmFitsSize(src, DOUBLE_WORD_SIZE)) { + GetMasm()->mov(m, asmjit::imm(src)); } else { ScopedTmpRegU64 tmp_reg(this); - GetMasm()->mov(ArchReg(tmp_reg), asmjit::imm(imm_val)); + GetMasm()->mov(ArchReg(tmp_reg), asmjit::imm(src)); GetMasm()->mov(m, ArchReg(tmp_reg)); } } } +void Amd64Encoder::EncodeSti(float src, MemRef mem) +{ + EncodeSti(bit_cast(src), sizeof(int32_t), mem); +} + +void Amd64Encoder::EncodeSti(double src, MemRef mem) +{ + EncodeSti(bit_cast(src), sizeof(int64_t), mem); +} + void Amd64Encoder::EncodeMemCopy(MemRef mem_from, MemRef mem_to, size_t size) { ScopedTmpRegU64 tmp_reg(this); @@ -1854,7 +1859,7 @@ void Amd64Encoder::EncodeSelect(Reg dst, Reg src0, Reg src1, Reg src2, Imm imm, { ASSERT(!src0.IsFloat() && !src1.IsFloat() && !src2.IsFloat()); - auto imm_val = ImmToSignedInt(imm); + auto imm_val = imm.GetAsInt(); if (ImmFitsSize(imm_val, src2.GetSize())) { GetMasm()->cmp(ArchReg(src2), asmjit::imm(imm_val)); } else { @@ -1897,7 +1902,7 @@ void Amd64Encoder::EncodeSelectTest(Reg dst, Reg src0, Reg src1, Reg src2, Imm i { ASSERT(!src0.IsFloat() && !src1.IsFloat() && !src2.IsFloat()); - auto imm_val = ImmToSignedInt(imm); + auto imm_val = imm.GetAsInt(); if (ImmFitsSize(imm_val, src2.GetSize())) { GetMasm()->test(ArchReg(src2), asmjit::imm(imm_val)); } else { @@ -1929,22 +1934,22 @@ void Amd64Encoder::EncodeLdp(Reg dst0, Reg dst1, bool dst_signed, MemRef mem) if (dst0.GetType() == FLOAT32_TYPE) { GetMasm()->movss(ArchVReg(dst0), m); - m.addOffset(WORD_SIZE_BYTE); + 
m.addOffset(WORD_SIZE_BYTES); GetMasm()->movss(ArchVReg(dst1), m); } else { GetMasm()->movsd(ArchVReg(dst0), m); - m.addOffset(DOUBLE_WORD_SIZE_BYTE); + m.addOffset(DOUBLE_WORD_SIZE_BYTES); GetMasm()->movsd(ArchVReg(dst1), m); } return; } if (dst_signed && dst0.GetSize() == WORD_SIZE) { - m.setSize(WORD_SIZE_BYTE); + m.setSize(WORD_SIZE_BYTES); GetMasm()->movsxd(ArchReg(dst0, DOUBLE_WORD_SIZE), m); - m.addOffset(WORD_SIZE_BYTE); + m.addOffset(WORD_SIZE_BYTES); GetMasm()->movsxd(ArchReg(dst1, DOUBLE_WORD_SIZE), m); return; } @@ -1966,12 +1971,12 @@ void Amd64Encoder::EncodeStp(Reg src0, Reg src1, MemRef mem) if (src0.GetType() == FLOAT32_TYPE) { GetMasm()->movss(m, ArchVReg(src0)); - m.addOffset(WORD_SIZE_BYTE); + m.addOffset(WORD_SIZE_BYTES); GetMasm()->movss(m, ArchVReg(src1)); } else { GetMasm()->movsd(m, ArchVReg(src0)); - m.addOffset(DOUBLE_WORD_SIZE_BYTE); + m.addOffset(DOUBLE_WORD_SIZE_BYTES); GetMasm()->movsd(m, ArchVReg(src1)); } return; @@ -2182,6 +2187,12 @@ bool Amd64Encoder::CanEncodeScale(uint64_t imm, [[maybe_unused]] uint32_t size) bool Amd64Encoder::CanEncodeImmLogical(uint64_t imm, uint32_t size) { +#ifndef NDEBUG + if (size < DOUBLE_WORD_SIZE) { + // Test if the highest part is consistent: + ASSERT((imm >> size == 0) || ((~imm) >> size == 0)); + } +#endif // NDEBUG return ImmFitsSize(imm, size); } @@ -2423,7 +2434,7 @@ void Amd64Encoder::LoadStoreRegisters(RegMask registers, ssize_t slot, size_t st continue; } - asmjit::x86::Mem mem = asmjit::x86::ptr(asmjit::x86::rsp, (slot + i - start_reg) * DOUBLE_WORD_SIZE_BYTE); + asmjit::x86::Mem mem = asmjit::x86::ptr(asmjit::x86::rsp, (slot + i - start_reg) * DOUBLE_WORD_SIZE_BYTES); if constexpr (IS_STORE) { // NOLINT if (is_fp) { @@ -2464,7 +2475,7 @@ void Amd64Encoder::LoadStoreRegisters(RegMask registers, bool is_fp, int32_t slo } // `-1` because we've incremented `index` in advance - asmjit::x86::Mem mem = asmjit::x86::ptr(base_reg, (slot + index - 1) * DOUBLE_WORD_SIZE_BYTE); + asmjit::x86::Mem mem = 
asmjit::x86::ptr(base_reg, (slot + index - 1) * DOUBLE_WORD_SIZE_BYTES); if constexpr (IS_STORE) { // NOLINT if (is_fp) { @@ -2487,7 +2498,7 @@ void Amd64Encoder::PushRegisters(RegMask registers, bool is_fp, bool align) for (size_t i = 0; i < registers.size(); i++) { if (registers[i]) { if (is_fp) { - GetMasm()->sub(asmjit::x86::rsp, DOUBLE_WORD_SIZE_BYTE); + GetMasm()->sub(asmjit::x86::rsp, DOUBLE_WORD_SIZE_BYTES); GetMasm()->movsd(asmjit::x86::ptr(asmjit::x86::rsp), ArchVReg(Reg(i, FLOAT64_TYPE))); } else { GetMasm()->push(asmjit::x86::gpq(ConvertRegNumber(i))); @@ -2495,20 +2506,20 @@ void Amd64Encoder::PushRegisters(RegMask registers, bool is_fp, bool align) } } if (align && (registers.count() & 1U) != 0) { - GetMasm()->sub(asmjit::x86::rsp, DOUBLE_WORD_SIZE_BYTE); + GetMasm()->sub(asmjit::x86::rsp, DOUBLE_WORD_SIZE_BYTES); } } void Amd64Encoder::PopRegisters(RegMask registers, bool is_fp, bool align) { if (align && (registers.count() & 1U) != 0) { - GetMasm()->add(asmjit::x86::rsp, DOUBLE_WORD_SIZE_BYTE); + GetMasm()->add(asmjit::x86::rsp, DOUBLE_WORD_SIZE_BYTES); } for (ssize_t i = registers.size() - 1; i >= 0; i--) { if (registers[i]) { if (is_fp) { GetMasm()->movsd(ArchVReg(Reg(i, FLOAT64_TYPE)), asmjit::x86::ptr(asmjit::x86::rsp)); - GetMasm()->add(asmjit::x86::rsp, DOUBLE_WORD_SIZE_BYTE); + GetMasm()->add(asmjit::x86::rsp, DOUBLE_WORD_SIZE_BYTES); } else { GetMasm()->pop(asmjit::x86::gpq(ConvertRegNumber(i))); } @@ -2520,7 +2531,7 @@ template void Amd64Encoder::CopyArrayToXmm(Reg xmm, const std::array &arr) { static constexpr auto SIZE {N * sizeof(T)}; - static_assert((SIZE == DOUBLE_WORD_SIZE_BYTE) || (SIZE == 2U * DOUBLE_WORD_SIZE_BYTE)); + static_assert((SIZE == DOUBLE_WORD_SIZE_BYTES) || (SIZE == 2U * DOUBLE_WORD_SIZE_BYTES)); ASSERT(xmm.GetType() == FLOAT64_TYPE); auto data {reinterpret_cast(arr.data())}; @@ -2530,7 +2541,7 @@ void Amd64Encoder::CopyArrayToXmm(Reg xmm, const std::array &arr) GetMasm()->mov(ArchReg(tmp_gpr), asmjit::imm(data[0])); 
GetMasm()->movq(ArchVReg(xmm), ArchReg(tmp_gpr)); - if constexpr (SIZE == 2U * DOUBLE_WORD_SIZE_BYTE) { + if constexpr (SIZE == 2U * DOUBLE_WORD_SIZE_BYTES) { ScopedTmpRegF64 tmp_xmm(this); // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) GetMasm()->mov(ArchReg(tmp_gpr), asmjit::imm(data[1])); @@ -2542,10 +2553,10 @@ void Amd64Encoder::CopyArrayToXmm(Reg xmm, const std::array &arr) template void Amd64Encoder::CopyImmToXmm(Reg xmm, T imm) { - static_assert((sizeof(imm) == WORD_SIZE_BYTE) || (sizeof(imm) == DOUBLE_WORD_SIZE_BYTE)); + static_assert((sizeof(imm) == WORD_SIZE_BYTES) || (sizeof(imm) == DOUBLE_WORD_SIZE_BYTES)); ASSERT(xmm.GetSize() == BYTE_SIZE * sizeof(imm)); - if constexpr (sizeof(imm) == WORD_SIZE_BYTE) { // NOLINT + if constexpr (sizeof(imm) == WORD_SIZE_BYTES) { // NOLINT ScopedTmpRegU32 tmp_gpr(this); GetMasm()->mov(ArchReg(tmp_gpr), asmjit::imm(bit_cast(imm))); GetMasm()->movd(ArchVReg(xmm), ArchReg(tmp_gpr)); diff --git a/compiler/optimizer/code_generator/target/amd64/target.h b/compiler/optimizer/code_generator/target/amd64/target.h index 72c5366f69159e35bc1e87fd8a6d989fd6a436c7..846604dc956a3e2546cfc055defa03cf0d83ed41 100644 --- a/compiler/optimizer/code_generator/target/amd64/target.h +++ b/compiler/optimizer/code_generator/target/amd64/target.h @@ -245,62 +245,14 @@ static inline asmjit::x86::Xmm ArchVReg(Reg reg) static inline asmjit::Imm ArchImm(Imm imm) { - ASSERT(imm.IsValid()); - if (imm.GetType() == INT64_TYPE) { - return asmjit::imm(imm.GetValue()); - } - if (imm.GetType() == INT32_TYPE) { - return asmjit::imm(imm.GetValue()); - } - if (imm.GetType() == INT16_TYPE) { - return asmjit::imm(imm.GetValue()); - } - if (imm.GetType() == INT8_TYPE) { - return asmjit::imm(imm.GetValue()); - } - // Invalid converted register - UNREACHABLE(); - return asmjit::imm(0); -} - -static inline int64_t ImmToSignedInt(Imm imm) -{ - ASSERT(imm.IsValid()); - if (imm.GetType() == INT64_TYPE) { - return imm.GetValue(); - } - if 
(imm.GetType() == INT32_TYPE) { - return imm.GetValue(); - } - if (imm.GetType() == INT16_TYPE) { - return imm.GetValue(); - } - if (imm.GetType() == INT8_TYPE) { - return imm.GetValue(); - } - // Invalid converted register - UNREACHABLE(); - return 0; + ASSERT(imm.GetType() == INT64_TYPE); + return asmjit::imm(imm.GetAsInt()); } static inline uint64_t ImmToUnsignedInt(Imm imm) { - ASSERT(imm.IsValid()); - if (imm.GetType() == INT64_TYPE) { - return uint64_t(imm.GetValue()); - } - if (imm.GetType() == INT32_TYPE) { - return uint32_t(imm.GetValue()); - } - if (imm.GetType() == INT16_TYPE) { - return uint16_t(imm.GetValue()); - } - if (imm.GetType() == INT8_TYPE) { - return uint8_t(imm.GetValue()); - } - // Invalid converted register - UNREACHABLE(); - return 0; + ASSERT(imm.GetType() == INT64_TYPE); + return uint64_t(imm.GetAsInt()); } static inline bool ImmFitsSize(int64_t imm, uint8_t size) @@ -714,7 +666,9 @@ public: void EncodeStrRelease(Reg src, MemRef mem) override; // zerod high part: [reg.size, 64) void EncodeStrz(Reg src, MemRef mem) override; - void EncodeSti(Imm src, MemRef mem) override; + void EncodeSti(int64_t src, uint8_t src_size_bytes, MemRef mem) override; + void EncodeSti(float src, MemRef mem) override; + void EncodeSti(double src, MemRef mem) override; // size must be 8, 16,32 or 64 void EncodeMemCopy(MemRef mem_from, MemRef mem_to, size_t size) override; // size must be 8, 16,32 or 64 diff --git a/compiler/optimizer/code_generator/target/target.cpp b/compiler/optimizer/code_generator/target/target.cpp index c7cfc9cc911852771b018b39472eb932a971e7f8..94741b97b1118a067c7058d5e80a367f65f3c178 100644 --- a/compiler/optimizer/code_generator/target/target.cpp +++ b/compiler/optimizer/code_generator/target/target.cpp @@ -138,7 +138,7 @@ CallingConvention *CallingConvention::Create([[maybe_unused]] ArenaAllocator *ar [[maybe_unused]] bool print_asm) { [[maybe_unused]] auto mode = - CallConvMode::Panda(is_panda_abi) | CallConvMode::Osr(is_osr) | 
CallConvMode::Dyn(is_dyn); + CallConvMode::Panda(is_panda_abi) | CallConvMode::Osr(is_osr) | CallConvMode::DynCall(is_dyn); switch (arch) { #ifdef PANDA_COMPILER_TARGET_AARCH32 case Arch::AARCH32: { diff --git a/compiler/optimizer/ir/graph.cpp b/compiler/optimizer/ir/graph.cpp index 5b4aaf5833584c9c802948e7e7ad5dfd9422ca88..567447548cc19850ee01882d57918653a830e861 100644 --- a/compiler/optimizer/ir/graph.cpp +++ b/compiler/optimizer/ir/graph.cpp @@ -465,7 +465,8 @@ CallingConvention *Graph::GetCallingConvention() // This doesn't require an encoder, so we don't create one callconv_ = CallingConvention::Create(GetAllocator(), encoder_, GetRegisters(), GetArch(), // is_panda_abi, is_osr, is_dyn - mode_.SupportManagedCode(), IsOsrMode(), IsDynamicMethod()); + mode_.SupportManagedCode(), IsOsrMode(), + IsDynamicMethod() && !GetMode().IsDynamicStub()); } return callconv_; #endif diff --git a/compiler/optimizer/ir/graph.h b/compiler/optimizer/ir/graph.h index b10e6babdf695497b49c3ba3690a2effbadca83d..4fe114f2d71ff551ddc120422561da7a1f513b1a 100644 --- a/compiler/optimizer/ir/graph.h +++ b/compiler/optimizer/ir/graph.h @@ -78,6 +78,8 @@ public: DECLARE_GRAPH_MODE(BytecodeOpt); // The method from dynamic language DECLARE_GRAPH_MODE(DynamicMethod); + // The method from dynamic language uses common calling convention + DECLARE_GRAPH_MODE(DynamicStub); // Graph will be compiled with native calling convention DECLARE_GRAPH_MODE(Native); // FastPath from compiled code to runtime @@ -102,7 +104,8 @@ private: using FlagOsr = BitField; using FlagBytecodeOpt = FlagOsr::NextFlag; using FlagDynamicMethod = FlagBytecodeOpt::NextFlag; - using FlagNative = FlagDynamicMethod::NextFlag; + using FlagDynamicStub = FlagDynamicMethod::NextFlag; + using FlagNative = FlagDynamicStub::NextFlag; using FlagFastPath = FlagNative::NextFlag; using FlagBoundary = FlagFastPath::NextFlag; using FlagInterpreter = FlagBoundary::NextFlag; @@ -1013,6 +1016,11 @@ public: mode_.SetDynamicMethod(true); } + void 
SetDynamicStub() + { + mode_.SetDynamicStub(true); + } + auto &GetSingleImplementationList() { return single_implementation_list_; diff --git a/compiler/optimizer/ir/ir_constructor.h b/compiler/optimizer/ir/ir_constructor.h index 4ca402c4369dde4749a2c177e183868e4305605c..97be3514ee970f09ea8d528d982b1f6e3338953e 100644 --- a/compiler/optimizer/ir/ir_constructor.h +++ b/compiler/optimizer/ir/ir_constructor.h @@ -1114,10 +1114,9 @@ public: if (inst->GetOpcode() != Opcode::Parameter) { continue; } - ++i; auto type = inst->GetType(); - InstBuilder::SetParamSpillFill(graph_, static_cast(inst), num_args, i - 1, type); + InstBuilder::SetParamSpillFill(graph_, static_cast(inst), num_args, i++, type); } } diff --git a/compiler/optimizer/ir/runtime_interface.h b/compiler/optimizer/ir/runtime_interface.h index 0a7072e060391bac6b9c5455b2a88fdc1ec1cf2f..8695d2490f76a91162b71bbdc3f3dbd38a09e868 100644 --- a/compiler/optimizer/ir/runtime_interface.h +++ b/compiler/optimizer/ir/runtime_interface.h @@ -437,10 +437,6 @@ public: { return panda::cross_values::GetManagedThreadConcurrentMarkingAddrOffset(arch); } - uint32_t GetLanguageExtensionsDataOffset([[maybe_unused]] Arch arch) const - { - return panda::cross_values::GetManagedThreadLanguageExtensionDataOffset(arch); - } virtual ::panda::mem::BarrierType GetPreType() const { @@ -940,6 +936,21 @@ public: return static_cast(coretypes::TaggedValue::True().GetRawData()); } + virtual uint64_t DynamicCastDoubleToInt([[maybe_unused]] double value, [[maybe_unused]] size_t bits) const + { + return 0; + } + + virtual uint8_t GetDynamicNumFixedArgs() const + { + return 0; + } + + virtual size_t GetLanguageExtensionSize([[maybe_unused]] Arch arch) const + { + return 0; + } + virtual uint32_t GetNativePointerTargetOffset([[maybe_unused]] Arch arch) const { return 0; diff --git a/compiler/optimizer/ir_builder/inst_builder-inl.h b/compiler/optimizer/ir_builder/inst_builder-inl.h index 
e30eccc66f4e7eb13ae9f299ec34a23e86fa2db7..8460a354cdb24e5993a86250b0732b1b9dd04b32 100644 --- a/compiler/optimizer/ir_builder/inst_builder-inl.h +++ b/compiler/optimizer/ir_builder/inst_builder-inl.h @@ -857,8 +857,8 @@ void InstBuilder::BuildUnfoldLoadConstArray(const BytecodeInstruction *bc_inst, // NOLINTNEXTLINE(misc-definitions-in-headers) void InstBuilder::BuildLoadConstArray(const BytecodeInstruction *bc_inst) { - auto literal_array_id = bc_inst->GetId(0).AsFileId().GetOffset(); - auto lit_array = GetRuntime()->GetLiteralArray(GetMethod(), literal_array_id); + auto literal_array_idx = bc_inst->GetId(0).AsIndex(); + auto lit_array = GetRuntime()->GetLiteralArray(GetMethod(), literal_array_idx); auto array_size = lit_array.literals.size(); ASSERT(array_size > 0); @@ -904,7 +904,7 @@ void InstBuilder::BuildLoadConstArray(const BytecodeInstruction *bc_inst) auto save_state = CreateSaveState(Opcode::SaveState, GetPc(bc_inst->GetAddress())); auto method = GetGraph()->GetMethod(); auto inst = GetGraph()->CreateInstLoadConstArray(DataType::REFERENCE, GetPc(bc_inst->GetAddress())); - inst->SetTypeId(literal_array_id); + inst->SetTypeId(literal_array_idx); inst->SetMethod(method); inst->SetInput(0, save_state); AddInstruction(save_state); diff --git a/compiler/optimizer/ir_builder/inst_builder.cpp b/compiler/optimizer/ir_builder/inst_builder.cpp index d6b12133f7eb57c2bc360fa7cf2c0e035e073d7e..5767c0c2ad715df24bf35ebbe512c47225cad767 100644 --- a/compiler/optimizer/ir_builder/inst_builder.cpp +++ b/compiler/optimizer/ir_builder/inst_builder.cpp @@ -18,6 +18,9 @@ #include "optimizer/code_generator/encode.h" #include "compiler_logger.h" #include "runtime/profiling/profiling.h" +#ifndef PANDA_TARGET_WINDOWS +#include "callconv.h" +#endif namespace panda::compiler { @@ -207,7 +210,15 @@ void InstBuilder::SetParamSpillFill(Graph *graph, ParameterInst *param_inst, siz param_inst->SetLocationData({LocationType::REGISTER, LocationType::REGISTER, reg_src, reg_src, reg_type}); } 
else { #ifndef PANDA_TARGET_WINDOWS - param_inst->SetLocationData(graph->GetDataForNativeParam(type)); + if (graph->IsDynamicMethod() && !graph->GetMode().IsDynamicStub()) { + ASSERT(type == DataType::ANY); + uint16_t slot = i + CallConvDynInfo::FIXED_SLOT_COUNT; + ASSERT(slot <= UINT8_MAX); + param_inst->SetLocationData( + {LocationType::STACK_PARAMETER, LocationType::INVALID, slot, INVALID_REG, DataType::UINT64}); + } else { + param_inst->SetLocationData(graph->GetDataForNativeParam(type)); + } #endif } } diff --git a/compiler/optimizer/ir_builder/inst_builder.h b/compiler/optimizer/ir_builder/inst_builder.h index 5fb6284877955833aa5eb5c5dd64f22e93bbec59..83a1fae5dd127cb97693ca3a30d9ae029d48dc47 100644 --- a/compiler/optimizer/ir_builder/inst_builder.h +++ b/compiler/optimizer/ir_builder/inst_builder.h @@ -284,8 +284,6 @@ private: template void BuildCall(const BytecodeInstruction *bc_inst, bool is_range, bool acc_read); - void BuildDynamicCall(const BytecodeInstruction *bc_inst, bool is_range, bool call_this, AnyBaseType type, - uint64_t num_args = 0); template CallInst *BuildCallInst(RuntimeInterface::MethodPtr method, uint32_t method_id, size_t pc); template diff --git a/compiler/optimizer/optimizations/const_folding.cpp b/compiler/optimizer/optimizations/const_folding.cpp index 8cfae85baee20095bff5533b98c65270ea168f06..095da7d51d0695186ae997d1bbbbb853a746e3b9 100644 --- a/compiler/optimizer/optimizations/const_folding.cpp +++ b/compiler/optimizer/optimizations/const_folding.cpp @@ -130,6 +130,12 @@ uint64_t ConvertFloatToInt(From value, DataType::Type target_type) } } +template +uint64_t ConvertFloatToIntDyn(From value, RuntimeInterface *runtime, size_t bits) +{ + return runtime->DynamicCastDoubleToInt(static_cast(value), bits); +} + ConstantInst *ConstFoldingCreateIntConst(Inst *inst, uint64_t value, bool is_literal_data) { auto graph = inst->GetBasicBlock()->GetGraph(); @@ -220,8 +226,11 @@ ConstantInst *ConstFoldingCastConst(Inst *inst, Inst *input, bool 
is_literal_dat } else if (cnst->GetType() == DataType::FLOAT64) { if (inst_type == DataType::INT64) { // DOUBLE->INT/LONG - return ConstFoldingCreateIntConst(inst, ConvertFloatToInt(cnst->GetDoubleValue(), inst->GetType()), - is_literal_data); + uint64_t val = graph->IsDynamicMethod() + ? ConvertFloatToIntDyn(cnst->GetDoubleValue(), graph->GetRuntime(), + DataType::GetTypeSize(inst->GetType(), graph->GetArch())) + : ConvertFloatToInt(cnst->GetDoubleValue(), inst->GetType()); + return ConstFoldingCreateIntConst(inst, val, is_literal_data); } if (inst_type == DataType::FLOAT32) { // DOUBLE -> FLOAT diff --git a/compiler/optimizer/optimizations/licm_conditions.cpp b/compiler/optimizer/optimizations/licm_conditions.cpp index 708dbde5c5687b9688839246d92873c602d2ff2f..93f02ad082151b38e6f3a9be76f086a516272ee9 100644 --- a/compiler/optimizer/optimizations/licm_conditions.cpp +++ b/compiler/optimizer/optimizations/licm_conditions.cpp @@ -380,7 +380,11 @@ void LicmConditions::UpdatePhis(const ConditionChain *chain, BasicBlock *multipl phi_block->AppendPhi(phi); if (phi->GetInputsCount() < phi_block->GetPredsBlocks().size()) { COMPILER_LOG(DEBUG, LICM_COND_OPT) << "Add dummy input"; - phi->AppendInput(GetGraph()->FindOrCreateConstant(0)); + if (DataType::IsReference(phi->GetType())) { + phi->AppendInput(GetGraph()->GetOrCreateNullPtr()); + } else { + phi->AppendInput(GetGraph()->FindOrCreateConstant(0)); + } } ASSERT(phi->GetInputsCount() == phi_block->GetPredsBlocks().size()); } else { diff --git a/compiler/optimizer/optimizations/locations_builder.cpp b/compiler/optimizer/optimizations/locations_builder.cpp index 1b6267ada59e17691119d7d02a577bea2ee4eb47..129b7d14d543919d01adab09fb3ee04c82cbb66b 100644 --- a/compiler/optimizer/optimizations/locations_builder.cpp +++ b/compiler/optimizer/optimizations/locations_builder.cpp @@ -97,6 +97,38 @@ LOCATIONS_BUILDER(void)::ProcessManagedCall(Inst *inst, ParameterInfo *pinfo) GetGraph()->UpdateStackSlotsCount(stack_args); } 
+LOCATIONS_BUILDER(void)::ProcessManagedCallStackRange(Inst *inst, size_t range_start, ParameterInfo *pinfo) +{ + ArenaAllocator *allocator = GetGraph()->GetAllocator(); + LocationsInfo *locations = allocator->New(allocator, inst); + + /* Reserve first parameter for the callee method. It will be set by codegen. */ + if (pinfo == nullptr) { + pinfo = GetResetParameterInfo(); + pinfo->GetNextLocation(GetWordType()); + } + + size_t inputs_count = inst->GetInputsCount() - (inst->RequireState() ? 1 : 0); + size_t stack_args = 0; + ASSERT(inputs_count >= range_start); + for (size_t i = 0; i < range_start; i++) { + ASSERT(inst->GetInputType(i) != DataType::NO_TYPE); + auto param = pinfo->GetNextLocation(inst->GetInputType(i)); + locations->SetLocation(i, param); + if (param.IsStackArgument()) { + stack_args++; + } + } + if (inst->IsIntrinsic() && stack_args > 0) { + inst->CastToIntrinsic()->SetArgumentsOnStack(); + } + for (size_t i = range_start; i < inputs_count; i++) { + locations->SetLocation(i, Location::MakeStackArgument(stack_args++)); + } + locations->SetDstLocation(GetLocationForReturn(inst)); + GetGraph()->UpdateStackSlotsCount(stack_args); +} + LOCATIONS_BUILDER(void)::VisitCallStatic(GraphVisitor *visitor, Inst *inst) { if (inst->CastToCallStatic()->IsInlined()) { @@ -135,11 +167,23 @@ LOCATIONS_BUILDER(void)::VisitCallDynamic(GraphVisitor *visitor, Inst *inst) return; } + ArenaAllocator *allocator = static_cast(visitor)->GetGraph()->GetAllocator(); + LocationsInfo *locations = allocator->New(allocator, inst); + auto pinfo = static_cast(visitor)->GetResetParameterInfo(); - pinfo->GetNextLocation(GetWordType()); // Reserved for Method* - pinfo->GetNextLocation(GetWordType()); // Reserved for num_args + for (uint8_t i = 0; i < CallConvDynInfo::REG_COUNT; i++) { + [[maybe_unused]] Location loc = pinfo->GetNextLocation(GetWordType()); + ASSERT(loc.IsRegister()); + } + size_t inputs_count = inst->GetInputsCount() - (inst->RequireState() ? 
1 : 0); - static_cast(visitor)->ProcessManagedCall(inst, pinfo); + for (size_t i = 0; i < inputs_count; ++i) { + ASSERT(inst->GetInputType(i) == DataType::ANY); + auto param = Location::MakeStackArgument(i + CallConvDynInfo::FIXED_SLOT_COUNT); + locations->SetLocation(i, param); + } + locations->SetDstLocation(GetLocationForReturn(inst)); + static_cast(visitor)->GetGraph()->UpdateStackSlotsCount(inputs_count); } LOCATIONS_BUILDER(void)::VisitCallIndirect(GraphVisitor *visitor, Inst *inst) @@ -187,7 +231,13 @@ LOCATIONS_BUILDER(void)::VisitIntrinsic(GraphVisitor *visitor, Inst *inst) pinfo->GetNextLocation(DataType::INT32); } } - static_cast(visitor)->ProcessManagedCall(inst, pinfo); + size_t explicit_args; + if (inst->GetOpcode() == Opcode::Intrinsic && + IsStackRangeIntrinsic(intrinsic->GetIntrinsicId(), &explicit_args)) { + static_cast(visitor)->ProcessManagedCallStackRange(inst, explicit_args, pinfo); + } else { + static_cast(visitor)->ProcessManagedCall(inst, pinfo); + } return; } diff --git a/compiler/optimizer/optimizations/locations_builder.h b/compiler/optimizer/optimizations/locations_builder.h index 0ee89d40c9a6827492932cf943929f2e1f85d611..9001609a310a6ea6ca938f0f22d5db365d655448 100644 --- a/compiler/optimizer/optimizations/locations_builder.h +++ b/compiler/optimizer/optimizations/locations_builder.h @@ -69,6 +69,7 @@ public: static void VisitStoreStatic(GraphVisitor *visitor, Inst *inst); void ProcessManagedCall(Inst *inst, ParameterInfo *pinfo = nullptr); + void ProcessManagedCallStackRange(Inst *inst, size_t range_start, ParameterInfo *pinfo = nullptr); private: ParameterInfo *GetResetParameterInfo(); diff --git a/compiler/optimizer/optimizations/lse.cpp b/compiler/optimizer/optimizations/lse.cpp index 759aca7029ff473a0e94bf711c17f4a8bfdab484..9b041363bc3cce3a1114b2077bd9781482f7db1b 100644 --- a/compiler/optimizer/optimizations/lse.cpp +++ b/compiler/optimizer/optimizations/lse.cpp @@ -320,6 +320,8 @@ static bool IsHeapInvalidatingInst(Inst *inst) 
return !inst->CastToCallVirtual()->IsInlined(); case Opcode::CallStatic: return !inst->CastToCallStatic()->IsInlined(); + case Opcode::CallDynamic: + return !inst->CastToCallDynamic()->IsInlined(); default: return inst->GetFlag(compiler::inst_flags::HEAP_INV); } diff --git a/compiler/optimizer/optimizations/peepholes.cpp b/compiler/optimizer/optimizations/peepholes.cpp index d998868e5e252c0c0d592ed667fe698ed24c151a..6a9fbc2f115ea3f6be7ed601b58742cf23ab43a0 100644 --- a/compiler/optimizer/optimizations/peepholes.cpp +++ b/compiler/optimizer/optimizations/peepholes.cpp @@ -784,7 +784,15 @@ bool Peepholes::TrySimplifyCompareAnyType(Inst *inst) if (cmp_inst->GetOperandsType() != DataType::ANY) { return false; } + auto input0 = cmp_inst->GetInput(0).GetInst(); + auto input1 = cmp_inst->GetInput(1).GetInst(); + auto cc = cmp_inst->GetCc(); + if (input0 == input1 && (cc == CC_EQ || cc == CC_NE)) { + cmp_inst->ReplaceUsers(graph->FindOrCreateConstant(cc == CC_EQ ? 1 : 0)); + return true; + } + if (input0->GetOpcode() != Opcode::CastValueToAnyType) { return false; } @@ -792,7 +800,6 @@ bool Peepholes::TrySimplifyCompareAnyType(Inst *inst) return false; } - auto input1 = cmp_inst->GetInput(1).GetInst(); if (input1->GetOpcode() == Opcode::CastValueToAnyType) { // 2.any CastValueToAnyType BOOLEAN_TYPE v0 -> (v4) // 3.any CastValueToAnyType BOOLEAN_TYPE v1 -> (v4) @@ -828,7 +835,6 @@ bool Peepholes::TrySimplifyCompareAnyType(Inst *inst) // In this case, we can change the Compare to Constant instruction. // NOTE! It is assumed that there is only one Boolean type for each dynamic language. // Support for multiple Boolean types must be maintained separately. 
- auto cc = cmp_inst->GetCc(); if (cc != CC_EQ && cc != CC_NE) { return false; } diff --git a/compiler/optimizer/optimizations/regalloc/reg_alloc_base.cpp b/compiler/optimizer/optimizations/regalloc/reg_alloc_base.cpp index a49d4cb97681c2bd91ca3e6fb95e292d5e90c03a..e2a9a0aed2a6fafa26ff9361239c89ae1b6f5d45 100644 --- a/compiler/optimizer/optimizations/regalloc/reg_alloc_base.cpp +++ b/compiler/optimizer/optimizations/regalloc/reg_alloc_base.cpp @@ -254,7 +254,8 @@ size_t RegAllocBase::GetTotalSlotsCount() auto spill_slots_count = GetStackMask().GetUsedCount(); size_t lang_ext_slots = 0U; if (GetGraph()->GetArch() != Arch::NONE) { - lang_ext_slots = GetGraph()->GetRuntime()->GetLanguageExtensionSize() / PointerSize(GetGraph()->GetArch()); + lang_ext_slots = GetGraph()->GetRuntime()->GetLanguageExtensionSize(GetGraph()->GetArch()) / + PointerSize(GetGraph()->GetArch()); } // language extension slots lies after spill slots diff --git a/compiler/optimizer/optimizations/regalloc/reg_alloc_linear_scan.cpp b/compiler/optimizer/optimizations/regalloc/reg_alloc_linear_scan.cpp index 8c87c8e3716741edaaf2098eeb43b6d969bae0a9..77405bc237924b5f1211661b0db5416ef4e84167 100644 --- a/compiler/optimizer/optimizations/regalloc/reg_alloc_linear_scan.cpp +++ b/compiler/optimizer/optimizations/regalloc/reg_alloc_linear_scan.cpp @@ -215,9 +215,6 @@ void RegAllocLinearScan::WalkIntervals() if (current_interval->GetLocation().IsStackParameter()) { ASSERT(current_interval->GetInst()->IsParameter()); COMPILER_LOG(DEBUG, REGALLOC) << "Interval was defined in the stack parameter slot"; - if (GetGraph()->IsDynamicMethod()) { - AssignStackSlot(current_interval); - } auto next_use = current_interval->GetNextUsage(current_interval->GetBegin() + 1U); SplitBeforeUse(current_interval, next_use); return; diff --git a/compiler/optimizer/templates/arch_info_gen.h.erb b/compiler/optimizer/templates/arch_info_gen.h.erb index 
5f1cb0174b5eb7c1fdcbd9104ed776607d8ff333..900799a46c6f1711569333a9a24eee23ac51e0d1 100644 --- a/compiler/optimizer/templates/arch_info_gen.h.erb +++ b/compiler/optimizer/templates/arch_info_gen.h.erb @@ -19,8 +19,8 @@ #include "utils/regmask.h" namespace panda::compiler::arch_info { - % IR::arch_info.each do | data | +// NOLINTNEXTLINE(readability-identifier-naming) struct <%= data['name'] %> { static constexpr RegMask TEMP_REGS = ::panda::MakeMask(<%= data['temp_regs'].join(', ') %>); static constexpr RegMask TEMP_FP_REGS = ::panda::MakeMask(<%= data['fp_temp_regs'].join(', ') %>); diff --git a/compiler/optimizer/templates/codegen_language_extensions.h.erb b/compiler/optimizer/templates/codegen_language_extensions.h.erb index 83b2d683b36316d7a6a2c896f9e2f4e6ef9acfb0..7fccb3aba29a04622171d1fee3e361bd15ad6e5c 100644 --- a/compiler/optimizer/templates/codegen_language_extensions.h.erb +++ b/compiler/optimizer/templates/codegen_language_extensions.h.erb @@ -19,29 +19,32 @@ #include "<%= plugin_opts["compiler_extensions"]["header_path_implementation_codegen"] %>" % end -inline bool GenerateExtensionsForPrologue() +inline void GenerateExtensionsForPrologue([[maybe_unused]] SourceLanguage language) { -% Common::plugins.each_value do |plugin_opts| + switch(language) { // NOLINT(hicpp-multiway-paths-covered) +% Common::plugins.each do |plugin_lang, plugin_opts| % next unless plugin_opts["compiler_extensions"] % next unless plugin_opts["compiler_extensions"]["function_codegen_prologue"] - if (<%= plugin_opts["compiler_extensions"]["function_codegen_prologue"] %>()) { - return true; // NOLINT(readability-simplify-boolean-expr) - } + case SourceLanguage::<%= plugin_lang %>: + <%= plugin_opts["compiler_extensions"]["function_codegen_prologue"] %>(); + return; % end - - return false; + default: + return; + } } -inline bool GenerateExtensionsForEpilogue() +inline void GenerateExtensionsForEpilogue([[maybe_unused]] SourceLanguage language) { -% Common::plugins.each_value do 
|plugin_opts| + switch(language) { // NOLINT(hicpp-multiway-paths-covered) +% Common::plugins.each do |plugin_lang, plugin_opts| % next unless plugin_opts["compiler_extensions"] % next unless plugin_opts["compiler_extensions"]["function_codegen_epilogue"] - if (<%= plugin_opts["compiler_extensions"]["function_codegen_epilogue"] %>()) { - return true; // NOLINT(readability-simplify-boolean-expr) - } + case SourceLanguage::<%= plugin_lang %>: + <%= plugin_opts["compiler_extensions"]["function_codegen_epilogue"] %>(); + return; % end - - return false; + default: + return; + } } - diff --git a/compiler/optimizer/templates/compiler_interface_extensions.inl.h.erb b/compiler/optimizer/templates/compiler_interface_extensions.inl.h.erb index 3497ad199cc460e018cf496f6cc9ee67c84e97e0..abcfa08a08196d1547d1754b2993158e4eb78297 100644 --- a/compiler/optimizer/templates/compiler_interface_extensions.inl.h.erb +++ b/compiler/optimizer/templates/compiler_interface_extensions.inl.h.erb @@ -20,8 +20,3 @@ % next unless plugin_opts["compiler_extensions"]["header_path_compiler_interface_extension"] #include "<%= plugin_opts["compiler_extensions"]["header_path_compiler_interface_extension"] %>" % end - -virtual size_t GetLanguageExtensionSize() -{ - return 0; -} \ No newline at end of file diff --git a/compiler/optimizer/templates/inst_builder_gen.cpp.erb b/compiler/optimizer/templates/inst_builder_gen.cpp.erb index c139f050adb5de7f9dbc0a108f7f0482e7bcbe41..ebc6201b1ab568350694e1c89a5f50a420ba3556 100644 --- a/compiler/optimizer/templates/inst_builder_gen.cpp.erb +++ b/compiler/optimizer/templates/inst_builder_gen.cpp.erb @@ -39,7 +39,7 @@ Instruction.class_eval do op = inputs[index] return "GetDefinitionAcc()" if op.acc? return "FindOrCreateConstant(instruction->GetImm<#{get_format}, #{get_input_idx(index, :imm?)}>())" if op.imm? - return "GetDefinition(instruction->GetId<#{get_format}, #{get_input_idx(index, :id?)}>().GetOffset())" if op.id? 
+ return "GetDefinition(instruction->GetId<#{get_format}, BytecodeInstruction::Opcode, #{get_input_idx(index, :id?)}>().GetOffset())" if op.id? raise "Invalid operand" unless op.reg? return "GetDefinition(instruction->GetVReg<#{get_format}, #{get_input_idx(index, :reg?)}>())" end diff --git a/compiler/optimizer/templates/intrinsics/compiler_intrinsics.rb b/compiler/optimizer/templates/intrinsics/compiler_intrinsics.rb index 134e2c87ed85e2a5a94b1d797a3cb61de98833b5..c5a50f20548ec068a1dc0aab93c7a136c629e31a 100755 --- a/compiler/optimizer/templates/intrinsics/compiler_intrinsics.rb +++ b/compiler/optimizer/templates/intrinsics/compiler_intrinsics.rb @@ -59,6 +59,10 @@ class Intrinsic < SimpleDelegator def is_dynamic? signature.ret == "any" || signature.args.include?("any") end + + def is_stackrange? + signature.stackrange == true + end end module Compiler diff --git a/compiler/optimizer/templates/intrinsics/intrinsic_flags_test.inl.erb b/compiler/optimizer/templates/intrinsics/intrinsic_flags_test.inl.erb index 9d22a4dca5227087454a44c0545007bad67cf713..27e27859e923120d19b725e212eba3f565d94d45 100644 --- a/compiler/optimizer/templates/intrinsics/intrinsic_flags_test.inl.erb +++ b/compiler/optimizer/templates/intrinsics/intrinsic_flags_test.inl.erb @@ -16,11 +16,11 @@ // Autogenerated file -- DO NOT EDIT! % Compiler::intrinsics.each do |intrinsic| { - constexpr auto clear_flags = <%= intrinsic.clear_flags.empty? ? "0U" : intrinsic.clear_flags.collect { |f| "compiler::inst_flags::" + f.upcase }.join("|") %>; - constexpr auto set_flags = <%= intrinsic.set_flags.empty? ? "0U" : intrinsic.set_flags.collect { |f| "compiler::inst_flags::" + f.upcase }.join("|") %>; + constexpr auto CLEAR_FLAGS = <%= intrinsic.clear_flags.empty? ? "0U" : intrinsic.clear_flags.collect { |f| "compiler::inst_flags::" + f.upcase }.join("|") %>; + constexpr auto SET_FLAGS = <%= intrinsic.set_flags.empty? ? 
"0U" : intrinsic.set_flags.collect { |f| "compiler::inst_flags::" + f.upcase }.join("|") %>; auto inst= Inst::New(&allocator, Opcode::Intrinsic); - inst->ClearFlag(static_cast(set_flags)); - inst->SetFlag(static_cast(clear_flags)); + inst->ClearFlag(static_cast(SET_FLAGS)); + inst->SetFlag(static_cast(CLEAR_FLAGS)); // expected flags after inverse flags adjustment auto expected_flags = inst_flags::NO_DCE | inst_flags::NO_CSE | inst_flags::NO_HOIST | inst_flags::BARRIER | inst_flags::REQUIRE_STATE | inst_flags::RUNTIME_CALL; ASSERT_EQ(expected_flags, inst->GetFlagsMask()) << "Fix <%= intrinsic.enum_name %> intrinsic flags"; diff --git a/compiler/optimizer/templates/intrinsics/intrinsics_flags.inl.erb b/compiler/optimizer/templates/intrinsics/intrinsics_flags.inl.erb index 47c0d1f0a463a56dba486c06a941a253a7ca951d..cb756986f58a2b0c003388277b16fb1ebd222e97 100644 --- a/compiler/optimizer/templates/intrinsics/intrinsics_flags.inl.erb +++ b/compiler/optimizer/templates/intrinsics/intrinsics_flags.inl.erb @@ -23,18 +23,18 @@ inline void AdjustFlags([[maybe_unused]] RuntimeInterface::IntrinsicId intrinsic % Compiler::intrinsics.select {|intrinsic| !intrinsic.clear_flags.empty? or !intrinsic.set_flags.empty?}.each do |intrinsic| case RuntimeInterface::IntrinsicId::<%= intrinsic.entrypoint_name %>: { - constexpr auto clear_flags = <%= intrinsic.clear_flags.empty? ? "0U" : intrinsic.clear_flags.collect { |f| "compiler::inst_flags::" + f.upcase }.join("|") %>; - constexpr auto set_flags = <%= intrinsic.set_flags.empty? ? "0U" : intrinsic.set_flags.collect { |f| "compiler::inst_flags::" + f.upcase }.join("|") %>; - static_assert((set_flags & clear_flags) == 0, "<%= intrinsic.enum_name %> clear_flags cannot intersect set_flags"); + constexpr auto CLEAR_FLAGS = <%= intrinsic.clear_flags.empty? ? "0U" : intrinsic.clear_flags.collect { |f| "compiler::inst_flags::" + f.upcase }.join("|") %>; + constexpr auto SET_FLAGS = <%= intrinsic.set_flags.empty? ? 
"0U" : intrinsic.set_flags.collect { |f| "compiler::inst_flags::" + f.upcase }.join("|") %>; + static_assert((SET_FLAGS & CLEAR_FLAGS) == 0, "<%= intrinsic.enum_name %> CLEAR_FLAGS cannot intersect SET_FLAGS"); % if !intrinsic.static - static_assert((clear_flags & compiler::inst_flags::REQUIRE_STATE) == 0, "<%= intrinsic.enum_name %> requires save_state because virtual call might cause NPE"); + static_assert((CLEAR_FLAGS & compiler::inst_flags::REQUIRE_STATE) == 0, "<%= intrinsic.enum_name %> requires save_state because virtual call might cause NPE"); % end % if !intrinsic.clear_flags.empty? - inst->ClearFlag(static_cast(clear_flags)); + inst->ClearFlag(static_cast(CLEAR_FLAGS)); % end % if !intrinsic.set_flags.empty? - static_assert((set_flags & compiler::inst_flags::CAN_THROW) == 0 || (clear_flags & (compiler::inst_flags::REQUIRE_STATE | compiler::inst_flags::RUNTIME_CALL)) == 0, "<%= intrinsic.enum_name %> cannot set can_throw flag"); - inst->SetFlag(static_cast(set_flags)); + static_assert((SET_FLAGS & compiler::inst_flags::CAN_THROW) == 0 || (CLEAR_FLAGS & (compiler::inst_flags::REQUIRE_STATE | compiler::inst_flags::RUNTIME_CALL)) == 0, "<%= intrinsic.enum_name %> cannot set can_throw flag"); + inst->SetFlag(static_cast(SET_FLAGS)); % end break; } @@ -57,3 +57,16 @@ inline bool IsIrtocIntrinsic(RuntimeInterface::IntrinsicId intrinsic) return false; } } + +inline bool IsStackRangeIntrinsic(RuntimeInterface::IntrinsicId intrinsic, [[maybe_unused]] size_t *explicit_args) +{ + switch (intrinsic) { +% Compiler::intrinsics.select(&:is_stackrange?).each do |intrinsic| + case RuntimeInterface::IntrinsicId::<%= intrinsic.entrypoint_name %>: + *explicit_args = <%= intrinsic.arguments.length() - 1 %>; + return true; +% end + default: + return false; + } +} diff --git a/compiler/optimizer/templates/intrinsics/intrinsics_ir_build.inl.erb b/compiler/optimizer/templates/intrinsics/intrinsics_ir_build.inl.erb index 
b8d29478b7e2d049c24658f4c9e7de9119733a7f..6f3b9393f446b3a4870ae0c85660ffc24f1c397f 100644 --- a/compiler/optimizer/templates/intrinsics/intrinsics_ir_build.inl.erb +++ b/compiler/optimizer/templates/intrinsics/intrinsics_ir_build.inl.erb @@ -27,14 +27,14 @@ inline bool NeedSafePointAfterIntrinsic(RuntimeInterface::IntrinsicId intrinsic) } } -template +template void InstBuilder::AddArgNullcheckIfNeeded([[maybe_unused]] RuntimeInterface::IntrinsicId intrinsic, [[maybe_unused]] Inst *inst, [[maybe_unused]] Inst *save_state, [[maybe_unused]] size_t bc_addr) { - static_assert(is_virtual, "It is not implemented for static call"); + static_assert(IS_VIRTUAL, "It is not implemented for static call"); % if Compiler::intrinsics.any? {|intrinsic| !intrinsic.need_nullcheck.empty?} - constexpr int arg_offset = is_virtual ? 1 : 0; + constexpr int ARG_OFFSET = IS_VIRTUAL ? 1 : 0; // NOLINTNEXTLINE(hicpp-multiway-paths-covered) switch (intrinsic) { @@ -43,9 +43,9 @@ void InstBuilder::AddArgNullcheckIfNeeded([[maybe_unused]] RuntimeInterface::Int % intrinsic.need_nullcheck.each do |arg_num| { auto null_check = graph_->CreateInstNullCheck(DataType::REFERENCE, bc_addr); - null_check->SetInput(0, inst->GetInput(arg_offset + <%= arg_num %>).GetInst()); + null_check->SetInput(0, inst->GetInput(ARG_OFFSET + <%= arg_num %>).GetInst()); null_check->SetInput(1, save_state); - inst->SetInput(arg_offset + <%= arg_num %>, null_check); + inst->SetInput(ARG_OFFSET + <%= arg_num %>, null_check); AddInstruction(null_check); } % end diff --git a/compiler/tests/codegen_test.cpp b/compiler/tests/codegen_test.cpp index cfa9f42a3870e8a4c41d6f1721df855fafc55071..7e972cc93f16b0773c2686c253bddd27b54f8a3b 100644 --- a/compiler/tests/codegen_test.cpp +++ b/compiler/tests/codegen_test.cpp @@ -2491,6 +2491,7 @@ TEST_F(CodegenTest, CompareAnyTypeInst) { auto graph = GetGraph(); graph->SetDynamicMethod(); + graph->SetDynamicStub(); GRAPH(graph) { PARAMETER(0, 0); @@ -2524,6 +2525,7 @@ TEST_F(CodegenTest, 
CastAnyTypeValueInst) { auto graph = GetGraph(); graph->SetDynamicMethod(); + graph->SetDynamicStub(); GRAPH(graph) { PARAMETER(0, 0); diff --git a/compiler/tests/encoder_operands.cpp b/compiler/tests/encoder_operands.cpp index 8cd45029fec6c66e881b7d8b4dac457a0e65a2da..1cfd95795f1e2ef3359f482c17cc75ed79515302 100644 --- a/compiler/tests/encoder_operands.cpp +++ b/compiler/tests/encoder_operands.cpp @@ -164,14 +164,6 @@ TEST(Operands, Imm) { // Check all possible types: // Imm holds same data (static cast for un-signed) - // GetType - // Getsize - // Is scalar - // Is Valid - // Bounary checks - // Check IsZero - // Inc/dec checks - // INVALID_IMM check for (uint64_t i = 0; i < ITERATION; ++i) { uint8_t u8 = RANDOM_GEN(), u8_z = 0, u8_min = std::numeric_limits::min(), @@ -200,162 +192,144 @@ TEST(Operands, Imm) // Unsigned part - check across static_cast Imm imm_u8(u8), imm_u8_z(u8_z), imm_u8_min(u8_min), imm_u8_max(u8_max); - ASSERT_EQ(imm_u8.GetValue(), static_cast(u8)); - ASSERT_EQ(imm_u8_min.GetValue(), static_cast(u8_min)); - ASSERT_EQ(imm_u8_max.GetValue(), static_cast(u8_max)); - ASSERT_EQ(imm_u8_z.GetValue(), static_cast(u8_z)); + ASSERT_EQ(imm_u8.GetAsInt(), u8); + ASSERT_EQ(imm_u8_min.GetAsInt(), u8_min); + ASSERT_EQ(imm_u8_max.GetAsInt(), u8_max); + ASSERT_EQ(imm_u8_z.GetAsInt(), u8_z); - ASSERT_TRUE(imm_u8_min.IsZero()); - ASSERT_TRUE(imm_u8_z.IsZero()); - ASSERT_FALSE(imm_u8_max.IsZero()); - - ASSERT_TRUE(imm_u8.IsValid()); - ASSERT_TRUE(imm_u8_z.IsValid()); - ASSERT_TRUE(imm_u8_min.IsValid()); - ASSERT_TRUE(imm_u8_max.IsValid()); + TypedImm typed_imm_u8(u8), typed_imm_u8_z(u8_z); + ASSERT_EQ(typed_imm_u8_z.GetType(), INT8_TYPE); + ASSERT_EQ(typed_imm_u8.GetType(), INT8_TYPE); + ASSERT_EQ(typed_imm_u8_z.GetImm().GetAsInt(), u8_z); + ASSERT_EQ(typed_imm_u8.GetImm().GetAsInt(), u8); Imm imm_u16(u16), imm_u16_z(u16_z), imm_u16_min(u16_min), imm_u16_max(u16_max); - ASSERT_EQ(imm_u16.GetValue(), static_cast(u16)); - ASSERT_EQ(imm_u16_min.GetValue(), 
static_cast(u16_min)); - ASSERT_EQ(imm_u16_max.GetValue(), static_cast(u16_max)); - ASSERT_EQ(imm_u16_z.GetValue(), static_cast(u16_z)); - - ASSERT_TRUE(imm_u16_min.IsZero()); - ASSERT_TRUE(imm_u16_z.IsZero()); - ASSERT_FALSE(imm_u16_max.IsZero()); + ASSERT_EQ(imm_u16.GetAsInt(), u16); + ASSERT_EQ(imm_u16_min.GetAsInt(), u16_min); + ASSERT_EQ(imm_u16_max.GetAsInt(), u16_max); + ASSERT_EQ(imm_u16_z.GetAsInt(), u16_z); - ASSERT_TRUE(imm_u16.IsValid()); - ASSERT_TRUE(imm_u16_z.IsValid()); - ASSERT_TRUE(imm_u16_min.IsValid()); - ASSERT_TRUE(imm_u16_max.IsValid()); + TypedImm typed_imm_u16(u16), typed_imm_u16_z(u16_z); + ASSERT_EQ(typed_imm_u16_z.GetType(), INT16_TYPE); + ASSERT_EQ(typed_imm_u16.GetType(), INT16_TYPE); + ASSERT_EQ(typed_imm_u16_z.GetImm().GetAsInt(), u16_z); + ASSERT_EQ(typed_imm_u16.GetImm().GetAsInt(), u16); Imm imm_u32(u32), imm_u32_z(u32_z), imm_u32_min(u32_min), imm_u32_max(u32_max); - ASSERT_EQ(imm_u32.GetValue(), static_cast(u32)); - ASSERT_EQ(imm_u32_min.GetValue(), static_cast(u32_min)); - ASSERT_EQ(imm_u32_max.GetValue(), static_cast(u32_max)); - ASSERT_EQ(imm_u32_z.GetValue(), static_cast(u32_z)); + ASSERT_EQ(imm_u32.GetAsInt(), u32); + ASSERT_EQ(imm_u32_min.GetAsInt(), u32_min); + ASSERT_EQ(imm_u32_max.GetAsInt(), u32_max); + ASSERT_EQ(imm_u32_z.GetAsInt(), u32_z); - ASSERT_TRUE(imm_u32_min.IsZero()); - ASSERT_TRUE(imm_u32_z.IsZero()); - ASSERT_FALSE(imm_u32_max.IsZero()); - - ASSERT_TRUE(imm_u32.IsValid()); - ASSERT_TRUE(imm_u32_z.IsValid()); - ASSERT_TRUE(imm_u32_min.IsValid()); - ASSERT_TRUE(imm_u32_max.IsValid()); + TypedImm typed_imm_u32(u32), typed_imm_u32_z(u32_z); + ASSERT_EQ(typed_imm_u32_z.GetType(), INT32_TYPE); + ASSERT_EQ(typed_imm_u32.GetType(), INT32_TYPE); + ASSERT_EQ(typed_imm_u32_z.GetImm().GetAsInt(), u32_z); + ASSERT_EQ(typed_imm_u32.GetImm().GetAsInt(), u32); Imm imm_u64(u64), imm_u64_z(u64_z), imm_u64_min(u64_min), imm_u64_max(u64_max); - ASSERT_EQ(imm_u64.GetValue(), static_cast(u64)); - 
ASSERT_EQ(imm_u64_min.GetValue(), static_cast(u64_min)); - ASSERT_EQ(imm_u64_max.GetValue(), static_cast(u64_max)); - ASSERT_EQ(imm_u64_z.GetValue(), static_cast(u64_z)); - - ASSERT_TRUE(imm_u64_min.IsZero()); - ASSERT_TRUE(imm_u64_z.IsZero()); - ASSERT_FALSE(imm_u64_max.IsZero()); + ASSERT_EQ(imm_u64.GetAsInt(), u64); + ASSERT_EQ(imm_u64_min.GetAsInt(), u64_min); + ASSERT_EQ(imm_u64_max.GetAsInt(), u64_max); + ASSERT_EQ(imm_u64_z.GetAsInt(), u64_z); - ASSERT_TRUE(imm_u64.IsValid()); - ASSERT_TRUE(imm_u64_z.IsValid()); - ASSERT_TRUE(imm_u64_min.IsValid()); - ASSERT_TRUE(imm_u64_max.IsValid()); + TypedImm typed_imm_u64(u64), typed_imm_u64_z(u64_z); + ASSERT_EQ(typed_imm_u64_z.GetType(), INT64_TYPE); + ASSERT_EQ(typed_imm_u64.GetType(), INT64_TYPE); + ASSERT_EQ(typed_imm_u64_z.GetImm().GetAsInt(), u64_z); + ASSERT_EQ(typed_imm_u64.GetImm().GetAsInt(), u64); // Signed part Imm imm_i8(i8), imm_i8_z(i8_z), imm_i8_min(i8_min), imm_i8_max(i8_max); - ASSERT_EQ(imm_i8.GetValue(), i8); - ASSERT_EQ(imm_i8_min.GetValue(), i8_min); - ASSERT_EQ(imm_i8_max.GetValue(), i8_max); - ASSERT_EQ(imm_i8_z.GetValue(), i8_z); + ASSERT_EQ(imm_i8.GetAsInt(), i8); + ASSERT_EQ(imm_i8_min.GetAsInt(), i8_min); + ASSERT_EQ(imm_i8_max.GetAsInt(), i8_max); + ASSERT_EQ(imm_i8_z.GetAsInt(), i8_z); - ASSERT_FALSE(imm_i8_min.IsZero()); - ASSERT_TRUE(imm_i8_z.IsZero()); - ASSERT_FALSE(imm_i8_max.IsZero()); - - ASSERT_TRUE(imm_i8.IsValid()); - ASSERT_TRUE(imm_i8_z.IsValid()); - ASSERT_TRUE(imm_i8_min.IsValid()); - ASSERT_TRUE(imm_i8_max.IsValid()); + TypedImm typed_imm_i8(i8), typed_imm_i8_z(i8_z); + ASSERT_EQ(typed_imm_i8_z.GetType(), INT8_TYPE); + ASSERT_EQ(typed_imm_i8.GetType(), INT8_TYPE); + ASSERT_EQ(typed_imm_i8_z.GetImm().GetAsInt(), i8_z); + ASSERT_EQ(typed_imm_i8.GetImm().GetAsInt(), i8); Imm imm_i16(i16), imm_i16_z(i16_z), imm_i16_min(i16_min), imm_i16_max(i16_max); - ASSERT_EQ(imm_i16.GetValue(), i16); - ASSERT_EQ(imm_i16_min.GetValue(), i16_min); - ASSERT_EQ(imm_i16_max.GetValue(), i16_max); 
- ASSERT_EQ(imm_i16_z.GetValue(), i16_z); - - ASSERT_FALSE(imm_i16_min.IsZero()); - ASSERT_TRUE(imm_i16_z.IsZero()); - ASSERT_FALSE(imm_i16_max.IsZero()); + ASSERT_EQ(imm_i16.GetAsInt(), i16); + ASSERT_EQ(imm_i16_min.GetAsInt(), i16_min); + ASSERT_EQ(imm_i16_max.GetAsInt(), i16_max); + ASSERT_EQ(imm_i16_z.GetAsInt(), i16_z); - ASSERT_TRUE(imm_i16.IsValid()); - ASSERT_TRUE(imm_i16_z.IsValid()); - ASSERT_TRUE(imm_i16_min.IsValid()); - ASSERT_TRUE(imm_i16_max.IsValid()); + TypedImm typed_imm_i16(i16), typed_imm_i16_z(i16_z); + ASSERT_EQ(typed_imm_i16_z.GetType(), INT16_TYPE); + ASSERT_EQ(typed_imm_i16.GetType(), INT16_TYPE); + ASSERT_EQ(typed_imm_i16_z.GetImm().GetAsInt(), i16_z); + ASSERT_EQ(typed_imm_i16.GetImm().GetAsInt(), i16); Imm imm_i32(i32), imm_i32_z(i32_z), imm_i32_min(i32_min), imm_i32_max(i32_max); - ASSERT_EQ(imm_i32.GetValue(), i32); - ASSERT_EQ(imm_i32_min.GetValue(), i32_min); - ASSERT_EQ(imm_i32_max.GetValue(), i32_max); - ASSERT_EQ(imm_i32_z.GetValue(), i32_z); + ASSERT_EQ(imm_i32.GetAsInt(), i32); + ASSERT_EQ(imm_i32_min.GetAsInt(), i32_min); + ASSERT_EQ(imm_i32_max.GetAsInt(), i32_max); + ASSERT_EQ(imm_i32_z.GetAsInt(), i32_z); - ASSERT_FALSE(imm_i32_min.IsZero()); - ASSERT_TRUE(imm_i32_z.IsZero()); - ASSERT_FALSE(imm_i32_max.IsZero()); - - ASSERT_TRUE(imm_i32.IsValid()); - ASSERT_TRUE(imm_i32_z.IsValid()); - ASSERT_TRUE(imm_i32_min.IsValid()); - ASSERT_TRUE(imm_i32_max.IsValid()); + TypedImm typed_imm_i32(i32), typed_imm_i32_z(i32_z); + ASSERT_EQ(typed_imm_i32_z.GetType(), INT32_TYPE); + ASSERT_EQ(typed_imm_i32.GetType(), INT32_TYPE); + ASSERT_EQ(typed_imm_i32_z.GetImm().GetAsInt(), i32_z); + ASSERT_EQ(typed_imm_i32.GetImm().GetAsInt(), i32); Imm imm_i64(i64), imm_i64_z(i64_z), imm_i64_min(i64_min), imm_i64_max(i64_max); - ASSERT_EQ(imm_i64.GetValue(), i64); - ASSERT_EQ(imm_i64_min.GetValue(), i64_min); - ASSERT_EQ(imm_i64_max.GetValue(), i64_max); - ASSERT_EQ(imm_i64_z.GetValue(), i64_z); - - ASSERT_FALSE(imm_i64_min.IsZero()); - 
ASSERT_TRUE(imm_i64_z.IsZero()); - ASSERT_FALSE(imm_i64_max.IsZero()); - - ASSERT_TRUE(imm_i64.IsValid()); - ASSERT_TRUE(imm_i64_z.IsValid()); - ASSERT_TRUE(imm_i64_min.IsValid()); - ASSERT_TRUE(imm_i64_max.IsValid()); - - // Float and double part - + ASSERT_EQ(imm_i64.GetAsInt(), i64); + ASSERT_EQ(imm_i64_min.GetAsInt(), i64_min); + ASSERT_EQ(imm_i64_max.GetAsInt(), i64_max); + ASSERT_EQ(imm_i64_z.GetAsInt(), i64_z); + + TypedImm typed_imm_i64(i64), typed_imm_i64_z(i64_z); + ASSERT_EQ(typed_imm_i64_z.GetType(), INT64_TYPE); + ASSERT_EQ(typed_imm_i64.GetType(), INT64_TYPE); + ASSERT_EQ(typed_imm_i64_z.GetImm().GetAsInt(), i64_z); + ASSERT_EQ(typed_imm_i64.GetImm().GetAsInt(), i64); + + // Float test: Imm imm_f32(f32), imm_f32_z(f32_z), imm_f32_min(f32_min), imm_f32_max(f32_max); - ASSERT_EQ(imm_f32.GetValue(), f32); - ASSERT_EQ(imm_f32_min.GetValue(), f32_min); - ASSERT_EQ(imm_f32_max.GetValue(), f32_max); - ASSERT_EQ(imm_f32_z.GetValue(), f32_z); - - ASSERT_FALSE(imm_f32_min.IsZero()); - ASSERT_TRUE(imm_f32_z.IsZero()); - ASSERT_FALSE(imm_f32_max.IsZero()); - - ASSERT_TRUE(imm_f32.IsValid()); - ASSERT_TRUE(imm_f32_z.IsValid()); - ASSERT_TRUE(imm_f32_min.IsValid()); - ASSERT_TRUE(imm_f32_max.IsValid()); + ASSERT_EQ(imm_f32.GetAsFloat(), f32); + ASSERT_EQ(imm_f32_min.GetAsFloat(), f32_min); + ASSERT_EQ(imm_f32_max.GetAsFloat(), f32_max); + ASSERT_EQ(imm_f32_z.GetAsFloat(), f32_z); + ASSERT_EQ(bit_cast(static_cast(imm_f32.GetRawValue())), f32); + ASSERT_EQ(bit_cast(static_cast(imm_f32_min.GetRawValue())), f32_min); + ASSERT_EQ(bit_cast(static_cast(imm_f32_max.GetRawValue())), f32_max); + ASSERT_EQ(bit_cast(static_cast(imm_f32_z.GetRawValue())), f32_z); + + TypedImm typed_imm_f32(f32), typed_imm_f32_z(f32_z); + ASSERT_EQ(typed_imm_f32_z.GetType(), FLOAT32_TYPE); + ASSERT_EQ(typed_imm_f32.GetType(), FLOAT32_TYPE); + ASSERT_EQ(typed_imm_f32_z.GetImm().GetAsFloat(), f32_z); + ASSERT_EQ(typed_imm_f32.GetImm().GetAsFloat(), f32); Imm imm_f64(f64), imm_f64_z(f64_z), 
imm_f64_min(f64_min), imm_f64_max(f64_max); - ASSERT_EQ(imm_f64.GetValue(), f64); - ASSERT_EQ(imm_f64_min.GetValue(), f64_min); - ASSERT_EQ(imm_f64_max.GetValue(), f64_max); - ASSERT_EQ(imm_f64_z.GetValue(), f64_z); - - ASSERT_FALSE(imm_f64_min.IsZero()); - ASSERT_TRUE(imm_f64_z.IsZero()); - ASSERT_FALSE(imm_f64_max.IsZero()); - - ASSERT_TRUE(imm_f64.IsValid()); - ASSERT_TRUE(imm_f64_z.IsValid()); - ASSERT_TRUE(imm_f64_min.IsValid()); - ASSERT_TRUE(imm_f64_max.IsValid()); + ASSERT_EQ(imm_f64.GetAsDouble(), f64); + ASSERT_EQ(imm_f64_min.GetAsDouble(), f64_min); + ASSERT_EQ(imm_f64_max.GetAsDouble(), f64_max); + ASSERT_EQ(imm_f64_z.GetAsDouble(), f64_z); + ASSERT_EQ(bit_cast(imm_f64.GetRawValue()), f64); + ASSERT_EQ(bit_cast(imm_f64_min.GetRawValue()), f64_min); + ASSERT_EQ(bit_cast(imm_f64_max.GetRawValue()), f64_max); + ASSERT_EQ(bit_cast(imm_f64_z.GetRawValue()), f64_z); + + TypedImm typed_imm_f64(f64), typed_imm_f64_z(f64_z); + ASSERT_EQ(typed_imm_f64_z.GetType(), FLOAT64_TYPE); + ASSERT_EQ(typed_imm_f64.GetType(), FLOAT64_TYPE); + ASSERT_EQ(typed_imm_f64_z.GetImm().GetAsDouble(), f64_z); + ASSERT_EQ(typed_imm_f64.GetImm().GetAsDouble(), f64); } - // Sizeof imm: - // Imm holds 2 uint64_t values (std::variant) - ASSERT_LE(sizeof(Imm), sizeof(uint64_t) * 2); + +#ifndef NDEBUG + // Imm holds std::variant: + ASSERT_EQ(sizeof(Imm), sizeof(uint64_t) * 2); +#else + // Imm holds 64-bit storage only: + ASSERT_EQ(sizeof(Imm), sizeof(uint64_t)); +#endif // NDEBUG } TEST(Operands, MemRef) diff --git a/compiler/tests/ir_builder_test.cpp b/compiler/tests/ir_builder_test.cpp index 98052fd7195b90d08845ae41f248830540d84c77..573023768043bd397cafee4e187bf1968b21e380 100644 --- a/compiler/tests/ir_builder_test.cpp +++ b/compiler/tests/ir_builder_test.cpp @@ -6304,12 +6304,10 @@ TEST_F(IrBuilderTest, JumpInsideTryBlock) TEST_F(IrBuilderTest, CompareAnyType) { // no crash. 
- auto graph = CreateGraphWithDefaultRuntime(); - graph->SetDynamicMethod(); + auto graph = CreateGraphDynWithDefaultRuntime(); GRAPH(graph) { - PARAMETER(0, 0); - INS(0).SetType(DataType::Type::ANY); + PARAMETER(0, 0).any(); BASIC_BLOCK(2, -1) { @@ -6329,12 +6327,10 @@ TEST_F(IrBuilderTest, CompareAnyType) TEST_F(IrBuilderTest, CastAnyTypeValue) { // no crash. - auto graph = CreateGraphWithDefaultRuntime(); - graph->SetDynamicMethod(); + auto graph = CreateGraphDynWithDefaultRuntime(); GRAPH(graph) { - PARAMETER(0, 0); - INS(0).SetType(DataType::Type::ANY); + PARAMETER(0, 0).any(); BASIC_BLOCK(2, -1) { diff --git a/compiler/tests/unit_test.h b/compiler/tests/unit_test.h index 3bd3708a6ea63dc7bcdc8976615b521376c6ce7d..7ccd6a39ea1c3cc66e8ff4ee52f778e59ed381bd 100644 --- a/compiler/tests/unit_test.h +++ b/compiler/tests/unit_test.h @@ -299,6 +299,15 @@ public: return graph; } + Graph *CreateGraphDynStubWithDefaultRuntime() + { + auto *graph = GetAllocator()->New(GetAllocator(), GetLocalAllocator(), arch_); + graph->SetRuntime(GetDefaultRuntime()); + graph->SetDynamicMethod(); + graph->SetDynamicStub(); + return graph; + } + // this method is needed to create a graph with a working dump Graph *CreateGraphOsrWithDefaultRuntime() { diff --git a/cross_values/CMakeLists.txt b/cross_values/CMakeLists.txt index 225e4b72ba250d13bc19bf83f342934175559f8f..cf62c6183ba39806bb7c76935868959eaf790fd0 100644 --- a/cross_values/CMakeLists.txt +++ b/cross_values/CMakeLists.txt @@ -43,10 +43,10 @@ if (NOT CROSS_VALUES_CONFIG) -DPANDA_RELEASE_BUILD=${PANDA_RELEASE_BUILD} -DTOOLCHAIN_CLANG_ROOT=${TOOLCHAIN_CLANG_ROOT} -DTOOLCHAIN_SYSROOT=${TOOLCHAIN_SYSROOT} - -DPANDA_WITH_TESTS=${PANDA_WITH_TESTS} + -DPANDA_WITH_TESTS=FALSE + -DPANDA_WITH_BENCHMARKS=FALSE -DPANDA_ENABLE_UNDEFINED_BEHAVIOR_SANITIZER=${PANDA_ENABLE_UNDEFINED_BEHAVIOR_SANITIZER} -DPANDA_ENABLE_ADDRESS_SANITIZER=${PANDA_ENABLE_ADDRESS_SANITIZER} - -DPANDA_EXPORT_CTS_OPTIONS=${PANDA_EXPORT_CTS_OPTIONS} 
-DCROSS_VALUES_CONFIG=TRUE --no-warn-unused-cli diff --git a/disassembler/disassembler.cpp b/disassembler/disassembler.cpp index 6a4882a3eececba8b161ce3d3693d4fba335352d..4cc1cc5280ca77b996a8374506458c71b74577b3 100644 --- a/disassembler/disassembler.cpp +++ b/disassembler/disassembler.cpp @@ -1495,8 +1495,7 @@ std::string Disassembler::IDToString(BytecodeInstruction bc_ins, panda_file::Fil name << StringDataToString(file_->GetStringData(field_accessor.GetNameId())); } else if (bc_ins.HasFlag(BytecodeInstruction::Flags::LITERALARRAY_ID)) { panda_file::LiteralDataAccessor lit_array_accessor(*file_, file_->GetLiteralArraysId()); - auto idx = bc_ins.GetId().AsFileId(); - auto index = lit_array_accessor.ResolveLiteralArrayIndex(idx); + auto index = bc_ins.GetId().AsIndex(); name << "array_" << index; } diff --git a/disassembler/templates/get_ins_info.cpp.erb b/disassembler/templates/get_ins_info.cpp.erb index 0fa1c9fd265004cb6ebe8976d5ce47c981fb90dc..73ef464c94dfc70c1981dee00997dc3dad20420c 100644 --- a/disassembler/templates/get_ins_info.cpp.erb +++ b/disassembler/templates/get_ins_info.cpp.erb @@ -20,6 +20,7 @@ namespace panda::disasm { +// NOLINTNEXTLINE(readability-function-size) void Disassembler::GetInsInfo(const panda_file::File::EntityId& code_id, MethodInfo* method_info /* out */) const { const static size_t FORMAT_WIDTH = 20; const static size_t INSTRUCTION_WIDTH = 2; diff --git a/disassembler/templates/intrinsics_gen.h.erb b/disassembler/templates/intrinsics_gen.h.erb index 5c0198f4994cc26ff719904a7fd27e6eb16574e6..1ef37fb7ebb7811a79e62e5b5052fda52fa51ebb 100644 --- a/disassembler/templates/intrinsics_gen.h.erb +++ b/disassembler/templates/intrinsics_gen.h.erb @@ -43,7 +43,7 @@ bool Initialize() { std::string_view space; -% Runtime::intrinsics.each do |intrinsic| +% Runtime::intrinsics.select { |i| !i.signature.stackrange }.each do |intrinsic| space = std::string_view("<%= intrinsic.space %>"); if (std::find(spaces.begin(), spaces.end(), space) != 
spaces.end()) { auto mutf8_name = reinterpret_cast("<%= get_object_descriptor(intrinsic.class_name) %>"); diff --git a/docs/file_format.md b/docs/file_format.md index e02f5a99ab1e0a99ff47862c45949b9288b41051..61c85b85e1a5707d2648375e4cb7d686c8ef9da4 100644 --- a/docs/file_format.md +++ b/docs/file_format.md @@ -725,7 +725,7 @@ Format: | Name | Format | Description | | ---- | ------ | ----------- | -| `value` | `uint32_t` | The value represents an offset to [Method](#method) or [ForeignMethod](#foreignmethod). | +| `value` | `uint32_t` | The value represents an offset to [Method](#method) or [ForeignMethod](#foreignmethod). | #### MethodHandleValue @@ -963,7 +963,7 @@ A line number program consists of instructions. Each instruction has one byte op | `SET_EPILOGUE_BEGIN` | `0x08` | | | Set `epilogue_end` register to `true`. Any special opcodes clear `epilogue_end` register. | | `SET_FILE` | `0x09` | | `uleb128` | Set `file` register to the value `constant_pool_ptr` refers to. The argument is an offset to [String](#string) which represents the file name or `0`. | | `SET_SOURCE_CODE` | `0x0a` | | `uleb128` | Set `source_code` register to the value `constant_pool_ptr` refers to. The argument is an offset to [String](#string) which represents the source code or `0`. | -| SET_COLUMN | `0x0b` | | `uleb128` | Set `column` register by the value `constant_pool_ptr` refers to | +| `SET_COLUMN` | `0x0b` | | `uleb128` | Set `column` register by the value `constant_pool_ptr` refers to | | Special opcodes | `0x0c..0xff` | | | | Special opcodes: diff --git a/docs/runtime-compiled_code-interaction.md b/docs/runtime-compiled_code-interaction.md index a1e53ffd29e66d4b41892dc394605a017ed9ddec..93cde6512c0ef8a5e5db42b05765afb3b8527ff5 100644 --- a/docs/runtime-compiled_code-interaction.md +++ b/docs/runtime-compiled_code-interaction.md @@ -26,7 +26,7 @@ changes its entrypoint to newly generated code. 
Next time when the function gets ## Calling convention Panda runtime and managed code must call functions according to the target calling convention. -Compiled code of a managed function must accept one extra argumnent: the pointer to `panda::Method` which describes this function. +Compiled code of a managed function must accept one extra argument: the pointer to `panda::Method` which describes this function. This argument must be the first argument. Example: @@ -39,6 +39,12 @@ the function accepts 3 arguments: The function must return the result in the register R0. +### Dynamic calling convention +For dynamic languages a compiled function accepts all arguments in stack slots. +Two additional arguments (`panda::Method*` and `uint32_t num_actual_args`) are passed in first two argument registers of the target calling convention. + +The result of call is placed according to the target calling convention. + ## Structure of `panda::ManagedThread` `panda::ManagedThread` has the following fields that compiled code may use: diff --git a/irtoc/backend/compilation.cpp b/irtoc/backend/compilation.cpp index a1bba1a962e5f96e086cb37064127a69f5c66760..cf74b94bd0ee4d437716abe809b4cdd4cbaec135 100644 --- a/irtoc/backend/compilation.cpp +++ b/irtoc/backend/compilation.cpp @@ -170,7 +170,7 @@ Compilation::Result Compilation::MakeElf(std::string_view output) #endif static constexpr size_t MAX_CODE_ALIGNMENT = 64; - static constexpr std::array padding_data {0}; + static constexpr std::array PADDING_DATA {0}; CHECK_LE(GetCodeAlignment(GetArch()), MAX_CODE_ALIGNMENT); uint32_t code_alignment = GetCodeAlignment(GetArch()); @@ -183,7 +183,7 @@ Compilation::Result Compilation::MakeElf(std::string_view output) // Align function if (auto padding = offset % code_alignment; padding != 0) { - text_sec->append_data(reinterpret_cast(padding_data.data()), padding); + text_sec->append_data(reinterpret_cast(PADDING_DATA.data()), padding); offset += padding; } auto symbol = 
symbol_writer.add_symbol(str_writer, unit->GetName(), offset, code.size(), STB_GLOBAL, STT_FUNC, diff --git a/irtoc/backend/compiler/codegen_boundary.cpp b/irtoc/backend/compiler/codegen_boundary.cpp index 13f3be371c932255f1dfd415e40ecd6127e74b72..7a45df1397581b9d835260abc5c1f9d9d156e4f4 100644 --- a/irtoc/backend/compiler/codegen_boundary.cpp +++ b/irtoc/backend/compiler/codegen_boundary.cpp @@ -39,7 +39,8 @@ void CodegenBoundary::GeneratePrologue() encoder->EncodeStr(GetTarget().GetLinkReg(), MemRef(ThreadReg(), GetRuntime()->GetTlsFrameOffset(GetArch()))); } else { static constexpr ssize_t FP_OFFSET = 2; - encoder->EncodeSti(Imm(FrameBridgeKind::COMPILED_CODE_TO_INTERPRETER), + ASSERT(sizeof(FrameBridgeKind) <= GetTarget().WordSize()); + encoder->EncodeSti(FrameBridgeKind::COMPILED_CODE_TO_INTERPRETER, GetTarget().WordSize(), MemRef(GetTarget().GetStackReg(), -1 * GetTarget().WordSize())); encoder->EncodeStr(GetTarget().GetFrameReg(), MemRef(GetTarget().GetStackReg(), -FP_OFFSET * GetTarget().WordSize())); diff --git a/irtoc/backend/dwarf_builder.cpp b/irtoc/backend/dwarf_builder.cpp index 27160dbfb67dddf509a9a92bcf18fb1747263eba..05c7b0c0920876ca4143fd2a6e8d4351402bc86d 100644 --- a/irtoc/backend/dwarf_builder.cpp +++ b/irtoc/backend/dwarf_builder.cpp @@ -203,11 +203,11 @@ bool DwarfBuilder::Finalize(uint32_t code_size) for (decltype(sections) i = 0; i < sections; ++i) { Dwarf_Unsigned len = 0; - Dwarf_Signed elfIdx = 0; - auto bytes = reinterpret_cast(dwarf_get_section_bytes(dwarf_, i, &elfIdx, &len, &error)); + Dwarf_Signed elf_idx = 0; + auto bytes = reinterpret_cast(dwarf_get_section_bytes(dwarf_, i, &elf_idx, &len, &error)); ASSERT(error == DW_DLV_OK); (void)bytes; - sections_[index_map_[elfIdx]]->append_data(bytes, len); + sections_[index_map_[elf_idx]]->append_data(bytes, len); } Dwarf_Unsigned rel_sections_count; diff --git a/irtoc/backend/function.cpp b/irtoc/backend/function.cpp index 
012bd995642839bc9651a14a15a3b27cf8f72b5b..b7a0d48c61769a044c4dee726fcc3048aa5a4956 100644 --- a/irtoc/backend/function.cpp +++ b/irtoc/backend/function.cpp @@ -54,9 +54,11 @@ Function::Result Function::Compile(Arch arch, ArenaAllocator *allocator, ArenaAl { IrtocRuntimeInterface runtime; + // NOLINTNEXTLINE(readability-identifier-naming) ArenaAllocator &allocator_ = *allocator; + // NOLINTNEXTLINE(readability-identifier-naming) ArenaAllocator &local_allocator_ = *local_allocator; - + // NOLINTNEXTLINE(readability-identifier-naming) graph_ = allocator_.New(&allocator_, &local_allocator_, arch, this, &runtime, false); builder_ = std::make_unique(); diff --git a/irtoc/scripts/interpreter.irt b/irtoc/scripts/interpreter.irt index 9fe5cf49df6744081332dab98c058bda7e6e7799..9c2dc61a950c09ed2e1dd86a5161df6a4120794c 100644 --- a/irtoc/scripts/interpreter.irt +++ b/irtoc/scripts/interpreter.irt @@ -610,9 +610,9 @@ macro(:handle_lda_type_id16) do |id| set_acc_object(type) end -macro(:handle_lda_const_v8_id32) do |v, id| +macro(:handle_lda_const_v8_id16) do |v, id| method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr - cnst := call_runtime("ResolveLiteralArrayByIdEntrypoint", method_ptr, id).ptr + cnst := call_runtime("ResolveLiteralArrayByIdEntrypoint", method_ptr, u16tou32(id)).ptr If(cnst, 0).CC(:CC_EQ).b { move_to_exception } @@ -653,18 +653,20 @@ macro(:handle_inci_v4_imm4) do |v, imm| set_value(v, add).i32 end -macro(:handle_cmp) do |acc_val, vs| - # TODO: use Cmp IR instruction? - If(acc_val, vs).CC(:CC_LT).b { - res1 := -1 - } Else { - If(acc_val, vs).CC(:CC_EQ).b { - res2 := 0 +[['LT', ''], ['B', 'u']].each do |cc, sign| + macro(:"handle_#{sign}cmp") do |acc_val, vs| + # TODO: use Cmp IR instruction? 
+ If(acc_val, vs).CC(:"CC_#{cc}").b { + res1 := -1 } Else { - res3 := 1 + If(acc_val, vs).CC(:CC_EQ).b { + res2 := 0 + } Else { + res3 := 1 + } } - } - acc := Phi(res1, res2, res3).i32 + acc := Phi(res1, res2, res3).i32 + end end ['Add', 'Sub', 'And', 'Mul', 'Or', 'Xor', 'Shl', 'Shr', 'AShr'].each do |op| @@ -942,19 +944,42 @@ macro(:handle_newobj_v8_id16) do |vd, id| set_object(vd, object).ref end -[['', :u32], ['64_', :u64]].each do |name, type| - macro(:"handle_stobj_#{name}v8_id16") do |vs, id| - If(vs, 0).CC(:CC_EQ).b { - call_runtime("ThrowNullPointerExceptionFromInterpreter").void - move_to_exception - } - field := field_offset(id, false) - If(field, 0).CC(:CC_EQ).b { - move_to_exception - } - offset = LoadI(field).Imm("Field::GetOffsetOffset()").word - Store(vs, offset, acc.send(type)).send(type) - end +macro(:"handle_stobj_v8_id16") do |vs, id| + If(vs, 0).CC(:CC_EQ).b { + call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr + move_to_exception + } + field := field_offset(id, false) + If(field, 0).CC(:CC_EQ).b { + move_to_exception + } + field_access_flags := LoadI(field).Imm("Field::GetAccessFlagsOffset()").u32 + field_type_id := ShrI(AndI(field_access_flags).Imm("ACC_TYPE").u32).Imm("ACC_TYPE_SHIFT").u32 + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + + If(field_type_id, 0x7).CC(:CC_LT).b { # < 32 bits + [[0x2, "u8"], [0x3, "i8"], [0x4, "u8"], [0x5, "i16"], [0x6, "u16"]].each do |typeid, field_type| + If(field_type_id, typeid).CC(:CC_EQ).b { + acc_type = field_type[0] + "32" + Store(vs, offset, acc.send(:"#{acc_type}")).send(:"#{field_type}") + } + end + } Else { + Store(vs, offset, acc.u32).u32 + } +end + +macro(:"handle_stobj_64_v8_id16") do |vs, id| + If(vs, 0).CC(:CC_EQ).b { + call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr + move_to_exception + } + field := field_offset(id, false) + If(field, 0).CC(:CC_EQ).b { + move_to_exception + } + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + Store(vs, offset, 
acc.u64).u64 end macro(:handle_stobj_obj_v8_id16) do |vs, id| @@ -971,20 +996,44 @@ macro(:handle_stobj_obj_v8_id16) do |vs, id| Store(vs, offset, acc.ref).SetNeedBarrier(true).ref end -[['', :u32], ['64_', :u64]].each do |name, type| - macro(:"handle_stobj_v_#{name}v4_v4_id16") do |v1, v2, id| - If(v2, 0).CC(:CC_EQ).b { - call_runtime("ThrowNullPointerExceptionFromInterpreter").void - move_to_exception - } - field := field_offset(id) - acc := Phi(acc, acc_restored).send(acc.type) - If(field, 0).CC(:CC_EQ).b { - move_to_exception - } - offset = LoadI(field).Imm("Field::GetOffsetOffset()").word - Store(v2.ref, offset, v1.send(type)).send(type) - end +macro(:"handle_stobj_v_v4_v4_id16") do |v1, v2, id| + If(v2, 0).CC(:CC_EQ).b { + call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr + move_to_exception + } + field := field_offset(id) + acc := Phi(acc, acc_restored).send(acc.type) + If(field, 0).CC(:CC_EQ).b { + move_to_exception + } + field_access_flags := LoadI(field).Imm("Field::GetAccessFlagsOffset()").u32 + field_type_id := ShrI(AndI(field_access_flags).Imm("ACC_TYPE").u32).Imm("ACC_TYPE_SHIFT").u32 + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + + If(field_type_id, 0x7).CC(:CC_LT).b { # < 32 bits + [[0x2, "u8"], [0x3, "i8"], [0x4, "u8"], [0x5, "i16"], [0x6, "u16"]].each do |typeid, field_type| + If(field_type_id, typeid).CC(:CC_EQ).b { + reg_type = field_type[0] + "32" + Store(v2, offset, v1.send(:"#{reg_type}")).send(:"#{field_type}") + } + end + } Else { + Store(v2, offset, v1.u32).u32 + } +end + +macro(:"handle_stobj_v_64_v4_v4_id16") do |v1, v2, id| + If(v2, 0).CC(:CC_EQ).b { + call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr + move_to_exception + } + field := field_offset(id) + acc := Phi(acc, acc_restored).send(acc.type) + If(field, 0).CC(:CC_EQ).b { + move_to_exception + } + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + Store(v2, offset, v1.u64).u64 end macro(:handle_stobj_v_obj_v4_v4_id16) do |v1, v2, id| 
@@ -1001,22 +1050,51 @@ macro(:handle_stobj_v_obj_v4_v4_id16) do |v1, v2, id| Store(v2.ref, offset, v1.ref).SetNeedBarrier(true).ref end -[['', :u32], ['64_', :u64]].each do |name, type| - macro(:"handle_ldobj_#{name}v8_id16") do |vs, id| - If(vs, 0).CC(:CC_EQ).b { - call_runtime("ThrowNullPointerExceptionFromInterpreter").void - move_to_exception - } - save_acc() - field := field_offset(id, false) - # no restore as acc is going to be redefined - If(field, 0).CC(:CC_EQ).b { - move_to_exception - } - offset = LoadI(field).Imm("Field::GetOffsetOffset()").word - value := Load(vs, offset).send(type) - set_acc_primitive(value) - end +macro(:"handle_ldobj_v8_id16") do |vs, id| + If(vs, 0).CC(:CC_EQ).b { + call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr + move_to_exception + } + save_acc() + field := field_offset(id, false) + # no restore as acc is going to be redefined + If(field, 0).CC(:CC_EQ).b { + move_to_exception + } + field_access_flags := LoadI(field).Imm("Field::GetAccessFlagsOffset()").u32 + field_type_id := ShrI(AndI(field_access_flags).Imm("ACC_TYPE").u32).Imm("ACC_TYPE_SHIFT").u32 + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + + If(field_type_id, 0x7).CC(:CC_LT).b { # < 32 bits + [[0x2, "u8"], [0x3, "i8"], [0x4, "u8"], [0x5, "i16"], [0x6, "u16"]].each do |typeid, field_type| + If(field_type_id, typeid).CC(:CC_EQ).b { + acc_value := send(:"#{field_type}to#{field_type[0] + "32"}", Load(vs, offset).send(:"#{field_type}")) + } + acc := Phi(acc.u64, acc_value.u64).u64 + end + acc_casted_slow := acc + } Else { + acc_casted_fast := Load(vs, offset).u32 + } + + acc := Phi(acc_casted_slow.u64, acc_casted_fast.u64).u64 + acc_tag := Constants::PRIMITIVE_TAG +end + +macro(:"handle_ldobj_64_v8_id16") do |vs, id| + If(vs, 0).CC(:CC_EQ).b { + call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr + move_to_exception + } + save_acc() + field := field_offset(id, false) + # no restore as acc is going to be redefined + If(field, 0).CC(:CC_EQ).b 
{ + move_to_exception + } + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + acc := Load(vs, offset).u64 + acc_tag := Constants::PRIMITIVE_TAG end macro(:handle_ldobj_obj_v8_id16) do |vs, id| @@ -1035,20 +1113,42 @@ macro(:handle_ldobj_obj_v8_id16) do |vs, id| set_acc_object(value) end -[['', :u32], ['64_', :u64]].each do |name, type| - macro(:"handle_ldobj_v_#{name}v4_v4_id16") do |vd, vs, id| - If(vs, 0).CC(:CC_EQ).b { - call_runtime("ThrowNullPointerExceptionFromInterpreter").void - move_to_exception - } - field := field_offset(id) - If(field, 0).CC(:CC_EQ).b { - move_to_exception - } - offset = LoadI(field).Imm("Field::GetOffsetOffset()").word - value := Load(vs, offset).send(type) - set_primitive(vd, value).send(type) - end +macro(:"handle_ldobj_v_v4_v4_id16") do |vd, vs, id| + If(vs, 0).CC(:CC_EQ).b { + call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr + move_to_exception + } + field := field_offset(id) + If(field, 0).CC(:CC_EQ).b { + move_to_exception + } + field_access_flags := LoadI(field).Imm("Field::GetAccessFlagsOffset()").u32 + field_type_id := ShrI(AndI(field_access_flags).Imm("ACC_TYPE").u32).Imm("ACC_TYPE_SHIFT").u32 + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + + If(field_type_id, 0x7).CC(:CC_LT).b { # < 32 bits + [[0x2, "u8"], [0x3, "i8"], [0x4, "u8"], [0x5, "i16"], [0x6, "u16"]].each do |typeid, field_type| + If(field_type_id, typeid).CC(:CC_EQ).b { + store_type = field_type[0] + "32" + set_primitive(vd, send(:"#{field_type}to#{store_type}", Load(vs, offset).send(:"#{field_type}"))).send(:"#{store_type}") + } + end + } Else { + set_primitive(vd, Load(vs, offset).u32).u32 + } +end + +macro(:"handle_ldobj_v_64_v4_v4_id16") do |vd, vs, id| + If(vs, 0).CC(:CC_EQ).b { + call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr + move_to_exception + } + field := field_offset(id) + If(field, 0).CC(:CC_EQ).b { + move_to_exception + } + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + set_primitive(vd, 
Load(vs, offset).u64).u64 end macro(:handle_ldobj_v_obj_v4_v4_id16) do |vd, vs, id| @@ -1065,34 +1165,46 @@ macro(:handle_ldobj_v_obj_v4_v4_id16) do |vd, vs, id| set_object(vd, value).ref end -[['', 32], ['64_', 64]].each do |name, type_size| - macro(:"handle_ststatic_#{name}id16") do |id| - method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr - update_bytecode_offset +macro(:"handle_ststatic_id16") do |id| + method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr + update_bytecode_offset - field := static_field(id, false) - # no restore because acc holds primitive value + field := static_field(id, false) + # no restore because acc holds primitive value - If(field, 0).CC(:CC_EQ).b { - move_to_exception - } - field_access_flags := LoadI(field).Imm("Field::GetAccessFlagsOffset()").u32 - field_type_id := ShrI(AndI(field_access_flags).Imm("ACC_TYPE").u32).Imm("ACC_TYPE_SHIFT").u32 + If(field, 0).CC(:CC_EQ).b { + move_to_exception + } + field_access_flags := LoadI(field).Imm("Field::GetAccessFlagsOffset()").u32 + field_type_id := ShrI(AndI(field_access_flags).Imm("ACC_TYPE").u32).Imm("ACC_TYPE_SHIFT").u32 + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + field_class := LoadI(field).Imm("Field::GetClassOffset()").ref - [[0x2, "u8"], [0x3, "i8"], [0x4, "u8"], [0x5, "i16"], [0x6, "u16"], [0x7, "i32"], [0x8, "u32"], [0x9, "u32"], [0xa, "u64"], [0xb, "i64"], [0xc, "u64"]].each do |typeid, type| - offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 - field_class := LoadI(field).Imm("Field::GetClassOffset()").ref + If(field_type_id, 0x7).CC(:CC_LT).b { # < 32 bits + [[0x2, "u8"], [0x3, "i8"], [0x4, "u8"], [0x5, "i16"], [0x6, "u16"]].each do |typeid, field_type| If(field_type_id, typeid).CC(:CC_EQ).b { - if type[1..-1].to_i > type_size - input_type = type[0] + "#{type_size}" - Store(field_class, offset, send(:"#{input_type}to#{type}", acc.send(:"#{input_type}"))).send(:"#{type}") - else - Store(field_class, offset, 
acc.send(:"#{type}")).send(:"#{type}") - end + acc_type = field_type[0] + "32" + Store(field_class, offset, acc.send(:"#{acc_type}")).send(:"#{field_type}") } end + } Else { + Store(field_class, offset, acc.u32).u32 + } +end - end +macro(:"handle_ststatic_64_id16") do |id| + method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr + update_bytecode_offset + + field := static_field(id, false) + # no restore because acc holds primitive value + + If(field, 0).CC(:CC_EQ).b { + move_to_exception + } + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + field_class := LoadI(field).Imm("Field::GetClassOffset()").ref + Store(field_class, offset, acc.u64).u64 end macro(:handle_ststatic_obj_id16) do |id| @@ -1106,37 +1218,50 @@ macro(:handle_ststatic_obj_id16) do |id| field_class := LoadI(field).Imm("Field::GetClassOffset()").ref Store(field_class, offset, acc.ref).SetNeedBarrier(true).ref end + +macro(:"handle_ldstatic_id16") do |id| + method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr + update_bytecode_offset + save_acc() + field := static_field(id, false) + # no restore as acc is going to be redefined + If(field, 0).CC(:CC_EQ).b { + move_to_exception + } + field_access_flags := LoadI(field).Imm("Field::GetAccessFlagsOffset()").u32 + field_type_id := ShrI(AndI(field_access_flags).Imm("ACC_TYPE").u32).Imm("ACC_TYPE_SHIFT").u32 + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + field_class := LoadI(field).Imm("Field::GetClassOffset()").ref -[['', 32], ['64_', 64]].each do |name, type_size| - macro(:"handle_ldstatic_#{name}id16") do |id| - method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr - update_bytecode_offset - save_acc() - field := static_field(id, false) - # no restore as acc is going to be redefined - If(field, 0).CC(:CC_EQ).b { - move_to_exception - } - field_access_flags := LoadI(field).Imm("Field::GetAccessFlagsOffset()").u32 - field_type_id := 
ShrI(AndI(field_access_flags).Imm("ACC_TYPE").u32).Imm("ACC_TYPE_SHIFT").u32 - offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 - field_class := LoadI(field).Imm("Field::GetClassOffset()").ref - - [[0x2, "u8"], [0x3, "i8"], [0x4, "u8"], [0x5, "i16"], [0x6, "u16"], [0x7, "i32"], [0x8, "u32"], [0x9, "u32"], [0xa, "u64"], [0xb, "i64"], [0xc, "u64"]].each do |typeid, type| - value := Load(field_class, offset).send(type) + If(field_type_id, 0x7).CC(:CC_LT).b { # < 32 bits + [[0x2, "u8"], [0x3, "i8"], [0x4, "u8"], [0x5, "i16"], [0x6, "u16"]].each do |typeid, field_type| If(field_type_id, typeid).CC(:CC_EQ).b { - if type[1..-1].to_i < type_size - store_type = type[0] + "#{type_size}" - casted_value := send(:"#{type}to#{store_type}", value) - acc_casted := casted_value - else - acc_casted := value - end + acc_value := send(:"#{field_type}to#{field_type[0] + "32"}", Load(field_class, offset).send(:"#{field_type}")) } - acc := Phi(acc.u64, acc_casted.u64).u64 + acc := Phi(acc.u64, acc_value.u64).u64 end - acc_tag := Constants::PRIMITIVE_TAG - end + acc_casted_slow := acc + } Else { + acc_casted_fast := Load(field_class, offset).u32 + } + + acc := Phi(acc_casted_slow.u64, acc_casted_fast.u64).u64 + acc_tag := Constants::PRIMITIVE_TAG +end + +macro(:"handle_ldstatic_64_id16") do |id| + method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr + update_bytecode_offset + save_acc() + field := static_field(id, false) + # no restore as acc is going to be redefined + If(field, 0).CC(:CC_EQ).b { + move_to_exception + } + offset = LoadI(field).Imm("Field::GetOffsetOffset()").u32 + field_class := LoadI(field).Imm("Field::GetClassOffset()").ref + acc := Load(field_class, offset).u64 + acc_tag := Constants::PRIMITIVE_TAG end macro(:handle_ldstatic_obj_id16) do |id| @@ -1493,7 +1618,7 @@ end Panda.instructions.each do |i| op = i.operands # alias for brevity mode = [:Interpreter] - mode << :DynamicMethod if i.properties.include?('dynamic') + mode.push(:DynamicMethod, 
:DynamicStub) if i.properties.include?('dynamic') lang = i.namespace == 'core' ? 'PANDA_ASSEMBLY' : i.namespace.upcase # Remove profile part from the handler name, thereby we avoid adjusting of handler names each time we add profile @@ -1551,21 +1676,23 @@ Panda.instructions.each do |i| frame := %frame tr := %tr - i.acc_and_operands.each do |o| - if o.dst? && !o.src? - next - end - if o.acc? - if o.type == "ref" || (o.type.include? "[]") - assert_has_object_eq(acc_tag.u64) - elsif ([o.type] & ['none', 'top', 'any']).empty? - assert_has_object_ne(acc_tag.u64) + if defines.DEBUG + i.acc_and_operands.each do |o| + if o.dst? && !o.src? + next end - elsif o.reg? - if o.type == "ref" || (o.type.include? "[]") - assert_has_object_eq(get_tag(vreg_ptr(o))) - elsif ([o.type] & ['none', 'top', 'any']).empty? - assert_has_object_ne(get_tag(vreg_ptr(o))) + if o.acc? + if o.type == "ref" || (o.type.include? "[]") + assert_has_object_eq(acc_tag.u64) + elsif ([o.type] & ['none', 'top', 'any']).empty? + assert_has_object_ne(acc_tag.u64) + end + elsif o.reg? + if o.type == "ref" || (o.type.include? "[]") + assert_has_object_eq(get_tag(vreg_ptr(o))) + elsif ([o.type] & ['none', 'top', 'any']).empty? 
+ assert_has_object_ne(get_tag(vreg_ptr(o))) + end end end end @@ -1604,8 +1731,8 @@ Panda.instructions.each do |i| handle_lda_str_id32(as_id(op[0])) when "LDA_TYPE_ID16" handle_lda_type_id16(as_id(op[0])) - when "LDA_CONST_V8_ID32" - handle_lda_const_v8_id32(vreg_ptr(op[0]), as_id(op[1])) + when "LDA_CONST_V8_ID16" + handle_lda_const_v8_id16(vreg_ptr(op[0]), as_id(op[1])) when "LDAI_IMM8" handle_ldai_imm(i8toi32(as_imm(op[0]))) when "LDAI_IMM16" @@ -1780,9 +1907,9 @@ Panda.instructions.each do |i| when "FCMPL_64_V8" handle_fcmpl_64_v8(vreg_value(op[0]).f64) when "UCMP_PREF_V8" - handle_cmp(acc.u32, vreg_value(op[0]).u32) + handle_ucmp(acc.u32, vreg_value(op[0]).u32) when "UCMP_64_PREF_V8" - handle_cmp(acc.u64, vreg_value(op[0]).u64) + handle_ucmp(acc.u64, vreg_value(op[0]).u64) when "CMP_64_V8" handle_cmp(acc.i64, vreg_value(op[0]).i64) # add @@ -1861,9 +1988,9 @@ Panda.instructions.each do |i| when "MOD2_64_V8" handle_mod2_64_v8(vreg_value(op[0]).i64) when "MODU2_PREF_V8" - handle_modu2_v8(vreg_value(op[0]).i32) + handle_modu2_v8(vreg_value(op[0]).u32) when "MODU2_64_PREF_V8" - handle_modu2_64_v8(vreg_value(op[0]).i64) + handle_modu2_64_v8(vreg_value(op[0]).u64) # neg when "FNEG_64" handle_fneg_64() diff --git a/isa/isa.yaml b/isa/isa.yaml index 2e9bc3d674e367ac4b060f7ee7cfb5deff640fca..4453698db25730deb3b245a3afb0be027f172794 100644 --- a/isa/isa.yaml +++ b/isa/isa.yaml @@ -210,6 +210,8 @@ verification: description: Type_id operand must correspond to an object type (other than array). - tag: type_id_any_object description: Type_id operand must correspond to any object type. + - tag: type_id_class + description: Type_id operand must correspond to a Class type. - tag: method_id_static description: Method_id must resolve to a static method or into initializer for a type other than one-dimensional array. - tag: method_id_non_static @@ -257,6 +259,10 @@ profiles_schema: {} # Profiles should be specified in an ISA description for a specific language. 
profiles: [] +namespaces: + - namespace: core + used_instructions: [] + groups: - title: No operation description: Perform an operation without behavior. @@ -428,7 +434,7 @@ groups: instructions: - sig: lda.const v:out:ref, literalarray_id acc: none - format: [op_v_8_id_32] + format: [op_v_8_id_16] opcode_idx: [0x19] - title: Load accumulator from type constant pool @@ -439,7 +445,7 @@ groups: exceptions: - x_link verification: - - type_id_any_object + - type_id_class pseudo: | type = load(id) if type == nullptr then diff --git a/isa/isapi.rb b/isa/isapi.rb index 5d7e6d56532b9a26b53a1747e71a9edba5897bbb..c0c02608dd6ca7b8964049d27218b9e6dc04c468 100755 --- a/isa/isapi.rb +++ b/isa/isapi.rb @@ -150,7 +150,7 @@ class Instruction < SimpleDelegator # Format instance for raw-data format name def format - Panda.format_hash[dig(:format)] + Panda.format_hash[dig(:format)] || Quick.format_hash[dig(:format)] end # Array of explicit operands @@ -503,6 +503,10 @@ module Panda @data.profiles.map { |x| [x.name, x] }.to_h end + def quickened_plugins + @data.namespaces.map { |i| [i.namespace, i.used_instructions] if i.quickening }.compact.to_h + end + # Hash with exception tag as a key and exception description as a value cached def exceptions_hash convert_to_hash(exceptions) @@ -555,7 +559,7 @@ module Panda # Array of all Format instances def formats - format_hash.values.uniq(&:pretty).sort_by(&:pretty) + format_hash.merge(Quick.format_hash).values.uniq(&:pretty).sort_by(&:pretty) end # delegating part of module @@ -576,27 +580,26 @@ module Panda end end - # private functions - # - private_class_method def convert_to_hash(arr) - hash = arr.map { |i| [i.tag, i.description] }.to_h - hash.default_proc = proc { |_, k| raise KeyError, "#{k} not found" } - hash - end + cached def each_data_instruction + # create separate instance for every instruction format and inherit group properties + groups.each_with_object([]) do |g, obj| + g.instructions.each do |i| + data_insn = 
merge_group_and_insn(g, i) + if data_insn[:opcode_idx] && (data_insn[:opcode_idx].size != data_insn[:format].size) + raise 'format and opcode_idx arrays should have equal size' + end - private_class_method def merge_group_and_insn(group, insn) - props = group.to_h - props.delete(:instructions) - props.merge(insn.to_h) do |_, old, new| - if old.is_a?(Array) && new.is_a?(Array) - old | new # extend array-like properties instead of overriding - else - new + data_insn[:format].each_with_index do |f, idx| + insn = data_insn.dup + insn[:format] = f + insn[:opcode_idx] = data_insn[:opcode_idx][idx] if data_insn[:opcode_idx] + obj << OpenStruct.new(insn) + end end - end + end.to_enum end - private_class_method cached def each_data_instruction + cached def each_data_instruction # create separate instance for every instruction format and inherit group properties groups.each_with_object([]) do |g, obj| g.instructions.each do |i| @@ -615,6 +618,26 @@ module Panda end.to_enum end + # private functions + # + private_class_method def convert_to_hash(arr) + hash = arr.map { |i| [i.tag, i.description] }.to_h + hash.default_proc = proc { |_, k| raise KeyError, "#{k} not found" } + hash + end + + private_class_method def merge_group_and_insn(group, insn) + props = group.to_h + props.delete(:instructions) + props.merge(insn.to_h) do |_, old, new| + if old.is_a?(Array) && new.is_a?(Array) + old | new # extend array-like properties instead of overriding + else + new + end + end + end + private_class_method def initialize_instructions(opcodes, &block) each_data_instruction.select(&block).each_with_object([]) do |instruction, insns| insn = instruction.clone @@ -634,8 +657,63 @@ module Panda res << Prefix.new(p) end end + + def Gen.on_require(data) + Panda.wrap_data(data) + Quick.init + end end -def Gen.on_require(data) - Panda.wrap_data(data) +module Quick + module_function + def init + @format_hash = {} + @select = Hash.new { |h, k| h[k] = [] } + Panda.each_data_instruction.each do 
|insn| + add_to_quick(OpenStruct.new(insn)) if insn.namespace.nil? || !Panda.quickened_plugins[insn.namespace].nil? + end + end + + def add_to_quick(insn) + if !insn.namespace.nil? + insn.format = remove_pref(insn.format) + insn.prefix = "" + end + ins = Instruction.new(insn) + @format_hash[insn.format] = Format.new(insn.format) + if ins.namespace == "core" + Panda.quickened_plugins.each do |ns, used| + if used.include?(ins.mnemonic) + @select[ns].push(ins.clone) + end + end + else + raise "Plugin #{ins.namespace} is not quickened" if Panda.quickened_plugins[insn.namespace].nil? + @select[ins.namespace].push(ins) + end + end + + def format_hash + @format_hash + end + + def instructions + arr = Array.new + @select.each do |_, insns| + arr.concat(insns) + end + arr.uniq(&:opcode) + end + + def select + @select + end + + def formats + @format_hash.values.uniq(&:pretty).sort_by(&:pretty) + end + + def remove_pref(str) + str.sub('PREF_', '').sub('pref_', '') + end end diff --git a/libpandabase/mem/arena_allocator_stl_adapter.h b/libpandabase/mem/arena_allocator_stl_adapter.h index d5648104c36a5e17aa1724ae06425e772d88fdd4..e72ef3928fbf5114a6d9c5e8c2422890da6b9a49 100644 --- a/libpandabase/mem/arena_allocator_stl_adapter.h +++ b/libpandabase/mem/arena_allocator_stl_adapter.h @@ -27,10 +27,10 @@ class ArenaAllocatorAdapter; template class ArenaAllocatorAdapter { public: - // NOLINTNEXTLINE(readability-identifier-naming) - using value_type = void; - using Pointer = void *; - using ConstPointer = const void *; + // Naming is not by code style because we need to have allocator traits compatibility. Don't change it. 
+ using value_type = void; // NOLINT(readability-identifier-naming) + using pointer = void *; // NOLINT(readability-identifier-naming) + using const_pointer = const void *; // NOLINT(readability-identifier-naming) template struct Rebind { @@ -63,14 +63,14 @@ private: template class ArenaAllocatorAdapter { public: - // NOLINTNEXTLINE(readability-identifier-naming) - using value_type = T; - using Pointer = T *; - using Reference = T &; - using ConstPointer = const T *; - using ConstReference = const T &; - using SizeType = size_t; - using DifferenceType = ptrdiff_t; + // Naming is not by code style because we need to have allocator traits compatibility. Don't change it. + using value_type = T; // NOLINT(readability-identifier-naming) + using pointer = T *; // NOLINT(readability-identifier-naming) + using reference = T &; // NOLINT(readability-identifier-naming) + using const_pointer = const T *; // NOLINT(readability-identifier-naming) + using const_reference = const T &; // NOLINT(readability-identifier-naming) + using size_type = size_t; // NOLINT(readability-identifier-naming) + using difference_type = ptrdiff_t; // NOLINT(readability-identifier-naming) template struct Rebind { @@ -103,32 +103,32 @@ public: ~ArenaAllocatorAdapter() = default; // NOLINTNEXTLINE(readability-identifier-naming) - SizeType max_size() const + size_type max_size() const { - return static_cast(-1) / sizeof(T); + return static_cast(-1) / sizeof(T); } // NOLINTNEXTLINE(readability-identifier-naming) - Pointer address(Reference x) const + pointer address(reference x) const { return &x; } // NOLINTNEXTLINE(readability-identifier-naming) - ConstReference address(ConstReference x) const + const_reference address(const_reference x) const { return &x; } // NOLINTNEXTLINE(readability-identifier-naming) - Pointer allocate(SizeType n, - [[maybe_unused]] typename ArenaAllocatorAdapter::Pointer ptr = nullptr) + pointer allocate(size_type n, + [[maybe_unused]] typename ArenaAllocatorAdapter::pointer 
ptr = nullptr) { ASSERT(n <= max_size()); return allocator_->template AllocArray(n); } // NOLINTNEXTLINE(readability-identifier-naming) - void deallocate([[maybe_unused]] Pointer p, [[maybe_unused]] SizeType n) {} + void deallocate([[maybe_unused]] pointer p, [[maybe_unused]] size_type n) {} template void construct(U *p, Args &&...args) // NOLINT(readability-identifier-naming) diff --git a/libpandabase/os/filesystem.cpp b/libpandabase/os/filesystem.cpp index 0a780f5d5b6c2b907ac3aec4893fffc200b2483e..d1366b2df5e885a7269959c0fb4effbf864bfa56 100644 --- a/libpandabase/os/filesystem.cpp +++ b/libpandabase/os/filesystem.cpp @@ -43,4 +43,10 @@ void CreateDirectories(const std::string &folder_name) #endif } +bool IsFileExists(const std::string &filepath) +{ + std::ifstream opened_file(filepath); + return opened_file.good(); +} + } // namespace panda::os diff --git a/libpandabase/os/filesystem.h b/libpandabase/os/filesystem.h index 4c81eb8cd3dddb4cfbec933424baa08eef5f9aee..94c5b3d8264ef2479db336ef88a8332be383a2ed 100644 --- a/libpandabase/os/filesystem.h +++ b/libpandabase/os/filesystem.h @@ -35,6 +35,8 @@ std::string GetAbsolutePath(std::string_view path); void CreateDirectories(const std::string &folder_name); +bool IsFileExists(const std::string &filepath); + } // namespace panda::os #endif // PANDA_FILESYSTEM_H diff --git a/libpandabase/os/thread.h b/libpandabase/os/thread.h index 374b6ba6db60f93dd01d7123b6596a6e00cb4b37..8ec1d2384d85ddbfbd9e4a6a0008d4be69a2e3e1 100644 --- a/libpandabase/os/thread.h +++ b/libpandabase/os/thread.h @@ -44,6 +44,7 @@ int SetThreadName(native_handle_type pthread_handle, const char *name); native_handle_type GetNativeHandle(); void Yield(); void NativeSleep(unsigned int ms); +void NativeSleepUS(std::chrono::microseconds us); void ThreadDetach(native_handle_type pthread_handle); void ThreadExit(void *ret); void ThreadJoin(native_handle_type pthread_handle, void **ret); diff --git a/libpandabase/templates/logger.yaml 
b/libpandabase/templates/logger.yaml index a11b22d00190d3faa7f122d72c6e51aced95b1b4..5721e15cc194c63ebca0a41951ced3daa0a6b79a 100644 --- a/libpandabase/templates/logger.yaml +++ b/libpandabase/templates/logger.yaml @@ -59,6 +59,7 @@ components: - name: quickener - name: irtoc - name: hotreload + - name: profiler dfx_components: - name: common diff --git a/libpandabase/utils/regmask.h b/libpandabase/utils/regmask.h index c70fe87868832ae4e4d24eca78ef5ae2300d342c..e9a44495ea6bfee230c7ee545fe0c759146a8ef5 100644 --- a/libpandabase/utils/regmask.h +++ b/libpandabase/utils/regmask.h @@ -171,6 +171,11 @@ public: return (sizeof(decltype(GetValue())) * BITS_PER_BYTE) - 1 - panda::Clz(GetValue()); } + constexpr static auto GetZeroMask() + { + return RegMaskImpl(); + } + constexpr Self operator~() const { return Self(~GetValue()); diff --git a/libpandafile/bytecode_instruction-inl.h b/libpandafile/bytecode_instruction-inl.h index 49892c4e82742bc7364993c15c6838bb4befe8bb..01969e6e8b21f58a4ef4aac9b1eaa4d2b43944c5 100644 --- a/libpandafile/bytecode_instruction-inl.h +++ b/libpandafile/bytecode_instruction-inl.h @@ -82,9 +82,10 @@ inline auto BytecodeInst::Read64(size_t offset, size_t width) const } template +template inline size_t BytecodeInst::GetSize() const { - return Size(GetFormat()); + return Size(GetFormat()); } #include diff --git a/libpandafile/bytecode_instruction.h b/libpandafile/bytecode_instruction.h index 596c631711eb532c3757de683a88770a45f6d30c..47e4b5ff755c4fcda43709c46823a2c32a325866 100644 --- a/libpandafile/bytecode_instruction.h +++ b/libpandafile/bytecode_instruction.h @@ -239,22 +239,28 @@ public: { } - template + template ::Opcode, size_t idx = 0> BytecodeId GetId() const; template uint16_t GetVReg() const; + template + uint16_t GetVReg(size_t idx) const; + template auto GetImm() const; + template ::Opcode> BytecodeId GetId(size_t idx = 0) const; void UpdateId(BytecodeId new_id, uint32_t idx = 0); + template ::Opcode> uint16_t GetVReg(size_t idx = 0) 
const; // Read imm and return it as int64_t/uint64_t + template ::Opcode> auto GetImm64(size_t idx = 0) const; /** @@ -271,7 +277,8 @@ public: * are more performance critical and compiler is not always clever enough to reduce them * to simple byte reads. */ - BytecodeInst::Opcode GetOpcode() const; + template ::Opcode> + EnumT GetOpcode() const; uint8_t GetPrimaryOpcode() const { @@ -323,9 +330,10 @@ public: return JumpTo(Size(format)); } + template ::Opcode> BytecodeInst GetNext() const { - return JumpTo(GetSize()); + return JumpTo(GetSize()); } const uint8_t *GetAddress() const @@ -370,10 +378,13 @@ public: template auto Read64(size_t offset, size_t width) const; + template ::Opcode> size_t GetSize() const; + template ::Opcode> Format GetFormat() const; + template ::Opcode> bool HasFlag(Flags flag) const; bool IsThrow(Exceptions exception) const; @@ -397,6 +408,12 @@ public: static constexpr bool HasImm(Format format, size_t idx); static constexpr size_t Size(Format format); + + template ::Opcode opcode> + static constexpr auto GetQuickened(); + + template ::Format format> + static constexpr auto GetQuickened(); }; template @@ -405,6 +422,38 @@ std::ostream &operator<<(std::ostream &os, const BytecodeInst &inst); using BytecodeInstruction = BytecodeInst; using BytecodeInstructionSafe = BytecodeInst; +template +class BytecodeInstructionResolver { +public: + template + static constexpr auto Get() + { + // NOLINTNEXTLINE(readability-braces-around-statements) + if constexpr (is_quickened) { + // NOLINTNEXTLINE(readability-magic-numbers) + return BytecodeInstruction::GetQuickened(); + // NOLINTNEXTLINE(readability-misleading-indentation) + } else { + // NOLINTNEXTLINE(readability-magic-numbers) + return opcode; + } + } + + template + static constexpr auto Get() + { + // NOLINTNEXTLINE(readability-braces-around-statements) + if constexpr (is_quickened) { + // NOLINTNEXTLINE(readability-magic-numbers) + return BytecodeInstruction::GetQuickened(); + // 
NOLINTNEXTLINE(readability-misleading-indentation) + } else { + // NOLINTNEXTLINE(readability-magic-numbers) + return format; + } + } +}; + } // namespace panda #endif // LIBANDAFILE_BYTECODE_INSTRUCTION_H_ diff --git a/libpandafile/file.h b/libpandafile/file.h index 9c149aacf948580342b1f4bc20c35266708af5fa..e3b03fdf7a6e43b5a0781ec3e931d81555340538 100644 --- a/libpandafile/file.h +++ b/libpandafile/file.h @@ -61,6 +61,7 @@ public: uint32_t file_size; uint32_t foreign_off; uint32_t foreign_size; + uint32_t quickened_flag; uint32_t num_classes; uint32_t class_idx_off; uint32_t num_lnps; diff --git a/libpandafile/file_item_container.cpp b/libpandafile/file_item_container.cpp index 9a541ef8bed7c4b99bcd55a517f06a419cefb636..056d3a1ae12f5cbac26dc28dd83716865fb0afe1 100644 --- a/libpandafile/file_item_container.cpp +++ b/libpandafile/file_item_container.cpp @@ -14,6 +14,7 @@ */ #include "file_item_container.h" +#include #include "macros.h" #include "file_format_version.h" #include "pgo.h" @@ -142,6 +143,7 @@ private: std::unordered_set items_; }; +// TODO(nsizov): make method for items deletion template static T *GetOrInsert(C &map, I &items, const P &pos, const E &key, bool is_foreign, Args &&...args) { @@ -426,6 +428,7 @@ uint32_t ItemContainer::ComputeLayout() uint32_t cur_offset = class_idx_offset + (num_classes + num_literalarrays) * ID_SIZE; UpdateOrderIndexes(); + UpdateLiteralIndexes(); RebuildIndexSection(); RebuildLineNumberProgramIndex(); @@ -519,6 +522,15 @@ void ItemContainer::UpdateOrderIndexes() end_->SetOrderIndex(idx++); } +void ItemContainer::UpdateLiteralIndexes() +{ + size_t idx = 0; + + for (auto &it : literalarray_map_) { + it.second->SetIndex(idx++); + } +} + void ItemContainer::ReorderItems(panda::panda_file::pgo::ProfileOptimizer *profile_opt) { profile_opt->ProfileGuidedRelayout(items_); @@ -623,6 +635,10 @@ bool ItemContainer::WriteHeader(Writer *writer, ssize_t *checksum_offset) return false; } + if (!writer->Write(static_cast(is_quickened_))) 
{ + return false; + } + return WriteHeaderIndexInfo(writer); } diff --git a/libpandafile/file_item_container.h b/libpandafile/file_item_container.h index b58bc573de9a5ae8f1b5d1ea4fa3e2633fdd5cdc..34e9b5f21fbd6d254e1fc30f809a17da8b5784ab 100644 --- a/libpandafile/file_item_container.h +++ b/libpandafile/file_item_container.h @@ -70,6 +70,16 @@ public: LineNumberProgramItem *CreateLineNumberProgramItem(); + void SetQuickened() + { + is_quickened_ = true; + } + + bool IsQuickened() const + { + return is_quickened_; + } + template T *CreateItem(Args &&...args) { @@ -516,6 +526,8 @@ private: void UpdateOrderIndexes(); + void UpdateLiteralIndexes(); + void ProcessIndexDependecies(BaseItem *item); size_t GetForeignOffset() const; @@ -551,6 +563,8 @@ private: std::list>::iterator debug_items_end_; BaseItem *end_; + + bool is_quickened_ = false; }; } // namespace panda::panda_file diff --git a/libpandafile/file_items.h b/libpandafile/file_items.h index e1a67ad1910f0fb1826678b162287887aabf0f4d..163e799c86b059735c368884f5ffd169d98db252 100644 --- a/libpandafile/file_items.h +++ b/libpandafile/file_items.h @@ -834,6 +834,11 @@ public: debug_info_ = debug_info; } + DebugInfoItem *GetDebugInfo() + { + return debug_info_; + } + void AddRuntimeAnnotation(AnnotationItem *runtime_annotation) { runtime_annotations_.push_back(runtime_annotation); @@ -1094,6 +1099,11 @@ public: return &runtime_type_annotations_; } + SourceLang GetSourceLang() + { + return source_lang_; + } + DEFAULT_MOVE_SEMANTIC(ClassItem); DEFAULT_COPY_SEMANTIC(ClassItem); @@ -1272,6 +1282,26 @@ public: DEFAULT_MOVE_SEMANTIC(CatchBlock); DEFAULT_COPY_SEMANTIC(CatchBlock); + MethodItem *GetMethod() + { + return method_; + } + + BaseClassItem *GetType() + { + return type_; + } + + size_t GetHandlerPc() + { + return handler_pc_; + } + + size_t GetCodeSize() + { + return code_size_; + } + size_t CalculateSize() const override; bool Write(Writer *writer) override; @@ -1300,6 +1330,21 @@ public: 
DEFAULT_MOVE_SEMANTIC(TryBlock); DEFAULT_COPY_SEMANTIC(TryBlock); + size_t GetStartPc() const + { + return start_pc_; + } + + size_t GetLength() const + { + return length_; + } + + std::vector GetCatchBlocks() const + { + return catch_blocks_; + } + size_t CalculateSizeWithoutCatchBlocks() const; void ComputeLayout() override; @@ -1353,6 +1398,11 @@ public: return num_ins_; } + std::vector GetTryBlocks() + { + return try_blocks_; + } + void AddTryBlock(const TryBlock &try_block) { try_blocks_.push_back(try_block); @@ -1401,6 +1451,16 @@ public: return names; } + size_t GetNumVregs() + { + return num_vregs_; + } + + size_t GetNumArgs() + { + return num_args_; + } + DEFAULT_MOVE_SEMANTIC(CodeItem); DEFAULT_COPY_SEMANTIC(CodeItem); @@ -1607,8 +1667,19 @@ public: return ItemTypes::LITERAL_ARRAY_ITEM; } + void SetIndex(uint32_t index) + { + index_ = index; + } + + uint32_t GetIndex() const + { + return index_; + } + private: std::vector items_; + uint32_t index_ {0}; }; class AnnotationItem : public BaseItem { diff --git a/libpandafile/file_reader.cpp b/libpandafile/file_reader.cpp index 3acdf4fb2a3e037f64b5f7b4f3ab0e8d9ec2c1b9..2fab6e6be97e69d1a9c3ea531e201a89b19631dd 100644 --- a/libpandafile/file_reader.cpp +++ b/libpandafile/file_reader.cpp @@ -33,6 +33,9 @@ namespace panda::panda_file { bool FileReader::ReadContainer() { + const File::Header *header = file_->GetHeader(); + LOG_IF(header->quickened_flag, FATAL, PANDAFILE) << "File " << file_->GetFullFileName() << " is already quickened"; + if (!ReadClasses()) { return false; } @@ -49,126 +52,135 @@ bool FileReader::ReadContainer() } /* static */ -bool FileReader::CreateLiteralArrayItem(const LiteralDataAccessor::LiteralValue &lit_value, const LiteralTag &tag, - File::EntityId array_id) +bool FileReader::CreateLiteralArrayItem(LiteralDataAccessor *lit_array_accessor, File::EntityId array_id, + uint32_t index) { auto it = items_done_.find(array_id); if (it != items_done_.end()) { return true; } - LiteralArrayItem 
*item = container_.GetOrCreateLiteralArrayItem(std::to_string(array_id.GetOffset())); + LiteralArrayItem *item = container_.GetOrCreateLiteralArrayItem(std::to_string(index)); items_done_.insert({array_id, static_cast(item)}); - File::EntityId id(std::get(lit_value)); - auto sp = file_->GetSpanFromId(id); - std::vector literal_array; - literal_array.emplace_back(static_cast(tag)); - switch (tag) { - case panda_file::LiteralTag::BOOL: { - auto v = helpers::Read(&sp); - literal_array.emplace_back(static_cast(v)); - break; - } - case panda_file::LiteralTag::TAGVALUE: - case panda_file::LiteralTag::ACCESSOR: - case panda_file::LiteralTag::NULLVALUE: { - auto v = helpers::Read(&sp); - literal_array.emplace_back(v); - break; - } - case panda_file::LiteralTag::ARRAY_U1: - case panda_file::LiteralTag::ARRAY_I8: - case panda_file::LiteralTag::ARRAY_U8: { - auto len = helpers::Read(&sp); - literal_array.emplace_back(len); - for (size_t i = 0; i < len; i++) { - auto v = helpers::Read(&sp); - literal_array.emplace_back(v); - } - break; - } - case panda_file::LiteralTag::ARRAY_I16: - case panda_file::LiteralTag::ARRAY_U16: { - auto len = helpers::Read(&sp); - literal_array.emplace_back(len); - for (size_t i = 0; i < len; i++) { - auto v = helpers::Read(&sp); - literal_array.emplace_back(v); - } - break; - } - case panda_file::LiteralTag::INTEGER: { - auto v = helpers::Read(&sp); - literal_array.emplace_back(v); - break; - } - case panda_file::LiteralTag::ARRAY_I32: - case panda_file::LiteralTag::ARRAY_U32: - case panda_file::LiteralTag::ARRAY_F32: { - auto len = helpers::Read(&sp); - literal_array.emplace_back(len); - for (size_t i = 0; i < len; i++) { - auto v = helpers::Read(&sp); - literal_array.emplace_back(v); - } - break; - } - case panda_file::LiteralTag::ARRAY_I64: - case panda_file::LiteralTag::ARRAY_U64: - case panda_file::LiteralTag::ARRAY_F64: { - auto len = helpers::Read(&sp); - literal_array.emplace_back(len); - for (size_t i = 0; i < len; i++) { - auto v = 
helpers::Read(&sp); - literal_array.emplace_back(v); - } - break; - } - case panda_file::LiteralTag::FLOAT: { - auto v = helpers::Read(&sp); - literal_array.emplace_back(v); - break; - } - case panda_file::LiteralTag::DOUBLE: { - auto v = panda_file::helpers::Read(&sp); - literal_array.emplace_back(v); - break; - } - case panda_file::LiteralTag::STRING: { - File::EntityId str_id(helpers::Read(&sp)); - auto data = file_->GetStringData(str_id); - std::string item_str(utf::Mutf8AsCString(data.data)); - auto *string_item = container_.GetOrCreateStringItem(item_str); - literal_array.emplace_back(string_item); - break; - } - case panda_file::LiteralTag::ARRAY_STRING: { - auto len = helpers::Read(&sp); - literal_array.emplace_back(len); - for (size_t i = 0; i < len; i++) { - File::EntityId str_id(helpers::Read(&sp)); - auto data = file_->GetStringData(str_id); - std::string item_str(utf::Mutf8AsCString(data.data)); - auto *string_item = container_.GetOrCreateStringItem(item_str); - literal_array.emplace_back(string_item); + + lit_array_accessor->EnumerateLiteralVals( + array_id, [&literal_array, this](const panda_file::LiteralDataAccessor::LiteralValue &value, + const panda_file::LiteralTag &tag) { + literal_array.emplace_back(static_cast(tag)); + switch (tag) { + case panda_file::LiteralTag::BOOL: { + literal_array.emplace_back(static_cast(std::get(value))); + break; + } + case panda_file::LiteralTag::TAGVALUE: + case panda_file::LiteralTag::ACCESSOR: + case panda_file::LiteralTag::NULLVALUE: { + literal_array.emplace_back(std::get(value)); + break; + } + case panda_file::LiteralTag::ARRAY_U1: + case panda_file::LiteralTag::ARRAY_I8: + case panda_file::LiteralTag::ARRAY_U8: { + File::EntityId id(std::get(value)); + auto sp = file_->GetSpanFromId(id); + auto len = helpers::Read(&sp); + literal_array.emplace_back(len); + for (size_t i = 0; i < len; i++) { + auto v = helpers::Read(&sp); + literal_array.emplace_back(v); + } + break; + } + case 
panda_file::LiteralTag::ARRAY_I16: + case panda_file::LiteralTag::ARRAY_U16: { + File::EntityId id(std::get(value)); + auto sp = file_->GetSpanFromId(id); + auto len = helpers::Read(&sp); + literal_array.emplace_back(len); + for (size_t i = 0; i < len; i++) { + auto v = helpers::Read(&sp); + literal_array.emplace_back(v); + } + break; + } + case panda_file::LiteralTag::INTEGER: { + literal_array.emplace_back(std::get(value)); + break; + } + case panda_file::LiteralTag::ARRAY_I32: + case panda_file::LiteralTag::ARRAY_U32: + case panda_file::LiteralTag::ARRAY_F32: { + File::EntityId id(std::get(value)); + auto sp = file_->GetSpanFromId(id); + auto len = helpers::Read(&sp); + literal_array.emplace_back(len); + for (size_t i = 0; i < len; i++) { + auto v = helpers::Read(&sp); + literal_array.emplace_back(v); + } + break; + } + case panda_file::LiteralTag::ARRAY_I64: + case panda_file::LiteralTag::ARRAY_U64: + case panda_file::LiteralTag::ARRAY_F64: { + File::EntityId id(std::get(value)); + auto sp = file_->GetSpanFromId(id); + auto len = helpers::Read(&sp); + literal_array.emplace_back(len); + for (size_t i = 0; i < len; i++) { + auto v = helpers::Read(&sp); + literal_array.emplace_back(v); + } + break; + } + case panda_file::LiteralTag::FLOAT: { + literal_array.emplace_back(bit_cast(std::get(value))); + break; + } + case panda_file::LiteralTag::DOUBLE: { + literal_array.emplace_back(bit_cast(std::get(value))); + break; + } + case panda_file::LiteralTag::STRING: { + File::EntityId id(std::get(value)); + auto data = file_->GetStringData(id); + std::string item_str(utf::Mutf8AsCString(data.data)); + auto *string_item = container_.GetOrCreateStringItem(item_str); + literal_array.emplace_back(string_item); + break; + } + case panda_file::LiteralTag::ARRAY_STRING: { + File::EntityId id(std::get(value)); + auto sp = file_->GetSpanFromId(id); + auto len = helpers::Read(&sp); + literal_array.emplace_back(len); + for (size_t i = 0; i < len; i++) { + File::EntityId 
str_id(helpers::Read(&sp)); + auto data = file_->GetStringData(str_id); + std::string item_str(utf::Mutf8AsCString(data.data)); + auto *string_item = container_.GetOrCreateStringItem(item_str); + literal_array.emplace_back(string_item); + } + break; + } + case panda_file::LiteralTag::METHOD: + case panda_file::LiteralTag::GENERATORMETHOD: + case panda_file::LiteralTag::ASYNCGENERATORMETHOD: { + File::EntityId method_id(std::get(value)); + MethodDataAccessor method_acc(*file_, method_id); + auto name = method_acc.GetName(); + (void)name; + File::EntityId class_id(method_acc.GetClassId()); + auto *class_item = CreateClassItem(class_id); + literal_array.emplace_back(CreateMethodItem(class_item, method_id)); + break; + } + default: + UNREACHABLE(); } - break; - } - case panda_file::LiteralTag::METHOD: - case panda_file::LiteralTag::GENERATORMETHOD: - case panda_file::LiteralTag::ASYNCGENERATORMETHOD: { - File::EntityId method_id(helpers::Read(&sp)); - MethodDataAccessor method_acc(*file_, method_id); - File::EntityId class_id(method_acc.GetClassId()); - auto *class_item = CreateClassItem(class_id); - literal_array.emplace_back(CreateMethodItem(class_item, method_id)); - break; - } - default: - UNREACHABLE(); - } + }); item->AddItems(literal_array); @@ -583,7 +595,7 @@ DebugInfoItem *FileReader::CreateDebugInfoItem(File::EntityId debug_info_id) auto *debug_info_item = container_.CreateItem(lnp_item); items_done_.insert({debug_info_id, static_cast(debug_info_item)}); - DebugInfoDataAccessor debug_acc(*file_, debug_info_id); + panda_file::DebugInfoDataAccessor debug_acc(*file_, debug_info_id); debug_info_item->SetLineNumber(debug_acc.GetLineStart()); debug_acc.EnumerateParameters([&](File::EntityId param_id) { @@ -971,9 +983,9 @@ bool FileReader::ReadLiteralArrayItems() for (size_t i = 0; i < num_litarrays; i++) { auto id = lit_array_accessor.GetLiteralArrayId(i); - lit_array_accessor.EnumerateLiteralVals( - id, [id, this](const 
panda_file::LiteralDataAccessor::LiteralValue &value, - const panda_file::LiteralTag &tag) { CreateLiteralArrayItem(value, tag, id); }); + if (!CreateLiteralArrayItem(&lit_array_accessor, id, i)) { + return false; + } } return true; @@ -1142,6 +1154,10 @@ void FileReader::UpdateDebugInfoDependecies(File::EntityId debug_info_id) void FileReader::UpdateDebugInfo(DebugInfoItem *debug_info_item, File::EntityId debug_info_id) { auto *lnp_item = debug_info_item->GetLineNumberProgram(); + if (!lnp_item->GetData().empty()) { + return; + } + DebugInfoDataAccessor debug_acc(*file_, debug_info_id); const uint8_t *program = debug_acc.GetLineNumberProgram(); auto size = file_->GetSpanFromId(file_->GetIdFromPointer(program)).size(); @@ -1241,6 +1257,10 @@ void FileReader::UpdateDebugInfo(DebugInfoItem *debug_info_item, File::EntityId lnp_item->EmitSetFile(debug_info_item->GetConstantPool(), source_code_item); break; } + case LineNumberProgramItem::Opcode::SET_COLUMN: { + lnp_item->EmitColumn(debug_info_item->GetConstantPool(), 0, state.ReadULeb128()); + break; + } default: { auto opcode_value = static_cast(opcode); auto adjust_opcode = opcode_value - LineNumberProgramItem::OPCODE_BASE; @@ -1364,6 +1384,8 @@ void FileReader::ComputeLayoutAndUpdateIndices() container_.DeduplicateItems(false); container_.ComputeLayout(); + std::unordered_set code_items_done; + // Third pass, update bytecode indices for (const auto &it : *class_map) { auto *base_class_item = it.second; @@ -1371,10 +1393,12 @@ void FileReader::ComputeLayoutAndUpdateIndices() continue; } auto *class_item = static_cast(base_class_item); - class_item->VisitMethods([this, &reverse_done](BaseItem *param_item) { + class_item->VisitMethods([this, &reverse_done, &code_items_done](BaseItem *param_item) { auto *method_item = static_cast(param_item); auto *code_item = method_item->GetCode(); - if (code_item == nullptr) { + + auto code_it = code_items_done.find(code_item); + if (code_item == nullptr || code_it != 
code_items_done.end()) { return true; } @@ -1415,17 +1439,14 @@ void FileReader::ComputeLayoutAndUpdateIndices() std::string item_str(utf::Mutf8AsCString(data.data)); auto *string_item = container_.GetOrCreateStringItem(item_str); inst.UpdateId(BytecodeId(string_item->GetFileId().GetOffset())); - } else if (inst.HasFlag(Flags::LITERALARRAY_ID)) { - BytecodeId b_id = inst.GetId(); - File::EntityId old_id = b_id.AsFileId(); - ASSERT(items_done_.find(old_id) != items_done_.end()); - auto *array_item = items_done_.find(old_id)->second; - inst.UpdateId(BytecodeId(array_item->GetFileId().GetOffset())); } offset += inst.GetSize(); inst = inst.GetNext(); } + + code_items_done.insert(code_item); + return true; }); } diff --git a/libpandafile/file_reader.h b/libpandafile/file_reader.h index c542fb751a131c0bdbd4d2956fb85d6244ef08c5..5176a37e143bf73cced1b1f0837f0863c028ef95 100644 --- a/libpandafile/file_reader.h +++ b/libpandafile/file_reader.h @@ -59,6 +59,16 @@ public: return &container_; } + const File *GetFilePtr() + { + return file_.get(); + } + + const std::map *GetItems() + { + return &items_done_; + } + void ComputeLayoutAndUpdateIndices(); NO_COPY_SEMANTIC(FileReader); @@ -69,8 +79,7 @@ private: bool ReadIndexHeaders(); bool ReadClasses(); - bool CreateLiteralArrayItem(const LiteralDataAccessor::LiteralValue &lit_value, const LiteralTag &tag, - File::EntityId array_id); + bool CreateLiteralArrayItem(LiteralDataAccessor *lit_array_accessor, File::EntityId array_id, uint32_t index); AnnotationItem *CreateAnnotationItem(File::EntityId ann_id); MethodItem *CreateMethodItem(ClassItem *cls, File::EntityId method_id); ForeignMethodItem *CreateForeignMethodItem(BaseClassItem *fcls, File::EntityId method_id); diff --git a/libpandafile/pandafile_isapi.rb b/libpandafile/pandafile_isapi.rb index c723d9e8793f825c6e5108241e1d1b67995f48cc..e877c76f7132e4bfbb1d0ffe5a6f455da469b724 100755 --- a/libpandafile/pandafile_isapi.rb +++ b/libpandafile/pandafile_isapi.rb @@ -76,3 +76,7 @@ 
end def insns_uniq_sort_fmts Panda.instructions.uniq { |i| i.format.pretty }.sort_by { |insn| insn.format.pretty } end + +def q_insns_uniq_sort_fmts + (Panda.instructions + Quick::instructions).uniq { |i| i.format.pretty }.sort_by { |insn| insn.format.pretty } +end diff --git a/libpandafile/templates/bytecode_instruction-inl_gen.h.erb b/libpandafile/templates/bytecode_instruction-inl_gen.h.erb index 7cad68a490139c762fac06c955adf23e8f877ff1..a19cca24de2c25d132ef2efacce31f3828dbc3d9 100644 --- a/libpandafile/templates/bytecode_instruction-inl_gen.h.erb +++ b/libpandafile/templates/bytecode_instruction-inl_gen.h.erb @@ -17,7 +17,7 @@ template constexpr bool BytecodeInst::HasId(Format format, size_t idx) { switch (format) { -% insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| +% q_insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| % fmt = i.format % n = i.operands.count(&:id?) % next if n == 0 @@ -36,7 +36,7 @@ constexpr bool BytecodeInst::HasId(Format format, size_t idx) { template constexpr bool BytecodeInst::HasVReg(Format format, size_t idx) { switch (format) { -% insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| +% q_insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| % fmt = i.format % n = i.operands.count(&:reg?) % next if n == 0 @@ -55,7 +55,7 @@ constexpr bool BytecodeInst::HasVReg(Format format, size_t idx) { template constexpr bool BytecodeInst::HasImm(Format format, size_t idx) { switch (format) { -% insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| +% q_insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| % fmt = i.format % n = i.operands.count(&:imm?) 
% next if n == 0 @@ -86,11 +86,10 @@ constexpr size_t BytecodeInst::Size(Format format) { // NOLINT(readabilit } template -template ::Format format, size_t idx /* = 0 */> +template ::Format format, typename EnumT, size_t idx /* = 0 */> inline BytecodeId BytecodeInst::GetId() const { static_assert(HasId(format, idx), "Instruction doesn't have id operand with such index"); - -% insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| +% q_insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| % fmt = i.format % n = i.operands.count(&:id?) % next if n == 0 @@ -120,7 +119,7 @@ inline void BytecodeInst::UpdateId(BytecodeId new_id, uint32_t idx /* = 0 return; } -% insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| +% q_insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| % fmt = i.format % n = i.operands.count(&:id?) % next if n == 0 @@ -142,9 +141,10 @@ inline void BytecodeInst::UpdateId(BytecodeId new_id, uint32_t idx /* = 0 UNREACHABLE(); } -template +template +template inline BytecodeId BytecodeInst::GetId(size_t idx /* = 0 */) const { - Format format = GetFormat(); + Format format = GetFormat(); ASSERT_PRINT(HasId(format, idx), "Instruction doesn't have id operand with such index"); if (!HasId(format, idx)) { @@ -152,7 +152,7 @@ inline BytecodeId BytecodeInst::GetId(size_t idx /* = 0 */) const { } switch (format) { -% insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| +% q_insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| % fmt = i.format % n = i.operands.count(&:id?) 
% next if n == 0 @@ -180,8 +180,7 @@ template ::Format format, size_t idx /* = 0 */> __attribute__ ((visibility("hidden"))) ALWAYS_INLINE inline uint16_t BytecodeInst::GetVReg() const { // NOLINT(readability-function-size) static_assert(HasVReg(format, idx), "Instruction doesn't have vreg operand with such index"); - -% insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| +% q_insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| % fmt = i.format % n = i.operands.count(&:reg?) % next if n == 0 @@ -202,10 +201,11 @@ ALWAYS_INLINE inline uint16_t BytecodeInst::GetVReg() const { // NOLINT(r UNREACHABLE(); } -template +template +template __attribute__ ((visibility("hidden"))) ALWAYS_INLINE inline uint16_t BytecodeInst::GetVReg(size_t idx /* = 0 */) const { // NOLINT(readability-function-size) - Format format = GetFormat(); + Format format = GetFormat(); ASSERT_PRINT(HasVReg(format, idx), "Instruction doesn't have vreg operand with such index"); if (!HasVReg(format, idx)) { @@ -213,7 +213,7 @@ ALWAYS_INLINE inline uint16_t BytecodeInst::GetVReg(size_t idx /* = 0 */) } switch (format) { -% insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| +% q_insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| % fmt = i.format % n = i.operands.count(&:reg?) % next if n == 0 @@ -257,12 +257,35 @@ ALWAYS_INLINE inline int BytecodeInst::GetProfileId() const { // NOLINTNE } } +template +template ::Format format> +__attribute__ ((visibility("hidden"))) +ALWAYS_INLINE inline uint16_t BytecodeInst::GetVReg(size_t idx /* = 0 */) const { // NOLINTNEXTLINE(readability-function-size) + ASSERT_PRINT(HasVReg(format, idx), "Instruction doesn't have vreg operand with such index"); + +% q_insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| +% fmt = i.format +% n = i.operands.count(&:reg?) +% next if n == 0 +% +% reg_ops = i.operands.select(&:reg?) 
+% offsets = reg_ops.map(&:offset) +% widths = reg_ops.map(&:width) +% + if constexpr (format == Format::<%= fmt.pretty.upcase %>) { + constexpr std::array> OFFSETS{<%= offsets.join(", ") %>}; + constexpr std::array> WIDTHS{<%= widths.join(", ") %>}; + return static_cast(Read64(OFFSETS[idx], WIDTHS[idx])); + } +% end + UNREACHABLE(); +} + template template ::Format format, size_t idx /* = 0 */> inline auto BytecodeInst::GetImm() const { // NOLINT(readability-function-size) static_assert(HasImm(format, idx), "Instruction doesn't have imm operand with such index"); - -% insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| +% q_insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| % fmt = i.format % n = i.operands.count(&:imm?) % next if n == 0 @@ -284,8 +307,9 @@ inline auto BytecodeInst::GetImm() const { // NOLINT(readability-function } template +template inline auto BytecodeInst::GetImm64(size_t idx /* = 0 */) const { - Format format = GetFormat(); + Format format = GetFormat(); ASSERT_PRINT(HasImm(format, idx), "Instruction doesn't have imm operand with such index"); if (!HasImm(format, idx)) { @@ -293,7 +317,7 @@ inline auto BytecodeInst::GetImm64(size_t idx /* = 0 */) const { } switch (format) { -% insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| +% q_insns_uniq_sort_fmts.each do |i| # Panda::formats.each do |fmt| % fmt = i.format % n = i.operands.count(&:imm?) 
% next if n == 0 @@ -317,13 +341,14 @@ inline auto BytecodeInst::GetImm64(size_t idx /* = 0 */) const { } template -inline typename BytecodeInst::Opcode BytecodeInst::GetOpcode() const { +template +inline EnumT BytecodeInst::GetOpcode() const { uint8_t primary = GetPrimaryOpcode(); if (primary >= <%= Panda::prefixes.map(&:opcode_idx).min %>) { // NOLINT(readability-magic-numbers) uint8_t secondary = GetSecondaryOpcode(); - return static_cast((secondary << 8U) | primary); // NOLINT(hicpp-signed-bitwise) + return static_cast((secondary << 8U) | primary); // NOLINT(hicpp-signed-bitwise) } - return static_cast(primary); + return static_cast(primary); } template @@ -344,10 +369,11 @@ inline bool BytecodeInst::IsPrefixed() const { } template +template inline typename BytecodeInst::Format BytecodeInst::GetFormat() const { // NOLINT(readability-function-size) - switch(GetOpcode()) { + switch(GetOpcode()) { % Panda::instructions.each do |i| - case BytecodeInst::Opcode::<%= i.opcode.upcase %>: + case EnumT::<%= i.opcode.upcase %>: return BytecodeInst::Format::<%= i.format.pretty.upcase %>; % end default: @@ -357,14 +383,34 @@ inline typename BytecodeInst::Format BytecodeInst::GetFormat() const UNREACHABLE(); } +% Panda.quickened_plugins.each_key do |namespace| +% enum_name = namespace.upcase +template <> +template <> +inline typename BytecodeInst::Format BytecodeInst::GetFormat_Opcode>() const { // NOLINT(readability-function-size) + switch(GetOpcode_Opcode>()) { +% Quick::select[namespace].each do |i| + case BytecodeInstruction::<%= enum_name %>_Opcode::<%= i.opcode.upcase %>: + return BytecodeInst::Format::<%= i.format.pretty.upcase %>; +% end + default: + break; + } + + UNREACHABLE(); +} +% end + +template +template // NOLINTNEXTLINE(readability-function-size) -template inline bool BytecodeInst::HasFlag(Flags flag) const { - switch(GetOpcode()) { +inline bool BytecodeInst::HasFlag(Flags flag) const { + switch(GetOpcode()) { % Panda::instructions.each do |i| % flag_array = 
i.properties.map {|prop| "Flags::" + prop.upcase} % flag_array += ['0'] if flag_array.empty? % flags = flag_array.join(' | ') - case BytecodeInst::Opcode::<%= i.opcode.upcase %>: + case EnumT::<%= i.opcode.upcase %>: return ((<%= flags %>) & flag) == flag; // NOLINT(hicpp-signed-bitwise) % end default: @@ -374,6 +420,27 @@ template inline bool BytecodeInst::HasFlag(Fl UNREACHABLE(); } +% Panda.quickened_plugins.each_key do |namespace| +% enum_name = namespace.upcase +template <> +template <> +inline bool BytecodeInst::HasFlag_Opcode>(Flags flag) const { + switch(GetOpcode_Opcode>()) { +% Quick::select[namespace].each do |i| +% flag_array = i.properties.map {|prop| "Flags::" + prop.upcase} +% flag_array += ['0'] if flag_array.empty? +% flags = flag_array.join(' | ') + case BytecodeInst::<%= enum_name %>_Opcode::<%= i.opcode.upcase %>: + return ((<%= flags %>) & flag) == flag; // NOLINT(hicpp-signed-bitwise) +% end + default: + return false; + } + + UNREACHABLE(); +} +% end + // NOLINTNEXTLINE(readability-function-size) template inline bool BytecodeInst::IsThrow(Exceptions exception) const { switch(GetOpcode()) { @@ -416,7 +483,7 @@ template std::ostream& operator<<(std::ostream& os, % next if op.prof? % op_str = "\"#{sep}v\" << inst.template GetVReg::Format::#{inst.format.pretty.upcase}, #{idx}>()" if op.reg? % op_str = "\"#{sep}\" << inst.template GetImm::Format::#{inst.format.pretty.upcase}, #{idx}>()" if op.imm? -% op_str = "\"#{sep}id\" << inst.template GetId::Format::#{inst.format.pretty.upcase}, #{idx}>()" if op.id? +% op_str = "\"#{sep}id\" << inst.template GetId::Format::#{inst.format.pretty.upcase}, typename BytecodeInst::Opcode, #{idx}>()" if op.id? 
os << <%= op_str %>; % sep = ', ' % end @@ -460,3 +527,34 @@ inline bool BytecodeInst::IsPrimaryOpcodeValid() const } return true; } + +% Panda.quickened_plugins.each_key do |namespace| +% enum_name = namespace.upcase +template +template ::Opcode opcode> +constexpr auto BytecodeInst::GetQuickened() { +% Panda::instructions.select{|b| b.namespace == namespace}.each do |i| + if constexpr (opcode == BytecodeInst::Opcode::<%= i.opcode.upcase %>) { + return BytecodeInst::<%= enum_name %>_Opcode::<%= Quick::remove_pref(i.opcode.upcase) %>; + } else +% end + { + enum { IMPOSSIBLE_CASE = false }; + static_assert(IMPOSSIBLE_CASE, "Impossible case"); + } +} +% end + +template +template ::Format format> +constexpr auto BytecodeInst::GetQuickened() { +% Panda::formats.each do |fmt| + if constexpr (format == BytecodeInst::Format::<%= fmt.pretty.upcase %>) { + return BytecodeInst::Format::<%= Quick::remove_pref(fmt.pretty.upcase) %>; + } else +% end + { + enum { IMPOSSIBLE_CASE = false }; + static_assert(IMPOSSIBLE_CASE, "Impossible case"); + } +} diff --git a/libpandafile/templates/bytecode_instruction_enum_gen.h.erb b/libpandafile/templates/bytecode_instruction_enum_gen.h.erb index d27641e32fdc652c05ed1f51c44382f60df19cd2..02ec95dcbd75fba1c8c21e6fdf8ecb662f45a8db 100644 --- a/libpandafile/templates/bytecode_instruction_enum_gen.h.erb +++ b/libpandafile/templates/bytecode_instruction_enum_gen.h.erb @@ -14,8 +14,8 @@ */ enum class Format : uint8_t { -% Panda::formats.each do |fmt| - <%= fmt.pretty.upcase %>, +% Panda::formats.map(&:pretty).map(&:upcase).each do |fmt| + <%= fmt %>, % end }; @@ -26,6 +26,16 @@ enum class Opcode { LAST = <%= Panda::instructions.last().opcode.upcase %> }; +% Panda.quickened_plugins.each_key do |namespace| +% enum_name = namespace.upcase +enum class <%= enum_name %>_Opcode { +% Quick::select[namespace].each_with_index do |i, index| + <%= i.opcode.upcase %> = <%= index %>, +% end + LAST = <%= Quick::select[namespace].last().opcode.upcase %> +}; +% end 
+ enum Flags : uint32_t { % Panda::properties.each_with_index do |f, i| <%= f.tag.upcase %> = <%= format("0x%x", 1 << i) %>, diff --git a/libpandafile/tests/bytecode_instruction_tests.cpp b/libpandafile/tests/bytecode_instruction_tests.cpp index 96601b342ea1087f7b59972223a64bb53fa8fe6d..1583af03361c904b79124978bf5e0dc23a33a834 100644 --- a/libpandafile/tests/bytecode_instruction_tests.cpp +++ b/libpandafile/tests/bytecode_instruction_tests.cpp @@ -211,7 +211,7 @@ TEST(BytecodeInstruction, Parse) const uint8_t bytecode[] = {0x00, 0xf1, 0xee, 0xcd, 0xab, 0xff}; BytecodeInstruction inst(bytecode); EXPECT_EQ(static_cast(inst.GetOpcode()), 0x00); - EXPECT_EQ((inst.GetId()), BytecodeId(0xabcdeef1)); + EXPECT_EQ((inst.GetId()), BytecodeId(0xabcdeef1)); } // V4_V4_ID16 @@ -221,7 +221,7 @@ TEST(BytecodeInstruction, Parse) EXPECT_EQ(static_cast(inst.GetOpcode()), 0x00); EXPECT_EQ((inst.GetVReg()), 0x1); EXPECT_EQ((inst.GetVReg()), 0x2); - EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); + EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); } // V8_ID16 @@ -230,7 +230,7 @@ TEST(BytecodeInstruction, Parse) BytecodeInstruction inst(bytecode); EXPECT_EQ(static_cast(inst.GetOpcode()), 0x00); EXPECT_EQ((inst.GetVReg()), 0x12); - EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); + EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); } // V4_V4_V4_V4_ID16 @@ -242,7 +242,7 @@ TEST(BytecodeInstruction, Parse) EXPECT_EQ((inst.GetVReg()), 0x2); EXPECT_EQ((inst.GetVReg()), 0x3); EXPECT_EQ((inst.GetVReg()), 0x4); - EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); + EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); } } @@ -443,7 +443,7 @@ TEST(BytecodeInstructionSafe, Parse) const uint8_t bytecode[] = {0x00, 0xf1, 0xee, 0xcd, 0xab, 0xff}; BytecodeInstructionSafe inst(bytecode, &bytecode[0], &bytecode[sizeof(bytecode) - 1]); EXPECT_EQ(static_cast(inst.GetOpcode()), 0x00); - EXPECT_EQ((inst.GetId()), BytecodeId(0xabcdeef1)); + EXPECT_EQ((inst.GetId()), BytecodeId(0xabcdeef1)); } // V4_V4_ID16 @@ -453,7 +453,7 @@ 
TEST(BytecodeInstructionSafe, Parse) EXPECT_EQ(static_cast(inst.GetOpcode()), 0x00); EXPECT_EQ((inst.GetVReg()), 0x1); EXPECT_EQ((inst.GetVReg()), 0x2); - EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); + EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); } // V8_ID16 @@ -462,7 +462,7 @@ TEST(BytecodeInstructionSafe, Parse) BytecodeInstructionSafe inst(bytecode, &bytecode[0], &bytecode[sizeof(bytecode) - 1]); EXPECT_EQ(static_cast(inst.GetOpcode()), 0x00); EXPECT_EQ((inst.GetVReg()), 0x12); - EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); + EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); } // V4_V4_V4_V4_ID16 @@ -474,7 +474,7 @@ TEST(BytecodeInstructionSafe, Parse) EXPECT_EQ((inst.GetVReg()), 0x2); EXPECT_EQ((inst.GetVReg()), 0x3); EXPECT_EQ((inst.GetVReg()), 0x4); - EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); + EXPECT_EQ((inst.GetId()), BytecodeId(0xeef1)); } // Negative tests @@ -647,7 +647,7 @@ TEST(BytecodeInstructionSafe, Parse) BytecodeInstructionSafe inst(bytecode, &bytecode[0], &bytecode[sizeof(bytecode) - 2]); EXPECT_EQ(static_cast(inst.GetOpcode()), 0x00); EXPECT_TRUE(inst.IsValid()); - EXPECT_EQ((inst.GetId()), BytecodeId(0xcdeef1)); + EXPECT_EQ((inst.GetId()), BytecodeId(0xcdeef1)); EXPECT_FALSE(inst.IsValid()); } @@ -661,7 +661,7 @@ TEST(BytecodeInstructionSafe, Parse) EXPECT_TRUE(inst.IsValid()); EXPECT_EQ((inst.GetVReg()), 0x2); EXPECT_TRUE(inst.IsValid()); - EXPECT_EQ((inst.GetId()), BytecodeId(0xf1)); + EXPECT_EQ((inst.GetId()), BytecodeId(0xf1)); EXPECT_FALSE(inst.IsValid()); } @@ -673,7 +673,7 @@ TEST(BytecodeInstructionSafe, Parse) EXPECT_TRUE(inst.IsValid()); EXPECT_EQ((inst.GetVReg()), 0x12); EXPECT_TRUE(inst.IsValid()); - EXPECT_EQ((inst.GetId()), BytecodeId(0x00)); + EXPECT_EQ((inst.GetId()), BytecodeId(0x00)); EXPECT_FALSE(inst.IsValid()); } @@ -691,7 +691,7 @@ TEST(BytecodeInstructionSafe, Parse) EXPECT_TRUE(inst.IsValid()); EXPECT_EQ((inst.GetVReg()), 0x4); EXPECT_TRUE(inst.IsValid()); - EXPECT_EQ((inst.GetId()), BytecodeId(0x0)); + 
EXPECT_EQ((inst.GetId()), BytecodeId(0x0)); EXPECT_FALSE(inst.IsValid()); } } diff --git a/pandastdlib/CMakeLists.txt b/pandastdlib/CMakeLists.txt index e4fa1a3a43ad075eaccf2c3e0289679340894783..667390582eb3820b036a27eca4a7a996af292c22 100644 --- a/pandastdlib/CMakeLists.txt +++ b/pandastdlib/CMakeLists.txt @@ -17,13 +17,23 @@ project(arkstdlib) add_panda_assembly(TARGET arkstdlib SOURCE pandastdlib.pa) +if(CMAKE_CROSSCOMPILING) + ExternalProject_Get_Property(panda_host_tools binary_dir) + set(arkquick_target panda_host_tools) + set(arkquick_bin "${binary_dir}/quickener/arkquick") +else() + set(arkquick_target arkquick) + set(arkquick_bin $) +endif() + # TODO: remove after all components will use arkstdlib.abc instead of pandastdlib.bin -add_custom_target(pandastdlib ALL - COMMAND ${CMAKE_COMMAND} -E create_symlink "${CMAKE_CURRENT_BINARY_DIR}/arkstdlib.abc" "${CMAKE_CURRENT_BINARY_DIR}/pandastdlib.bin") +add_custom_target(pandastdlib) -add_dependencies(pandastdlib arkstdlib) +add_dependencies(pandastdlib arkstdlib arkquick) add_custom_command(TARGET arkstdlib POST_BUILD COMMAND ${CMAKE_COMMAND} -E create_symlink "arkstdlib.abc" "pandastdlib.bin" + COMMAND ${arkquick_bin} "${CMAKE_CURRENT_BINARY_DIR}/arkstdlib.abc" "${CMAKE_CURRENT_BINARY_DIR}/arkstdlib.abcq" + COMMAND ${CMAKE_COMMAND} -E create_symlink "arkstdlib.abcq" "pandastdlib.binq" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/patches/rescuepod1/0004-Implement-exception-throwing-and-handling-in-irtoc-c.patch b/patches/rescuepod1/0004-Implement-exception-throwing-and-handling-in-irtoc-c.patch deleted file mode 100644 index add1cd7e5a3aeffccf618520b61f896a40bc00e8..0000000000000000000000000000000000000000 --- a/patches/rescuepod1/0004-Implement-exception-throwing-and-handling-in-irtoc-c.patch +++ /dev/null @@ -1,1799 +0,0 @@ -From a603589a0d6177dc6456dc73bff00a316bfbf304 Mon Sep 17 00:00:00 2001 -From: WX929229 <> -Date: Fri, 27 May 2022 18:05:46 +0300 -Subject: [PATCH 04/52] Implement exception 
throwing and handling in irtoc core - TicketNo:_internal_ Description:Implement exception throwing and handling in - irtoc core Team:ARK Feature or Bugfix:Feature Binary Source:No - PrivateCode(Yes/No):No - -Change-Id: I33239d74c61eef7565a35ea62e861f1df65019e5 ---- - irtoc/scripts/common.irt | 4 + - irtoc/scripts/interpreter.irt | 869 +++++++++++++----- - .../irtoc_scripts/interpreter_handlers.irt | 6 + - .../irtoc_scripts/interpreter_main_loop.irt | 16 +- - .../tests/runtime/irtoc/advanced/advanced.js | 24 +- - .../tests/runtime/irtoc/basic/basic.js | 32 + - runtime/entrypoints/entrypoints.cpp | 223 ++++- - tests/CMakeLists.txt | 38 +- - .../arithmetic_exception.pa | 35 + - .../irtoc-opcode-suite/throw.pa | 39 + - 10 files changed, 988 insertions(+), 298 deletions(-) - create mode 100644 tests/irtoc-interpreter-tests/arithmetic_exception.pa - create mode 100644 tests/irtoc-interpreter-tests/irtoc-opcode-suite/throw.pa - -diff --git a/irtoc/scripts/common.irt b/irtoc/scripts/common.irt -index 14171c44be..efa93d6cad 100644 ---- a/irtoc/scripts/common.irt -+++ b/irtoc/scripts/common.irt -@@ -89,6 +89,7 @@ module Constants - GET_VTABLE_INDEX = "cross_values::GetMethodVTableIndexOffset(GetArch())" - CLASS_STATE_OFFSET = "cross_values::GetClassStateOffset(GetArch())" - ARRAY_LENGTH_OFFSET = "cross_values::GetCoretypesArrayLengthOffset(GetArch())" -+ ARRAY_DATA_OFFSET = "cross_values::GetCoretypesArrayDataOffset(GetArch())" - VREGISTERS_NUM_OFFSET = "cross_values::GetFrameNumVregsOffset(GetArch())" - VREGISTERS_OFFSET = "cross_values::GetFrameVregsOffset(GetArch())" - VREGISTER_SIZE = "cross_values::GetFrameVregisterSize(GetArch())" -@@ -98,6 +99,7 @@ module Constants - RESOLVE_VIRTUAL_CALL_AOT = "cross_values::GetManagedThreadEntrypointOffset(GetArch(), EntrypointId::RESOLVE_VIRTUAL_CALL_AOT)" - GET_CLASS_METHODS_OFFSET = "cross_values::GetClassMethodsOffset(GetArch())" - FRAME_METHOD_OFFSET = "Frame::GetMethodOffset()" -+ THREAD_EXCEPTION_OFFSET = 
"cross_values::GetManagedThreadExceptionOffset(GetArch())" - FRAME_INSTRUCTIONS_OFFSET = "Frame::GetInstructionsOffset()" - MARK_WORD_OFFSET = "cross_values::GetObjectHeaderMarkWordOffset(GetArch())" - INTERNAL_THREAD_ID_OFFSET = "cross_values::GetManagedThreadInternalIdOffset(GetArch())" -@@ -138,6 +140,8 @@ module Constants - DYN_INT_TYPE = "AnyBaseType::UNDEFINED_TYPE" - DYN_DOUBLE_TYPE = "AnyBaseType::UNDEFINED_TYPE" - -+ REFERENCE_TYPE_SHIFT = "compiler::DataType::ShiftByType(compiler::DataType::REFERENCE, GetArch())" -+ - end - macro(:call_runtime) { |e, *args| - entry := LoadI(%tr).Imm(e).ptr -diff --git a/irtoc/scripts/interpreter.irt b/irtoc/scripts/interpreter.irt -index ba12d51a32..d8814fb8aa 100644 ---- a/irtoc/scripts/interpreter.irt -+++ b/irtoc/scripts/interpreter.irt -@@ -436,49 +436,100 @@ macro(:acc_receiver) do |op, imm| - Phi(res1, res2).ref - end - -+macro(:find_catch_block) do -+ handler_pc := call_runtime("FindCatchBlockInIFrames", %tr, %frame, pc).ptr -+ If(handler_pc, pc).CC(:CC_EQ).b { -+ Intrinsic(:INTERPRETER_RETURN).ptr -+ } -+ frame_eh := LoadI(%tr).Imm("ManagedThread::GetFrameOffset()").ptr -+ Phi(handler_pc, handler_pc).ptr -+end -+ -+['ne', 'eq'].each do |cc| -+ macro(:"action_or_exception_#{cc}") do |size, val, action, *args| -+ If(val, 0).CC(:"CC_#{cc}".upcase).b { -+ pc_eh := find_catch_block() -+ } Else { -+ action.call(*args) unless action.nil? 
-+ pc_inc := advance_pc_imm(pc, size) -+ } -+ curr_frame := Phi(frame_eh, frame).ptr -+ Phi(pc_eh, pc_inc).ptr -+ end -+end -+ -+macro(:exception_val) do -+ LoadI(%tr).Imm(Constants::THREAD_EXCEPTION_OFFSET).ptr -+end -+ - macro(:generic_call) do |id, size, is_initobj, receiver, nargs, copy_lambda| - caller := LoadI(%frame).Imm("Frame::GetMethodOffset()").ptr - callee := call_runtime("GetCalleeMethodFromBytecodeId", caller, u16toword(id)).ptr - if receiver -- callee := call_runtime("ResolveVirtualMethod", callee, receiver).ptr -+ is_handling_exception := receiver -+ else -+ is_handling_exception := AddI(0).Imm(1).ptr -+ end -+ if receiver -+ If(receiver, 0).CC(:CC_EQ).ptr { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_eh := find_catch_block() -+ } Else { -+ callee_ := call_runtime("ResolveVirtualMethod", callee, receiver).ptr -+ } -+ pc := Phi(pc_eh, pc).ptr -+ frame := Phi(frame_eh, frame).ptr -+ callee := Phi(callee, callee_).ptr - end - if is_initobj -- # TODO: multiarray for initobj -- klass := u32toword(LoadI(callee).Imm("Method::GetClassOffset()").u32) -- # TODO(mbolshov): handle nullptr for returned obj -- obj := call_runtime("CreateObjectByClassInterpreter", %tr, klass).ptr -+ # TODO: multiarray for initobj -+ # klass := u32toword(LoadI(callee).Imm("Method::GetClassOffset()").u32) -+ obj := call_runtime("InitObjectByIdEntrypoint", caller, u16tou32(id)).ptr -+ # is_handling_exception := obj -+ If(obj, 0).CC(:CC_EQ).b { -+ is_handling_exception_eh := AddI(0).Imm(0).ptr -+ pc_eh := find_catch_block() -+ } Else { - set_acc_object(acc_ptr, obj) -+ } -+ pc := Phi(pc_eh, pc).ptr -+ frame := Phi(frame_eh, frame).ptr -+ is_handling_exception := Phi(is_handling_exception_eh, is_handling_exception).ptr - end -- If(call_runtime("HasCompiledCode", callee).i32, 0).CC(:CC_NE).b { -- call_runtime("InterpreterToCompiledCodeBridge", %pc, %frame, callee, %tr).void -- StoreI(%tr, %frame).Imm("ManagedThread::GetFrameOffset()").ptr -- pc_native := 
advance_pc_imm(%pc, size) -- } Else { -- num_vregs := call_runtime("GetNumVregsByMethod", callee).word -- num_vregs := AddI(num_vregs).Imm(1).word if is_initobj -- if nargs -- num_args := nargs -- else -- num_args := call_runtime("GetNumArgsByMethod", callee).word -- end -- frame_size := Add(num_vregs, num_args).word -- actual_size := Add(frame_size, frame_size).word -- # TODO(mbolshov): Fast path for frame allocation should be done in irtoc -- new_frame := call_runtime("CreateFrameWithSize", actual_size, frame_size, callee, %frame).ptr -- StoreI(new_frame, "Frame::IS_STACKLESS").Imm("Frame::GetFlagsOffset()").word -- if is_initobj -- obj_vreg_ptr := frame_vreg_ptr(new_frame, SubI(num_vregs).Imm(1).word) -- set_tag_frame(new_frame, obj_vreg_ptr, 0x1) -- set_value(obj_vreg_ptr, obj) -- end -- copy_lambda.call(new_frame, num_vregs, num_args) -- StoreI(new_frame, %frame).Imm("Frame::GetPrevFrameOffset()").ptr -- StoreI(%tr, new_frame).Imm("ManagedThread::GetFrameOffset()").ptr -- StoreI(%frame, advance_pc_imm(%pc, size)).Imm("Frame::GetNextInstructionOffset()").ptr -- pc_int := call_runtime("GetInstructionsByMethod", callee).ptr -+ If(is_handling_exception, 0).CC(:CC_NE).ptr { -+ If(call_runtime("HasCompiledCode", callee).i32, 0).CC(:CC_NE).b { -+ call_runtime("InterpreterToCompiledCodeBridge", pc, frame, callee, %tr).void -+ pc_native := action_or_exception_ne(size, exception_val(), -> () { StoreI(%tr, frame).Imm("ManagedThread::GetFrameOffset()").ptr }) -+ } Else { -+ num_vregs := call_runtime("GetNumVregsByMethod", callee).word -+ num_vregs := AddI(num_vregs).Imm(1).word if is_initobj -+ if nargs -+ num_args := nargs -+ else -+ num_args := call_runtime("GetNumArgsByMethod", callee).word -+ end -+ frame_size := Add(num_vregs, num_args).word -+ actual_size := Add(frame_size, frame_size).word -+ # TODO(mbolshov): Fast path for frame allocation should be done in irtoc -+ new_frame := call_runtime("CreateFrameWithSize", actual_size, frame_size, callee, frame).ptr -+ 
StoreI(new_frame, "Frame::IS_STACKLESS").Imm("Frame::GetFlagsOffset()").word -+ if is_initobj -+ obj_vreg_ptr := frame_vreg_ptr(new_frame, SubI(num_vregs).Imm(1).word) -+ set_tag_frame(new_frame, obj_vreg_ptr, 0x1) -+ set_value(obj_vreg_ptr, obj) -+ end -+ copy_lambda.call(new_frame, num_vregs, num_args) -+ StoreI(new_frame, frame).Imm("Frame::GetPrevFrameOffset()").ptr -+ StoreI(%tr, new_frame).Imm("ManagedThread::GetFrameOffset()").ptr -+ StoreI(frame, advance_pc_imm(pc, size)).Imm("Frame::GetNextInstructionOffset()").ptr -+ pc_int := call_runtime("GetInstructionsByMethod", callee).ptr -+ } -+ frame_ := Phi(curr_frame, new_frame).ptr -+ pc_ := Phi(pc_native, pc_int).ptr -+ StoreI(frame_, pc_).Imm("Frame::GetInstructionsOffset()").ptr - } -- frame := Phi(%frame, new_frame).ptr -- pc := Phi(pc_native, pc_int).ptr -+ frame := Phi(frame, frame_).ptr -+ pc := Phi(pc, pc_).ptr - end - - macro(:generic_return) do |copy_lambda| -@@ -497,6 +548,16 @@ end - - # Handlers: - -+macro(:handle_throw) do |vs| -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ } Else { -+ call_runtime("ThrowExceptionFromInterpreter", vs).ptr -+ } -+ pc := find_catch_block() -+ frame := frame_eh -+end -+ - macro(:handle_movi) do |vd, imm| - set_primitive(vd, imm).i32 - end -@@ -514,23 +575,25 @@ macro(:handle_lda) do |vs| - set_acc_primitive(acc_ptr, vs).u32 - end - --macro(:handle_lda_str_id32) do |id| -+macro(:handle_lda_str_id32) do |id, size| - method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr - string := call_runtime("ResolveStringEntrypoint", method_ptr, id).ptr -- set_acc_object(acc_ptr, string).ref -+ pc := action_or_exception_eq(size, string, -> (acc_ptr, string) { set_acc_object(acc_ptr, string).ref }, acc_ptr, string) -+ frame := curr_frame - end - --macro(:handle_lda_type_id16) do |id| -+macro(:handle_lda_type_id16) do |id, size| - method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -- # TODO: fix -- 
type := call_runtime("ResolveStringEntrypoint", method_ptr, u16tou32(id)).ptr -- set_acc_object(acc_ptr, type).ref -+ type := call_runtime("ResolveTypeEntrypoint", method_ptr, u16tou32(id)).ptr -+ pc := action_or_exception_eq(size, type, -> (acc_ptr, type) { set_acc_object(acc_ptr, type).ref }, acc_ptr, type) -+ frame := curr_frame - end - --macro(:handle_lda_const_v8_id32) do |v, id| -+macro(:handle_lda_const_v8_id32) do |v, id, size| - method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -- cnst := call_runtime("ResolveLiteralArrayEntrypoint", method_ptr, id).ptr -- set_object(v, cnst).ref -+ cnst := call_runtime("ResolveLiteralArrayByIdEntrypoint", method_ptr, id).ptr -+ pc := action_or_exception_eq(size, cnst, -> (v, cnst) { set_object(v, cnst).ref }, v, cnst) -+ frame := curr_frame - end - - macro(:handle_ldai_imm) do |imm| -@@ -629,241 +692,566 @@ end - end - - ['Div', 'Mod'].each do |op| -- macro(:"handle_#{op.downcase}_v4_v4") do |vs1, vs2| -- # TODO: exception if vs2 is 0 -- v_ := send(op, vs1, vs2).i32 -- set_value(acc_ptr, v_).i32 -+ macro(:"handle_#{op.downcase}_v4_v4") do |vs1, vs2, size| -+ IfImm(vs2).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowArithmeticExceptionFromInterpreter").ptr -+ pc_eh := find_catch_block() -+ } Else { -+ v_ := send(op, vs1, vs2).i32 -+ set_value(acc_ptr, v_).i32 -+ pc_inc := advance_pc_imm(pc, size) -+ } -+ frame := Phi(frame_eh, frame).ptr -+ pc := Phi(pc_eh, pc_inc).ptr - end -- macro(:"handle_#{op.downcase}2_v8") do |vs| -- # TODO: exception if vs is 0 -- v_ := send(op, acc_value.i32, vs).i32 -- set_value(acc_ptr, v_).i32 -+ macro(:"handle_#{op.downcase}2_v8") do |vs, size| -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowArithmeticExceptionFromInterpreter").ptr -+ pc_eh := find_catch_block() -+ } Else { -+ v_ := send(op, acc_value.i32, vs).i32 -+ set_value(acc_ptr, v_).i32 -+ pc_inc := advance_pc_imm(pc, size) -+ } -+ frame := Phi(frame_eh, frame).ptr -+ pc := Phi(pc_eh, pc_inc).ptr - end -- 
macro(:"handle_#{op.downcase}2_64_v8") do |vs| -- # TODO: exception if vs is 0 -- v_ := send(op, acc_value.i64, vs).i64 -- set_value(acc_ptr, v_).i64 -+ macro(:"handle_#{op.downcase}2_64_v8") do |vs, size| -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowArithmeticExceptionFromInterpreter").ptr -+ pc_eh := find_catch_block() -+ } Else { -+ v_ := send(op, acc_value.i64, vs).i64 -+ set_value(acc_ptr, v_).i64 -+ pc_inc := advance_pc_imm(pc, size) -+ } -+ frame := Phi(frame_eh, frame).ptr -+ pc := Phi(pc_eh, pc_inc).ptr - end -- macro(:"handle_#{op.downcase}u2_v8") do |vs| -- # TODO: exception if vs is 0 -- v_ := send(op, acc_value.u32, vs).u32 -- set_value(acc_ptr, v_).u32 -+ macro(:"handle_#{op.downcase}u2_v8") do |vs, size| -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowArithmeticExceptionFromInterpreter").ptr -+ pc_eh := find_catch_block() -+ } Else { -+ v_ := send(op, acc_value.u32, vs).u32 -+ set_value(acc_ptr, v_).u32 -+ pc_inc := advance_pc_imm(pc, size) -+ } -+ frame := Phi(frame_eh, frame).ptr -+ pc := Phi(pc_eh, pc_inc).ptr - end -- macro(:"handle_#{op.downcase}u2_64_v8") do |vs| -- # TODO: exception if vs is 0 -- v_ := send(op, acc_value.u64, vs).u64 -- set_value(acc_ptr, v_).u64 -+ macro(:"handle_#{op.downcase}u2_64_v8") do |vs, size| -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowArithmeticExceptionFromInterpreter").ptr -+ pc_eh := find_catch_block() -+ } Else { -+ v_ := send(op, acc_value.u64, vs).u64 -+ set_value(acc_ptr, v_).u64 -+ pc_inc := advance_pc_imm(pc, size) -+ } -+ frame := Phi(frame_eh, frame).ptr -+ pc := Phi(pc_eh, pc_inc).ptr - end -- macro(:"handle_#{op.downcase}i_imm") do |imm| -- # TODO: exception if imm is 0 -- v_ := send(op, acc_value.i32, imm).i32 -- set_value(acc_ptr, v_).i32 -+ macro(:"handle_#{op.downcase}i_imm") do |imm, size| -+ IfImm(imm).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowArithmeticExceptionFromInterpreter").ptr -+ pc_eh := find_catch_block() -+ } Else { -+ v_ := send(op, acc_value.i32, 
imm).i32 -+ set_value(acc_ptr, v_).i32 -+ pc_inc := advance_pc_imm(pc, size) -+ } -+ frame := Phi(frame_eh, frame).ptr -+ pc := Phi(pc_eh, pc_inc).ptr - end - end - --macro(:handle_newarr_v4_v4_id16) do |vd, vs, id| -- method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -- array := call_runtime("CreateArrayByIdEntrypoint", method_ptr, u16tou32(id), vs.word).ptr -- set_object(vd, array).ref -+macro(:handle_newarr_v4_v4_id16) do |vd, vs, id, size| -+ If(vs, 0).CC(:CC_LT).b { -+ call_runtime("ThrowNegativeArraySizeExceptionFromInterpreter", vs).ptr -+ pc_1 := find_catch_block() -+ frame_1 := frame_eh -+ } Else { -+ method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -+ array := call_runtime("CreateArrayByIdEntrypoint", method_ptr, u16tou32(id), vs).ptr -+ pc_2 := action_or_exception_eq(size, array, -> (vd, array) { set_object(vd, array).ref }, vd, array) -+ } -+ pc := Phi(pc_1, pc_2).ptr -+ frame := Phi(frame_1, curr_frame).ptr - end - --macro(:handle_lenarr_v8) do |vs| -- ss := SaveState() -+macro(:handle_lenarr_v8) do |vs, size| - # TODO(aantipina): add assert(has_object(vs)) -- null_check := NullCheck(vs.ref, ss).ref -- len_array := LoadI(null_check).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -- set_acc_primitive(acc_ptr, len_array).i32 -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_eh := find_catch_block() -+ } Else { -+ len_array := LoadI(vs).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -+ set_acc_primitive(acc_ptr, len_array).i32 -+ pc_inc := advance_pc_imm(pc, size) -+ } -+ frame := Phi(frame_eh, frame).ptr -+ pc := Phi(pc_eh, pc_inc).ptr - end - --[['ldarr', :i32], ['ldarr_64', :i64], ['fldarr_64', :f64], ['fldarr_32', :f32]].each do |name, type| -- macro(:"handle_#{name}_v8") do |vs| -- ss := SaveState() -+[['ldarr', :i32, 2], ['ldarr_64', :i64, 3], ['fldarr_64', :f64, 3], ['fldarr_32', :f32, 2]].each do |name, type, elem_size_shift| -+ macro(:"handle_#{name}_v8") do |vs, size| 
- # TODO(aantipina): add assert(has_object(vs)) -- null_check := NullCheck(vs.ref, ss).ref -- len_array := LoadI(null_check).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -- bounds_check := BoundsCheck(len_array, acc_value.i32, ss).i32 -- load_array := LoadArray(null_check, bounds_check).send(type) -- set_value(acc_ptr, load_array).send(type) -+ -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_eh_null_pointer := find_catch_block() -+ frame_eh_null_pointer := frame_eh -+ } Else { -+ len_array := LoadI(vs).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -+ If(acc_value.i32, 0).CC(:CC_LT).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", acc_value.i32, len_array).ptr -+ pc_eh_oob_1 := find_catch_block() -+ frame_eh_oob_1 := frame_eh -+ } Else { -+ If(acc_value.i32, len_array).CC(:CC_GE).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", acc_value.i32, len_array).ptr -+ pc_eh_oob_2 := find_catch_block() -+ frame_eh_oob_2 := frame_eh -+ } Else { -+ elem_offset = AddI(ShlI(acc_value.u32).Imm(elem_size_shift).u32).Imm(Constants::ARRAY_DATA_OFFSET).u32 -+ load_array := Load(vs, elem_offset).send(type) -+ set_value(acc_ptr, load_array).send(type) -+ pc_inc := advance_pc_imm(pc, size) -+ } -+ frame_1 := Phi(frame_eh_oob_2, frame).ptr -+ pc_1 := Phi(pc_eh_oob_2, pc_inc).ptr -+ } -+ frame_2 := Phi(frame_eh_oob_1, frame_1).ptr -+ pc_2 := Phi(pc_eh_oob_1, pc_1).ptr -+ } -+ frame := Phi(frame_eh_null_pointer, frame_2).ptr -+ pc := Phi(pc_eh_null_pointer, pc_2).ptr - end - end - --[8, 16].each do |size| -- macro(:"handle_ldarr_#{size}_v8") do |vs| -- ss := SaveState() -+[[8, 0], [16, 1]].each do |size, elem_size_shift| -+ macro(:"handle_ldarr_#{size}_v8") do |vs, op_size| - # TODO(aantipina): add assert(has_object(vs)) -- null_check := NullCheck(vs.ref, ss).ref -- len_array := LoadI(null_check).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -- bounds_check := BoundsCheck(len_array, acc_value.i32, 
ss).i32 -- load_array := LoadArray(null_check, bounds_check).send(:"i#{size}") -- set_value(acc_ptr, send(:"i#{size}toi32", load_array)).i32 -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_eh_null_pointer := find_catch_block() -+ frame_eh_null_pointer := frame_eh -+ } Else { -+ len_array := LoadI(vs).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -+ If(acc_value.i32, 0).CC(:CC_LT).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", acc_value.i32, len_array).ptr -+ pc_eh_oob_1 := find_catch_block() -+ frame_eh_oob_1 := frame_eh -+ } Else { -+ If(acc_value.i32, len_array).CC(:CC_GE).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", acc_value.i32, len_array).ptr -+ pc_eh_oob_2 := find_catch_block() -+ frame_eh_oob_2 := frame_eh -+ } Else { -+ elem_offset = AddI(ShlI(acc_value.u32).Imm(elem_size_shift).u32).Imm(Constants::ARRAY_DATA_OFFSET).u32 -+ load_array := Load(vs, elem_offset).send(:"i#{size}") -+ set_value(acc_ptr, send(:"i#{size}toi32", load_array)).i32 -+ pc_inc := advance_pc_imm(pc, op_size) -+ } -+ frame_1 := Phi(frame_eh_oob_2, frame).ptr -+ pc_1 := Phi(pc_eh_oob_2, pc_inc).ptr -+ } -+ frame_2 := Phi(frame_eh_oob_1, frame_1).ptr -+ pc_2 := Phi(pc_eh_oob_1, pc_1).ptr -+ } -+ frame := Phi(frame_eh_null_pointer, frame_2).ptr -+ pc := Phi(pc_eh_null_pointer, pc_2).ptr - end - end - --[8, 16].each do |size| -- macro(:"handle_ldarru_#{size}_v8") do |vs| -- ss := SaveState() -+[[8, 0], [16, 1]].each do |size, elem_size_shift| -+ macro(:"handle_ldarru_#{size}_v8") do |vs, op_size| - # TODO(aantipina): add assert(has_object(vs)) -- null_check := NullCheck(vs.ref, ss).ref -- len_array := LoadI(null_check).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -- bounds_check := BoundsCheck(len_array, acc_value.i32, ss).i32 -- load_array := LoadArray(null_check, bounds_check).send(:"u#{size}") -- set_value(acc_ptr, send(:"u#{size}tou32", load_array)).u32 -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { 
-+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_eh_null_pointer := find_catch_block() -+ frame_eh_null_pointer := frame_eh -+ } Else { -+ len_array := LoadI(vs).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -+ If(acc_value.i32, 0).CC(:CC_LT).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", acc_value.i32, len_array).ptr -+ pc_eh_oob_1 := find_catch_block() -+ frame_eh_oob_1 := frame_eh -+ } Else { -+ If(acc_value.i32, len_array).CC(:CC_GE).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", acc_value.i32, len_array).ptr -+ pc_eh_oob_2 := find_catch_block() -+ frame_eh_oob_2 := frame_eh -+ } Else { -+ elem_offset = AddI(ShlI(acc_value.u32).Imm(elem_size_shift).u32).Imm(Constants::ARRAY_DATA_OFFSET).u32 -+ load_array := Load(vs, elem_offset).send(:"u#{size}") -+ set_value(acc_ptr, send(:"u#{size}tou32", load_array)).u32 -+ pc_inc := advance_pc_imm(pc, op_size) -+ } -+ frame_1 := Phi(frame_eh_oob_2, frame).ptr -+ pc_1 := Phi(pc_eh_oob_2, pc_inc).ptr -+ } -+ frame_2 := Phi(frame_eh_oob_1, frame_1).ptr -+ pc_2 := Phi(pc_eh_oob_1, pc_1).ptr -+ } -+ frame := Phi(frame_eh_null_pointer, frame_2).ptr -+ pc := Phi(pc_eh_null_pointer, pc_2).ptr - end - end - --macro(:handle_ldarr_obj_v8) do |vs| -- ss := SaveState() -+macro(:handle_ldarr_obj_v8) do |vs, size| - # TODO(aantipina): add assert(has_object(vs)) -- null_check := NullCheck(vs.ref, ss).ref -- len_array := LoadI(null_check).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -- bounds_check := BoundsCheck(len_array, acc_value.i32, ss).i32 -- load_array := LoadArray(null_check, bounds_check).ref -- set_acc_object(acc_ptr, load_array).ref -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_eh_null_pointer := find_catch_block() -+ frame_eh_null_pointer := frame_eh -+ } Else { -+ len_array := LoadI(vs).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -+ If(acc_value.i32, 0).CC(:CC_LT).b{ -+ 
call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", acc_value.i32, len_array).ptr -+ pc_eh_oob_1 := find_catch_block() -+ frame_eh_oob_1 := frame_eh -+ } Else { -+ If(acc_value.i32, len_array).CC(:CC_GE).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", acc_value.i32, len_array).ptr -+ pc_eh_oob_2 := find_catch_block() -+ frame_eh_oob_2 := frame_eh -+ } Else { -+ elem_offset = AddI(ShlI(acc_value.u32).Imm(Constants::REFERENCE_TYPE_SHIFT).u32).Imm(Constants::ARRAY_DATA_OFFSET).u32 -+ load_array := Load(vs, elem_offset).ref -+ set_acc_object(acc_ptr, load_array).ref -+ pc_inc := advance_pc_imm(pc, size) -+ } -+ frame_1 := Phi(frame_eh_oob_2, frame).ptr -+ pc_1 := Phi(pc_eh_oob_2, pc_inc).ptr -+ } -+ frame_2 := Phi(frame_eh_oob_1, frame_1).ptr -+ pc_2 := Phi(pc_eh_oob_1, pc_1).ptr -+ } -+ frame := Phi(frame_eh_null_pointer, frame_2).ptr -+ pc := Phi(pc_eh_null_pointer, pc_2).ptr - end - --[8, 16].each do |size| -- macro(:"handle_starr_#{size}_v4_v4") do |vs1, vs2| -- ss := SaveState() -+[[8, 0], [16, 1]].each do |size, elem_size_shift| -+ macro(:"handle_starr_#{size}_v4_v4") do |vs1, vs2, op_size| - # TODO(aantipina): add assert(has_object(vs1)) -- null_check := NullCheck(vs1.ref, ss).ref -- len_array := LoadI(null_check).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -- bounds_check := BoundsCheck(len_array, vs2, ss).i32 -- StoreArray(null_check, bounds_check, acc_value.i32).send(:"i#{size}") -+ IfImm(vs1).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_eh_null_pointer := find_catch_block() -+ frame_eh_null_pointer := frame_eh -+ } Else { -+ len_array := LoadI(vs1).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -+ If(vs2, 0).CC(:CC_LT).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", vs2, len_array).ptr -+ pc_eh_oob_1 := find_catch_block() -+ frame_eh_oob_1 := frame_eh -+ } Else { -+ If(vs2, len_array).CC(:CC_GE).b{ -+ 
call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", vs2, len_array).ptr -+ pc_eh_oob_2 := find_catch_block() -+ frame_eh_oob_2 := frame_eh -+ } Else { -+ elem_offset = AddI(ShlI(vs2).Imm(elem_size_shift).u32).Imm(Constants::ARRAY_DATA_OFFSET).u32 -+ Store(vs1, elem_offset, acc_value.i32).send(:"i#{size}") -+ pc_inc := advance_pc_imm(pc, op_size) -+ } -+ frame_1 := Phi(frame_eh_oob_2, frame).ptr -+ pc_1 := Phi(pc_eh_oob_2, pc_inc).ptr -+ } -+ frame_2 := Phi(frame_eh_oob_1, frame_1).ptr -+ pc_2 := Phi(pc_eh_oob_1, pc_1).ptr -+ } -+ frame := Phi(frame_eh_null_pointer, frame_2).ptr -+ pc := Phi(pc_eh_null_pointer, pc_2).ptr - end - end - --[['starr', :i32], ['starr_64', :i64], ['starr_obj', :ref], ['fstarr_32', :f32], ['fstarr_64', :f64]].each do |name, type| -- macro(:"handle_#{name}_v4_v4") do |vs1, vs2| -- ss := SaveState() -+[['starr', :i32, 2], ['starr_64', :i64, 3], ['fstarr_32', :f32, 2], ['fstarr_64', :f64, 3]].each do |name, type, elem_size_shift| -+ macro(:"handle_#{name}_v4_v4") do |vs1, vs2, size| - # TODO(aantipina): add assert(has_object(vs1)) -- null_check := NullCheck(vs1.ref, ss).ref -- len_array := LoadI(null_check).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -- bounds_check := BoundsCheck(len_array, vs2.i32, ss).i32 -- StoreArray(null_check, bounds_check, acc_value.send(type)).send(type) -+ IfImm(vs1).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_eh_null_pointer := find_catch_block() -+ frame_eh_null_pointer := frame_eh -+ } Else { -+ len_array := LoadI(vs1).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -+ If(vs2, 0).CC(:CC_LT).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", vs2, len_array).ptr -+ pc_eh_oob_1 := find_catch_block() -+ frame_eh_oob_1 := frame_eh -+ } Else { -+ If(vs2, len_array).CC(:CC_GE).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", vs2, len_array).ptr -+ pc_eh_oob_2 := find_catch_block() -+ frame_eh_oob_2 := frame_eh -+ } Else { -+ 
elem_offset = AddI(ShlI(vs2).Imm(elem_size_shift).u32).Imm(Constants::ARRAY_DATA_OFFSET).u32 -+ Store(vs1, elem_offset, acc_value.send(type)).send(type) -+ pc_inc := advance_pc_imm(pc, size) -+ } -+ frame_1 := Phi(frame_eh_oob_2, frame).ptr -+ pc_1 := Phi(pc_eh_oob_2, pc_inc).ptr -+ } -+ frame_2 := Phi(frame_eh_oob_1, frame_1).ptr -+ pc_2 := Phi(pc_eh_oob_1, pc_1).ptr -+ } -+ frame := Phi(frame_eh_null_pointer, frame_2).ptr -+ pc := Phi(pc_eh_null_pointer, pc_2).ptr - end - end - --macro(:handle_newobj_v8_id16) do |vd, id| -+macro(:handle_starr_obj_v4_v4) do |vs1, vs2, size| -+ IfImm(vs1).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_eh_null_pointer := find_catch_block() -+ frame_eh_null_pointer := frame_eh -+ } Else { -+ len_array := LoadI(vs1).Imm(Constants::ARRAY_LENGTH_OFFSET).i32 -+ If(vs2, 0).CC(:CC_LT).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", vs2, len_array).ptr -+ pc_eh_oob_1 := find_catch_block() -+ frame_eh_oob_1 := frame_eh -+ } Else { -+ If(vs2, len_array).CC(:CC_GE).b{ -+ call_runtime("ThrowArrayIndexOutOfBoundsExceptionFromInterpreter", vs2, len_array).ptr -+ pc_eh_oob_2 := find_catch_block() -+ frame_eh_oob_2 := frame_eh -+ } Else { -+ res := call_runtime("CheckStoreArrayReferenceFromInterpreter", vs1, acc_value.ref).u8 -+ pc_0 := action_or_exception_ne(size, res, -> (vs1, vs2, acc_value) { -+ elem_offset = AddI(ShlI(vs2).Imm(Constants::REFERENCE_TYPE_SHIFT).u32).Imm(Constants::ARRAY_DATA_OFFSET).u32 -+ Store(vs1, elem_offset, acc_value.ref).SetNeedBarrier(true).ref }, -+ vs1, vs2, acc_value) -+ } -+ pc_1 := Phi(pc_eh_oob_2, pc_0).ptr -+ frame_1 := Phi(frame_eh_oob_2, curr_frame).ptr -+ } -+ pc_2 := Phi(pc_eh_oob_1, pc_1).ptr -+ frame_2:= Phi(frame_eh_oob_1, frame_1).ptr -+ } -+ pc := Phi(pc_eh_null_pointer, pc_2).ptr -+ frame := Phi(frame_eh_null_pointer, frame_2).ptr -+end -+ -+macro(:handle_newobj_v8_id16) do |vd, id, size| - method_ptr := 
LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr - # TODO(mbolshov): handle returned nullptr -- object := call_runtime("CreateObjectByIdEntrypoint", %tr, method_ptr, u16tou32(id)).ptr -- set_object(vd, object).ref -+ object := call_runtime("CreateObjectByIdEntrypoint", method_ptr, u16tou32(id)).ptr -+ pc := action_or_exception_eq(size, object, -> (vd, object) { set_object(vd, object).ref }, vd, object) -+ frame := curr_frame - end - - [['', :u32], ['64_', :u64]].each do |name, type| -- macro(:"handle_stobj_#{name}v8_id16") do |vs, id| -+ macro(:"handle_stobj_#{name}v8_id16") do |vs, id, size| - # TODO(aantipina): add assert(has_object(vs)) -- method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -- offset := call_runtime("GetFieldOffsetByIdEntrypoint", method_ptr, u16tou32(id)).word -- Store(vs, offset, acc_value.send(type)).send(type) -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_1 := find_catch_block() -+ frame_1 := frame_eh -+ } Else { -+ method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -+ field := call_runtime("GetFieldByIdEntrypoint", method_ptr, u16tou32(id)).ptr -+ pc_2 := action_or_exception_eq(size, field, -> (field, vs, acc_value, type) { -+ offset = LoadI(field).Imm("Field::GetOffsetOffset()").word -+ Store(vs, offset, acc_value.send(type)).send(type) }, -+ field, vs, acc_value, type) -+ } -+ pc := Phi(pc_1, pc_2).ptr -+ frame := Phi(frame_1, curr_frame).ptr - end - end - --macro(:handle_stobj_obj_v8_id16) do |vs, id| -+macro(:handle_stobj_obj_v8_id16) do |vs, id, size| - # TODO(aantipina): add assert(has_object(vs)) - # TODO(aantipina): add assert(has_object(acc)) -- method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -- offset := call_runtime("GetFieldOffsetByIdEntrypoint", method_ptr, u16tou32(id)).word -- Store(vs, offset, acc_value.ref).SetNeedBarrier(true).ref -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ 
call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_1 := find_catch_block() -+ frame_1 := frame_eh -+ } Else { -+ method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -+ field := call_runtime("GetFieldByIdEntrypoint", method_ptr, u16tou32(id)).ptr -+ pc_2 := action_or_exception_eq(size, field, -> (field, vs, acc_value) { -+ offset = LoadI(field).Imm("Field::GetOffsetOffset()").word -+ Store(vs, offset, acc_value.ref).SetNeedBarrier(true).ref }, -+ field, vs, acc_value) -+ } -+ pc := Phi(pc_1, pc_2).ptr -+ frame := Phi(frame_1, curr_frame).ptr - end - - [['', :u32], ['64_', :u64]].each do |name, type| -- macro(:"handle_stobj_v_#{name}v4_v4_id16") do |v1, v2, id| -- # TODO(aantipina): add assert(has_object(vs)) -- method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -- offset := call_runtime("GetFieldOffsetByIdEntrypoint", method_ptr, u16tou32(id)).word -- Store(v2.ref, offset, v1.send(type)).send(type) -+ macro(:"handle_stobj_v_#{name}v4_v4_id16") do |v1, v2, id, size| -+ # TODO(aantipina): add assert(has_object(v2)) -+ IfImm(v2).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_1 := find_catch_block() -+ frame_1 := frame_eh -+ } Else { -+ method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -+ field := call_runtime("GetFieldByIdEntrypoint", method_ptr, u16tou32(id)).ptr -+ pc_2 := action_or_exception_eq(size, field, -> (field, v1, v2, type) { -+ offset = LoadI(field).Imm("Field::GetOffsetOffset()").word -+ Store(v2, offset, v1).send(type) }, -+ field, v1, v2, type) -+ } -+ pc := Phi(pc_1, pc_2).ptr -+ frame := Phi(frame_1, curr_frame).ptr - end - end - --macro(:handle_stobj_v_obj_v4_v4_id16) do |v1, v2, id| -- # TODO(aantipina): add assert(has_object(vs)) -- method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -- offset := call_runtime("GetFieldOffsetByIdEntrypoint", method_ptr, u16tou32(id)).word -- Store(v2.ref, offset, 
v1.ref).SetNeedBarrier(true).ref -+macro(:handle_stobj_v_obj_v4_v4_id16) do |v1, v2, id, size| -+ # TODO(aantipina): add assert(has_object(v2)) -+ IfImm(v2).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_1 := find_catch_block() -+ frame_1 := frame_eh -+ } Else { -+ method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -+ field := call_runtime("GetFieldByIdEntrypoint", method_ptr, u16tou32(id)).ptr -+ pc_2 := action_or_exception_eq(size, field, -> (field, v1, v2) { -+ offset = LoadI(field).Imm("Field::GetOffsetOffset()").word -+ Store(v2, offset, v1).SetNeedBarrier(true).ref }, -+ field, v1, v2) -+ } -+ pc := Phi(pc_1, pc_2).ptr -+ frame := Phi(frame_1, curr_frame).ptr - end - - [['', :u32], ['64_', :u64]].each do |name, type| -- macro(:"handle_ldobj_#{name}v8_id16") do |vs, id| -+ macro(:"handle_ldobj_#{name}v8_id16") do |vs, id, size| - # TODO(aantipina): add assert(has_object(vs)) -- method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -- offset := call_runtime("GetFieldOffsetByIdEntrypoint", method_ptr, u16tou32(id)).word -- value := Load(vs, offset).send(type) -- set_acc_primitive(acc_ptr, value).send(type) -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_1 := find_catch_block() -+ frame_1 := frame_eh -+ } Else { -+ method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -+ field := call_runtime("GetFieldByIdEntrypoint", method_ptr, u16tou32(id)).ptr -+ pc_2 := action_or_exception_eq(size, field, -> (field, vs, acc_ptr, type) { -+ offset = LoadI(field).Imm("Field::GetOffsetOffset()").word -+ value := Load(vs, offset).send(type) -+ set_acc_primitive(acc_ptr, value).send(type) }, -+ field, vs, acc_ptr, type) -+ } -+ pc := Phi(pc_1, pc_2).ptr -+ frame := Phi(frame_1, curr_frame).ptr - end - end - --macro(:handle_ldobj_obj_v8_id16) do |vs, id| -+macro(:handle_ldobj_obj_v8_id16) do |vs, id, size| - # TODO(aantipina): add 
assert(has_object(vs)) -- method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -- offset := call_runtime("GetFieldOffsetByIdEntrypoint", method_ptr, u16tou32(id)).word -- value := Load(vs, offset).ref -- set_acc_object(acc_ptr, value).ref -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_1 := find_catch_block() -+ frame_1 := frame_eh -+ } Else { -+ method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -+ field := call_runtime("GetFieldByIdEntrypoint", method_ptr, u16tou32(id)).ptr -+ pc_2 := action_or_exception_eq(size, field, -> (field, vs, acc_ptr) { -+ offset = LoadI(field).Imm("Field::GetOffsetOffset()").word -+ value := Load(vs, offset).ref -+ set_acc_object(acc_ptr, value).ref }, -+ field, vs, acc_ptr) -+ } -+ pc := Phi(pc_1, pc_2).ptr -+ frame := Phi(frame_1, curr_frame).ptr - end - - [['', :u32], ['64_', :u64]].each do |name, type| -- macro(:"handle_ldobj_v_#{name}v4_v4_id16") do |vd, vs, id| -+ macro(:"handle_ldobj_v_#{name}v4_v4_id16") do |vd, vs, id, size| - # TODO(aantipina): add assert(has_object(vs)) -- method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -- offset := call_runtime("GetFieldOffsetByIdEntrypoint", method_ptr, u16tou32(id)).word -- value := Load(vs, offset).send(type) -- set_primitive(vd, value).send(type) -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_1 := find_catch_block() -+ frame_1 := frame_eh -+ } Else { -+ method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -+ field := call_runtime("GetFieldByIdEntrypoint", method_ptr, u16tou32(id)).ptr -+ pc_2 := action_or_exception_eq(size, field, -> (field, vs, vd, type) { -+ offset = LoadI(field).Imm("Field::GetOffsetOffset()").word -+ value := Load(vs, offset).send(type) -+ set_primitive(vd, value).send(type) }, -+ field, vs, vd, type) -+ } -+ pc := Phi(pc_1, pc_2).ptr -+ frame := Phi(frame_1, curr_frame).ptr - end - end - 
--macro(:handle_ldobj_v_obj_v4_v4_id16) do |vd, vs, id| -+macro(:handle_ldobj_v_obj_v4_v4_id16) do |vd, vs, id, size| - # TODO(aantipina): add assert(has_object(vs)) -- method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -- offset := call_runtime("GetFieldOffsetByIdEntrypoint", method_ptr, u16tou32(id)).word -- value := Load(vs, offset).ref -- set_object(vd, value).ref -+ IfImm(vs).Imm(0).CC(:CC_EQ).b { -+ call_runtime("ThrowNullPointerExceptionFromInterpreter").ptr -+ pc_1 := find_catch_block() -+ frame_1 := frame_eh -+ } Else { -+ method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr -+ field := call_runtime("GetFieldByIdEntrypoint", method_ptr, u16tou32(id)).ptr -+ pc_2 := action_or_exception_eq(size, field, -> (field, vs, vd) { -+ offset = LoadI(field).Imm("Field::GetOffsetOffset()").word -+ value := Load(vs, offset).ref -+ set_object(vd, value).ref }, -+ field, vs, vd) -+ } -+ pc := Phi(pc_1, pc_2).ptr -+ frame := Phi(frame_1, curr_frame).ptr - end - - [['', :u32], ['64_', :u64]].each do |name, type| -- macro(:"handle_ststatic_#{name}id16") do |id| -+ macro(:"handle_ststatic_#{name}id16") do |id, size| - method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr - addr := call_runtime("GetStaticFieldAddressByIdEntrypoint", %tr, method_ptr, u16tou32(id)).ptr -- # TODO(aantipina): add assert(is_static(field)) -- StoreI(addr, acc_value.send(type)).Imm(0).send(type) -+ pc := action_or_exception_eq(size, addr, -> (addr, acc_value, type) { StoreI(addr, acc_value.send(type)).Imm(0).send(type) }, -+ addr, acc_value, type) -+ frame := curr_frame - end - end - --macro(:handle_ststatic_obj_id16) do |id| -+macro(:handle_ststatic_obj_id16) do |id, size| - method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr - addr := call_runtime("GetStaticFieldAddressByIdEntrypoint", %tr, method_ptr, u16tou32(id)).ptr -- # TODO(aantipina): add assert(is_static(field)) -- StoreI(addr, acc_value.ref).Imm(0).SetNeedBarrier(true).ref -+ pc 
:= action_or_exception_eq(size, addr, -> (addr, acc_value) { StoreI(addr, acc_value.ref).Imm(0).SetNeedBarrier(true).ref }, -+ addr, acc_value) -+ frame := curr_frame - end - - [['', :u32], ['64_', :u64]].each do |name, type| -- macro(:"handle_ldstatic_#{name}id16") do |id| -+ macro(:"handle_ldstatic_#{name}id16") do |id, size| - method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr - addr := call_runtime("GetStaticFieldAddressByIdEntrypoint", %tr, method_ptr, u16tou32(id)).ptr -- # TODO(aantipina): add assert(is_static(field)) -- value := LoadI(addr).Imm(0).send(type) -- set_acc_primitive(acc_ptr, value).send(type) -+ pc := action_or_exception_eq(size, addr, -> (addr, type, acc_ptr, value) { value := LoadI(addr).Imm(0).send(type) -+ set_acc_primitive(acc_ptr, value).send(type) }, -+ addr, type, acc_ptr, value) -+ frame := curr_frame - end - end - --macro(:handle_ldstatic_obj_id16) do |id| -+macro(:handle_ldstatic_obj_id16) do |id, size| - method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr - addr := call_runtime("GetStaticFieldAddressByIdEntrypoint", %tr, method_ptr, u16tou32(id)).ptr -- # TODO(aantipina): add assert(is_static(field)) -- value := LoadI(addr).Imm(0).ref -- set_acc_object(acc_ptr, value).ref -+ pc := action_or_exception_eq(size, addr, -> (addr, acc_ptr, value) { value := LoadI(addr).Imm(0).ref -+ set_acc_object(acc_ptr, value).ref }, -+ addr, acc_ptr, value) -+ frame := curr_frame - end - --macro(:handle_isinstance_id16) do |id| -+macro(:handle_isinstance_id16) do |id, size| - method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr - # TODO(aantipina): add assert(has_object(acc)) - res := call_runtime("IsInstanceByBCIDEntrypoint", method_ptr, acc_value.u64, u16tou32(id)).u8 -- set_acc_primitive(acc_ptr, u8tou32(res)).u32 -+ pc := action_or_exception_ne(size, exception_val(), -> (acc_ptr, res) { set_acc_primitive(acc_ptr, u8tou32(res)).u32 }, -+ acc_ptr, res) -+ frame := curr_frame - end - 
--macro(:handle_checkcast_id16) do |id| -+macro(:handle_checkcast_id16) do |id, size| - method_ptr := LoadI(%frame).Imm(Constants::FRAME_METHOD_OFFSET).ptr - # TODO(aantipina): add assert(has_object(acc)) -- call_runtime("CheckCastByBCIDEntrypoint", method_ptr, acc_value.u64, u16tou32(id)).void -+ res := call_runtime("CheckCastByBCIDEntrypoint", method_ptr, acc_value.u64, u16tou32(id)).u8 -+ pc := action_or_exception_ne(size, res, nil) -+ frame := curr_frame - end - - macro(:handle_sta_obj_v8) do |vd| -@@ -999,8 +1387,6 @@ end - end - end - -- -- - macro(:handle_i64toi32) do - set_value(acc_ptr, i64toi32(acc_value.i64)).i32 - end -@@ -1307,11 +1693,11 @@ Panda.instructions.each do |i| - when "LDA_OBJ_V8" - handle_lda_obj_v8(vreg_value(op[0]).ref) - when "LDA_STR_ID32" -- handle_lda_str_id32(as_id(op[0])) -+ handle_lda_str_id32(as_id(op[0]), i.format.size) - when "LDA_TYPE_ID16" -- handle_lda_type_id16(as_id(op[0])) -+ handle_lda_type_id16(as_id(op[0]), i.format.size) - when "LDA_CONST_V8_ID32" -- handle_lda_const_v8_id32(vreg_ptr(op[0]), as_id(op[1])) -+ handle_lda_const_v8_id32(vreg_ptr(op[0]), as_id(op[1]), i.format.size) - when "LDAI_IMM8" - handle_ldai_imm(i8toi32(as_imm(op[0]))) - when "LDAI_IMM16" -@@ -1327,43 +1713,43 @@ Panda.instructions.each do |i| - when "LDA_NULL" - handle_lda_null() - when "LENARR_V8" -- handle_lenarr_v8(vreg_value(op[0])) -+ handle_lenarr_v8(vreg_value(op[0]).ref, i.format.size) - when "LDARR_V8" -- handle_ldarr_v8(vreg_value(op[0])) -+ handle_ldarr_v8(vreg_value(op[0]).ref, i.format.size) - when "LDARR_8_V8" -- handle_ldarr_8_v8(vreg_value(op[0])) -+ handle_ldarr_8_v8(vreg_value(op[0]).ref, i.format.size) - when "LDARR_16_V8" -- handle_ldarr_16_v8(vreg_value(op[0])) -+ handle_ldarr_16_v8(vreg_value(op[0]).ref, i.format.size) - when "LDARRU_8_V8" -- handle_ldarru_8_v8(vreg_value(op[0])) -+ handle_ldarru_8_v8(vreg_value(op[0]).ref, i.format.size) - when "LDARRU_16_V8" -- handle_ldarru_16_v8(vreg_value(op[0])) -+ 
handle_ldarru_16_v8(vreg_value(op[0]).ref, i.format.size) - when "LDARR_64_V8" -- handle_ldarr_64_v8(vreg_value(op[0])) -+ handle_ldarr_64_v8(vreg_value(op[0]).ref, i.format.size) - when "FLDARR_32_V8" -- handle_fldarr_32_v8(vreg_value(op[0])) -+ handle_fldarr_32_v8(vreg_value(op[0]).ref, i.format.size) - when "FLDARR_64_V8" -- handle_fldarr_64_v8(vreg_value(op[0])) -+ handle_fldarr_64_v8(vreg_value(op[0]).ref, i.format.size) - when "LDARR_OBJ_V8" -- handle_ldarr_obj_v8(vreg_value(op[0])) -+ handle_ldarr_obj_v8(vreg_value(op[0]).ref, i.format.size) - when "LDOBJ_V8_ID16" -- handle_ldobj_v8_id16(vreg_value(op[0]).ref, as_id(op[1])) -+ handle_ldobj_v8_id16(vreg_value(op[0]).ref, as_id(op[1]), i.format.size) - when "LDOBJ_V_V4_V4_ID16" -- handle_ldobj_v_v4_v4_id16(vreg_ptr(op[0]), vreg_value(op[1]).ref, as_id(op[2])) -+ handle_ldobj_v_v4_v4_id16(vreg_ptr(op[0]), vreg_value(op[1]).ref, as_id(op[2]), i.format.size) - when "LDOBJ_64_V8_ID16" -- handle_ldobj_64_v8_id16(vreg_value(op[0]).ref, as_id(op[1])) -+ handle_ldobj_64_v8_id16(vreg_value(op[0]).ref, as_id(op[1]), i.format.size) - when "LDOBJ_V_64_V4_V4_ID16" -- handle_ldobj_v_64_v4_v4_id16(vreg_ptr(op[0]), vreg_value(op[1]).ref, as_id(op[2])) -+ handle_ldobj_v_64_v4_v4_id16(vreg_ptr(op[0]), vreg_value(op[1]).ref, as_id(op[2]), i.format.size) - when "LDOBJ_OBJ_V8_ID16" -- handle_ldobj_obj_v8_id16(vreg_value(op[0]).ref, as_id(op[1])) -+ handle_ldobj_obj_v8_id16(vreg_value(op[0]).ref, as_id(op[1]), i.format.size) - when "LDOBJ_V_OBJ_V4_V4_ID16" -- handle_ldobj_v_obj_v4_v4_id16(vreg_ptr(op[0]), vreg_value(op[1]).ref, as_id(op[2])) -+ handle_ldobj_v_obj_v4_v4_id16(vreg_ptr(op[0]), vreg_value(op[1]).ref, as_id(op[2]), i.format.size) - when "LDSTATIC_ID16" -- handle_ldstatic_id16(as_id(op[0])) -+ handle_ldstatic_id16(as_id(op[0]), i.format.size) - when "LDSTATIC_64_ID16" -- handle_ldstatic_64_id16(as_id(op[0])) -+ handle_ldstatic_64_id16(as_id(op[0]), i.format.size) - when "LDSTATIC_OBJ_ID16" -- 
handle_ldstatic_obj_id16(as_id(op[0])) -+ handle_ldstatic_obj_id16(as_id(op[0]), i.format.size) - # sta - when "STA_V8" - handle_sta_v8(vreg_ptr(op[0])) -@@ -1372,37 +1758,37 @@ Panda.instructions.each do |i| - when "STA_OBJ_V8" - handle_sta_obj_v8(vreg_ptr(op[0])) - when "STARR_V4_V4" -- handle_starr_v4_v4(vreg_value(op[0]), vreg_value(op[1]).i32) -+ handle_starr_v4_v4(vreg_value(op[0]).ref, vreg_value(op[1]).i32, i.format.size) - when "STARR_8_V4_V4" -- handle_starr_8_v4_v4(vreg_value(op[0]), vreg_value(op[1]).i32) -+ handle_starr_8_v4_v4(vreg_value(op[0]).ref, vreg_value(op[1]).i32, i.format.size) - when "STARR_16_V4_V4" -- handle_starr_16_v4_v4(vreg_value(op[0]), vreg_value(op[1]).i32) -+ handle_starr_16_v4_v4(vreg_value(op[0]).ref, vreg_value(op[1]).i32, i.format.size) - when "STARR_64_V4_V4" -- handle_starr_64_v4_v4(vreg_value(op[0]), vreg_value(op[1]).i32) -+ handle_starr_64_v4_v4(vreg_value(op[0]).ref, vreg_value(op[1]).i32, i.format.size) - when "FSTARR_32_V4_V4" -- handle_fstarr_32_v4_v4(vreg_value(op[0]), vreg_value(op[1]).i32) -+ handle_fstarr_32_v4_v4(vreg_value(op[0]).ref, vreg_value(op[1]).i32, i.format.size) - when "FSTARR_64_V4_V4" -- handle_fstarr_64_v4_v4(vreg_value(op[0]), vreg_value(op[1]).i32) -+ handle_fstarr_64_v4_v4(vreg_value(op[0]).ref, vreg_value(op[1]).i32, i.format.size) - when "STARR_OBJ_V4_V4" -- handle_starr_obj_v4_v4(vreg_value(op[0]), vreg_value(op[1]).i32) -+ handle_starr_obj_v4_v4(vreg_value(op[0]).ref, vreg_value(op[1]).i32, i.format.size) - when "STOBJ_V8_ID16" -- handle_stobj_v8_id16(vreg_value(op[0]).ref, as_id(op[1])) -+ handle_stobj_v8_id16(vreg_value(op[0]).ref, as_id(op[1]), i.format.size) - when "STOBJ_64_V8_ID16" -- handle_stobj_64_v8_id16(vreg_value(op[0]).ref, as_id(op[1])) -+ handle_stobj_64_v8_id16(vreg_value(op[0]).ref, as_id(op[1]), i.format.size) - when "STOBJ_OBJ_V8_ID16" -- handle_stobj_obj_v8_id16(vreg_value(op[0]).ref, as_id(op[1])) -+ handle_stobj_obj_v8_id16(vreg_value(op[0]).ref, as_id(op[1]), 
i.format.size) - when "STOBJ_V_V4_V4_ID16" -- handle_stobj_v_v4_v4_id16(vreg_value(op[0]), vreg_value(op[1]), as_id(op[2])) -+ handle_stobj_v_v4_v4_id16(vreg_value(op[0]).u32, vreg_value(op[1]).ref, as_id(op[2]), i.format.size) - when "STOBJ_V_64_V4_V4_ID16" -- handle_stobj_v_64_v4_v4_id16(vreg_value(op[0]), vreg_value(op[1]), as_id(op[2])) -+ handle_stobj_v_64_v4_v4_id16(vreg_value(op[0]).u64, vreg_value(op[1]).ref, as_id(op[2]), i.format.size) - when "STOBJ_V_OBJ_V4_V4_ID16" -- handle_stobj_v_obj_v4_v4_id16(vreg_value(op[0]), vreg_value(op[1]), as_id(op[2])) -+ handle_stobj_v_obj_v4_v4_id16(vreg_value(op[0]).ref, vreg_value(op[1]).ref, as_id(op[2]), i.format.size) - when "STSTATIC_ID16" -- handle_ststatic_id16(as_id(op[0])) -+ handle_ststatic_id16(as_id(op[0]), i.format.size) - when "STSTATIC_64_ID16" -- handle_ststatic_64_id16(as_id(op[0])) -+ handle_ststatic_64_id16(as_id(op[0]), i.format.size) - when "STSTATIC_OBJ_ID16" -- handle_ststatic_obj_id16(as_id(op[0])) -+ handle_ststatic_obj_id16(as_id(op[0]), i.format.size) - # jmp - when "JMP_IMM8" - pc := handle_jmp_imm(pc, i8toi32(as_imm(op[0]))) -@@ -1542,34 +1928,34 @@ Panda.instructions.each do |i| - when "FDIV2_64_V8" - handle_fdiv2_64_v8(vreg_value(op[0]).f64) - when "DIV_V4_V4" -- handle_div_v4_v4(vreg_value(op[0]).i32, vreg_value(op[1]).i32) -+ handle_div_v4_v4(vreg_value(op[0]).i32, vreg_value(op[1]).i32, i.format.size) - when "DIV2_V8" -- handle_div2_v8(vreg_value(op[0]).i32) -+ handle_div2_v8(vreg_value(op[0]).i32, i.format.size) - when "DIVI_IMM8" -- handle_divi_imm(i8toi32(as_imm(op[0]))) -+ handle_divi_imm(i8toi32(as_imm(op[0])), i.format.size) - when "DIV2_64_V8" -- handle_div2_64_v8(vreg_value(op[0]).i64) -+ handle_div2_64_v8(vreg_value(op[0]).i64, i.format.size) - when "DIVU2_PREF_V8" -- handle_divu2_v8(vreg_value(op[0]).i32) -+ handle_divu2_v8(vreg_value(op[0]).i32, i.format.size) - when "DIVU2_64_PREF_V8" -- handle_divu2_64_v8(vreg_value(op[0]).i64) -+ handle_divu2_64_v8(vreg_value(op[0]).i64, 
i.format.size) - # mod - when "FMOD2_PREF_V8" - handle_fmod2_v8(vreg_value(op[0]).f32) - when "FMOD2_64_V8" - handle_fmod2_64_v8(vreg_value(op[0]).f64) - when "MOD_V4_V4" -- handle_mod_v4_v4(vreg_value(op[0]).i32, vreg_value(op[1]).i32) -+ handle_mod_v4_v4(vreg_value(op[0]).i32, vreg_value(op[1]).i32, i.format.size) - when "MOD2_V8" -- handle_mod2_v8(vreg_value(op[0]).i32) -+ handle_mod2_v8(vreg_value(op[0]).i32, i.format.size) - when "MODI_IMM8" -- handle_modi_imm(i8toi32(as_imm(op[0]))) -+ handle_modi_imm(i8toi32(as_imm(op[0])), i.format.size) - when "MOD2_64_V8" -- handle_mod2_64_v8(vreg_value(op[0]).i64) -+ handle_mod2_64_v8(vreg_value(op[0]).i64, i.format.size) - when "MODU2_PREF_V8" -- handle_modu2_v8(vreg_value(op[0]).i32) -+ handle_modu2_v8(vreg_value(op[0]).i32, i.format.size) - when "MODU2_64_PREF_V8" -- handle_modu2_64_v8(vreg_value(op[0]).i64) -+ handle_modu2_64_v8(vreg_value(op[0]).i64, i.format.size) - # neg - when "FNEG_64" - handle_fneg_64() -@@ -1638,14 +2024,14 @@ Panda.instructions.each do |i| - handle_neg_64() - # new - when "NEWARR_V4_V4_ID16" -- handle_newarr_v4_v4_id16(vreg_ptr(op[0]), vreg_value(op[1]), as_id(op[2])) -+ handle_newarr_v4_v4_id16(vreg_ptr(op[0]), vreg_value(op[1]).i32, as_id(op[2]), i.format.size) - when "NEWOBJ_V8_ID16" -- handle_newobj_v8_id16(vreg_ptr(op[0]), as_id(op[1])) -+ handle_newobj_v8_id16(vreg_ptr(op[0]), as_id(op[1]), i.format.size) - # checks - when "ISINSTANCE_ID16" -- handle_isinstance_id16(as_id(op[0])) -+ handle_isinstance_id16(as_id(op[0]), i.format.size) - when "CHECKCAST_ID16" -- handle_checkcast_id16(as_id(op[0])) -+ handle_checkcast_id16(as_id(op[0]), i.format.size) - # cast - when "I32TOU1_PREF_NONE" - handle_i32tou1() -@@ -1772,6 +2158,10 @@ Panda.instructions.each do |i| - when "RETURN_DYN" - Intrinsic(:INTERPRETER_RETURN).ptr - -+ # throw -+ when "THROW_V8" -+ handle_throw(vreg_value(op[0]).ref) -+ - include_plugin 'interpreter_main_loop' - - else -@@ -1779,7 +2169,12 @@ include_plugin 
'interpreter_main_loop' - end - - if (i.properties & ['jump', 'call', 'return']).empty? -- pc := advance_pc_imm(pc, i.format.size) -+ if i.exceptions.include?('x_none') -+ pc := advance_pc_imm(pc, i.format.size) -+ elsif i.exceptions.include?('x_ecma') and !i.exceptions.include?('x_throw') -+ pc := action_or_exception_ne(i.format.size, exception_val(), nil) -+ frame := curr_frame -+ end - end - - LiveOut(pc).DstReg(regmap[:pc]).u64 -diff --git a/plugins/ecmascript/irtoc_scripts/interpreter_handlers.irt b/plugins/ecmascript/irtoc_scripts/interpreter_handlers.irt -index 2e7e6e3180..4283e9ac12 100644 ---- a/plugins/ecmascript/irtoc_scripts/interpreter_handlers.irt -+++ b/plugins/ecmascript/irtoc_scripts/interpreter_handlers.irt -@@ -21,6 +21,12 @@ macro(:ecma_intrinsic_setacc) do |sym, *args| - set_value(acc_ptr, res).u64 - end - -+macro(:ecma_intrinsic_check_setacc) do |sym, size, *args| -+ ecma_intrinsic_setacc(sym, *args) -+ pc := action_or_exception_ne(size, exception_val(), nil) -+ frame := curr_frame -+end -+ - macro(:ecma_intrinsic_invoke) do |sym, *args| - Call(*args).Method(sym).any - end -diff --git a/plugins/ecmascript/irtoc_scripts/interpreter_main_loop.irt b/plugins/ecmascript/irtoc_scripts/interpreter_main_loop.irt -index d28e1dcf08..4269076778 100644 ---- a/plugins/ecmascript/irtoc_scripts/interpreter_main_loop.irt -+++ b/plugins/ecmascript/irtoc_scripts/interpreter_main_loop.irt -@@ -152,21 +152,16 @@ - - # ecma frames - when "ECMA_CALL0DYN_PREF_V8" -- ecma_intrinsic_setacc("Call0Dyn", vreg_value(op[0]).u64) -- pc := advance_pc_imm(pc, i.format.size) -+ ecma_intrinsic_check_setacc("Call0Dyn", i.format.size, vreg_value(op[0]).u64) - when "ECMA_CALL1DYN_PREF_V8_V8" -- ecma_intrinsic_setacc("Call1Dyn", vreg_value(op[0]).u64, vreg_value(op[1]).u64) -- pc := advance_pc_imm(pc, i.format.size) -+ ecma_intrinsic_check_setacc("Call1Dyn", i.format.size, vreg_value(op[0]).u64, vreg_value(op[1]).u64) - when "ECMA_CALL2DYN_PREF_V8_V8_V8" -- 
ecma_intrinsic_setacc("Call2Dyn", vreg_value(op[0]).u64, vreg_value(op[1]).u64, vreg_value(op[2]).u64) -- pc := advance_pc_imm(pc, i.format.size) -+ ecma_intrinsic_check_setacc("Call2Dyn", i.format.size, vreg_value(op[0]).u64, vreg_value(op[1]).u64, vreg_value(op[2]).u64) - when "ECMA_CALL3DYN_PREF_V8_V8_V8_V8" -- ecma_intrinsic_setacc("Call3Dyn", vreg_value(op[0]).u64, vreg_value(op[1]).u64, vreg_value(op[2]).u64, vreg_value(op[3]).u64) -- pc := advance_pc_imm(pc, i.format.size) -+ ecma_intrinsic_check_setacc("Call3Dyn", i.format.size, vreg_value(op[0]).u64, vreg_value(op[1]).u64, vreg_value(op[2]).u64, vreg_value(op[3]).u64) - when "ECMA_LDLEXENVDYN_PREF_NONE" - res := handle_ecma_ldlexenvdyn() - set_value(acc_ptr, res).any -- # ecma_intrinsic_setacc("LdlexenvDyn") - when "ECMA_NEWLEXENVDYN_PREF_IMM16" - ecma_intrinsic_setacc("NewlexenvDyn", as_imm(op[0])) - when "ECMA_STLEXVARDYN_PREF_IMM4_IMM4" -@@ -243,6 +238,7 @@ - # ecma exceptions - when "ECMA_THROWDYN_PREF_NONE" - ecma_intrinsic_setacc("ThrowDyn", acc_value.u64) -- Intrinsic(:INTERPRETER_RETURN).ptr # TODO: goto exc handler -+ pc := find_catch_block() -+ frame := frame_eh - when "ECMA_THROWUNDEFINEDIFHOLE_PREF_ID32" - ecma_intrinsic_setacc("ThrowUndefinedIfHole", as_id(op[0]), acc_value.any) -diff --git a/plugins/ecmascript/tests/runtime/irtoc/advanced/advanced.js b/plugins/ecmascript/tests/runtime/irtoc/advanced/advanced.js -index 27ec082dc6..19c9342e4c 100644 ---- a/plugins/ecmascript/tests/runtime/irtoc/advanced/advanced.js -+++ b/plugins/ecmascript/tests/runtime/irtoc/advanced/advanced.js -@@ -186,4 +186,26 @@ function test_1() { - verify(~1 ^ 0xffffffff, 1, "Xor: smi, double"); - verify(0xffffffff ^ 0xaaaaaaaa, 1431655765, "Xor: double, double"); - } --test_1(); -\ No newline at end of file -+test_1(); -+ -+//exception handling -+function foo1() { -+ let capt1 = "capt1"; -+ try { -+ let capt2 = "capt2"; -+ let fn = function() { return capt1 + capt2; }; -+ fn(); -+ function foo2() { -+ throw fn; -+ } 
-+ foo2(); -+ } catch(e) { -+ return String("foo1 caught " + e()); -+ } -+} -+res = foo1(); -+expected = "foo1 caught capt1capt2" -+ -+if (res != expected) { -+ throw "res = " + res + ", expected = " + expected; -+} -diff --git a/plugins/ecmascript/tests/runtime/irtoc/basic/basic.js b/plugins/ecmascript/tests/runtime/irtoc/basic/basic.js -index d920e3be7c..cc641a9d35 100644 ---- a/plugins/ecmascript/tests/runtime/irtoc/basic/basic.js -+++ b/plugins/ecmascript/tests/runtime/irtoc/basic/basic.js -@@ -184,6 +184,38 @@ x = 0.1; - res = !x; - expected = false; - -+if (res != expected) { -+ throw "res = " + res + ", expected = " + expected; -+} -+ -+// ecma.throwdyn and exception handling -+function foo() { -+ try { -+ throw 1; -+ } catch(e) { -+ return String("foo1 caught" + e); -+ } -+} -+res = foo(); -+expected = "foo1 caught1"; -+ -+if (res != expected) { -+ throw "res = " + res + ", expected = " + expected; -+} -+ -+function bar1() { -+ try { -+ function bar2() { -+ throw 1; -+ } -+ bar2(); -+ } catch(e) { -+ return String("bar1 caught" + e); -+ } -+} -+res = bar1(); -+expected = "bar1 caught1"; -+ - if (res != expected) { - throw "res = " + res + ", expected = " + expected; - } -\ No newline at end of file -diff --git a/runtime/entrypoints/entrypoints.cpp b/runtime/entrypoints/entrypoints.cpp -index 10495bf412..8bb01bb035 100644 ---- a/runtime/entrypoints/entrypoints.cpp -+++ b/runtime/entrypoints/entrypoints.cpp -@@ -166,19 +166,15 @@ extern "C" void WriteTlabStatsEntrypoint(size_t size) - } - } - --extern "C" size_t GetClassIdEntrypoint(const Method *caller, uint32_t class_id) --{ -- BEGIN_ENTRYPOINT(); -- auto resolved_id = caller->GetClass()->ResolveClassIndex(BytecodeId(class_id).AsIndex()); -- return resolved_id.GetOffset(); --} -- - extern "C" coretypes::Array *CreateArrayByIdEntrypoint(const Method *caller, uint32_t class_id, size_t length) - { - BEGIN_ENTRYPOINT(); -- size_t resolved_id = GetClassIdEntrypoint(caller, class_id); -- auto *klass = 
reinterpret_cast(ResolveClassEntrypoint(caller, resolved_id)); -- return CreateArraySlowPathEntrypoint(klass, length); -+ auto *klass = -+ interpreter::RuntimeInterface::ResolveClass(ManagedThread::GetCurrent(), *caller, BytecodeId(class_id)); -+ if (UNLIKELY(klass == nullptr)) { -+ return nullptr; -+ } -+ return interpreter::RuntimeInterface::CreateArray(klass, length); - } - - extern "C" coretypes::Array *CreateArraySlowPathEntrypoint(Class *klass, size_t length) -@@ -249,23 +245,35 @@ extern "C" coretypes::Array *CreateMultiArrayEntrypoint(Class *klass, uint32_t n - return arr; - } - --extern "C" ObjectHeader *CreateObjectByClassInterpreter(ManagedThread *thread, Class *klass) -+extern "C" ObjectHeader *CreateObjectByIdEntrypoint(const Method *caller, uint32_t method_id) - { -- if (!klass->IsInitialized()) { -- auto *class_linker = Runtime::GetCurrent()->GetClassLinker(); -- if (!class_linker->InitializeClass(thread, klass)) { -- return nullptr; -- } -+ Class *klass = -+ interpreter::RuntimeInterface::ResolveClass(ManagedThread::GetCurrent(), *caller, BytecodeId(method_id)); -+ if (UNLIKELY(klass == nullptr)) { -+ return nullptr; - } -+ - return interpreter::RuntimeInterface::CreateObject(klass); - } - --extern "C" ObjectHeader *CreateObjectByIdEntrypoint(ManagedThread *thread, const Method *caller, uint32_t type_id) -+extern "C" ObjectHeader *InitObjectByIdEntrypoint(const Method *caller, uint32_t method_id) - { -- BEGIN_ENTRYPOINT(); -- size_t resolved_id = GetClassIdEntrypoint(caller, type_id); -- auto *klass = reinterpret_cast(ResolveClassEntrypoint(caller, resolved_id)); -- return CreateObjectByClassInterpreter(thread, klass); -+ Class *klass = interpreter::RuntimeInterface::GetMethodClass(caller, BytecodeId(method_id)); -+ if (UNLIKELY(klass == nullptr)) { -+ return nullptr; -+ } -+ -+ if (UNLIKELY(klass->IsArrayClass())) { -+ return nullptr; -+ } -+ -+ auto method = -+ interpreter::RuntimeInterface::ResolveMethod(ManagedThread::GetCurrent(), *caller, 
BytecodeId(method_id)); -+ if (UNLIKELY(method == nullptr)) { -+ return nullptr; -+ } -+ -+ return interpreter::RuntimeInterface::CreateObject(klass); - } - - extern "C" void DebugPrintEntrypoint([[maybe_unused]] panda::Frame *frame, [[maybe_unused]] const uint8_t *pc) -@@ -363,15 +371,21 @@ extern "C" ObjectHeader *PostBarrierWriteEntrypoint(ObjectHeader *obj, size_t si - return obj; - } - --extern "C" void CheckCastByBCIDEntrypoint(const Method *caller, ObjectHeader *obj, uint32_t type_id) -+extern "C" uint8_t CheckCastByBCIDEntrypoint(const Method *caller, ObjectHeader *obj, uint32_t type_id) - { - BEGIN_ENTRYPOINT(); -- auto thread = ManagedThread::GetCurrent(); -- VMHandle handle_obj(thread, obj); // SUPPRESS_CSA(alpha.core.WasteObjHeader) -- size_t resolved_id = GetClassIdEntrypoint(caller, type_id); -- auto klass = reinterpret_cast(ResolveClassEntrypoint(caller, resolved_id)); -+ auto klass = -+ interpreter::RuntimeInterface::ResolveClass(ManagedThread::GetCurrent(), *caller, BytecodeId(type_id)); -+ if (UNLIKELY(klass == nullptr)) { -+ return 1; -+ } - -- return CheckCastEntrypoint(handle_obj.GetPtr(), klass); -+ Class *obj_klass = obj == nullptr ? 
nullptr : obj->ClassAddr(); // SUPPRESS_CSA(alpha.core.WasteObjHeader) -+ if (UNLIKELY(obj_klass != nullptr && !klass->IsAssignableFrom(obj_klass))) { -+ panda::ThrowClassCastException(klass, obj_klass); -+ return 1; -+ } -+ return 0; - } - - extern "C" void CheckCastEntrypoint(const ObjectHeader *obj, Class *klass) -@@ -408,8 +422,10 @@ extern "C" uint8_t IsInstanceByBCIDEntrypoint(const Method *caller, ObjectHeader - BEGIN_ENTRYPOINT(); - auto thread = ManagedThread::GetCurrent(); - VMHandle handle_obj(thread, obj); // SUPPRESS_CSA(alpha.core.WasteObjHeader) -- size_t resolved_id = GetClassIdEntrypoint(caller, type_id); -- auto klass = reinterpret_cast(ResolveClassEntrypoint(caller, resolved_id)); -+ auto klass = interpreter::RuntimeInterface::ResolveClass(thread, *caller, BytecodeId(type_id)); -+ if (UNLIKELY(klass == nullptr)) { -+ return 0; -+ } - - return IsInstanceEntrypoint(handle_obj.GetPtr(), klass); - } -@@ -453,6 +469,21 @@ extern "C" coretypes::String *ResolveStringEntrypoint(const Method *caller, File - panda_file::File::EntityId(id)); - } - -+extern "C" Class *ResolveTypeEntrypoint(const Method *caller, uint32_t id) -+{ -+ BEGIN_ENTRYPOINT(); -+ auto type = -+ interpreter::RuntimeInterface::ResolveClass(ManagedThread::GetCurrent(), *caller, BytecodeId(id)); -+ return type; -+} -+ -+extern "C" coretypes::Array *ResolveLiteralArrayByIdEntrypoint(const Method *caller, uint32_t type_id) -+{ -+ BEGIN_ENTRYPOINT(); -+ return interpreter::RuntimeInterface::ResolveLiteralArray(ManagedThread::GetCurrent()->GetVM(), *caller, -+ BytecodeId(type_id)); -+} -+ - extern "C" coretypes::String *ResolveStringAotEntrypoint(const Method *caller, FileEntityId id, ObjectHeader **slot) - { - BEGIN_ENTRYPOINT(); -@@ -645,17 +676,20 @@ extern "C" uintptr_t GetUnknownStaticFieldPtrEntrypoint(Method *method, uint32_t - return addr; - } - --extern "C" size_t GetFieldOffsetByIdEntrypoint(Method *caller, uint32_t field_id) -+extern "C" Field *GetFieldByIdEntrypoint(Method 
*caller, uint32_t field_id) - { - BEGIN_ENTRYPOINT(); -- size_t resolved_id = GetFieldIdEntrypoint(caller, field_id); -- return GetFieldOffsetEntrypoint(caller, resolved_id); -+ return interpreter::RuntimeInterface::ResolveField(ManagedThread::GetCurrent(), *caller, BytecodeId(field_id)); - } - - extern "C" uintptr_t GetStaticFieldAddressByIdEntrypoint(ManagedThread *thread, Method *caller, uint32_t field_id) - { - BEGIN_ENTRYPOINT(); - auto *field = interpreter::RuntimeInterface::ResolveField(thread, *caller, BytecodeId(field_id)); -+ if (field == nullptr) { -+ return reinterpret_cast(field); -+ } -+ ASSERT(field->IsStatic()); - return reinterpret_cast(field->GetClass()) + field->GetOffset(); - } - -@@ -1081,6 +1115,131 @@ extern "C" size_t GetNumArgsByMethod(const Method *method) - return method->GetNumArgs(); - } - -+extern "C" void ThrowExceptionFromInterpreter(ObjectHeader *exception) -+{ -+ BEGIN_ENTRYPOINT(); -+ ASSERT(!ManagedThread::GetCurrent()->HasPendingException()); -+ ManagedThread::GetCurrent()->SetException(exception); -+} -+ -+extern "C" void ThrowArithmeticExceptionFromInterpreter() -+{ -+ BEGIN_ENTRYPOINT(); -+ ASSERT(!ManagedThread::GetCurrent()->HasPendingException()); -+ ThrowArithmeticException(); -+} -+ -+extern "C" void ThrowNullPointerExceptionFromInterpreter() -+{ -+ BEGIN_ENTRYPOINT(); -+ ASSERT(!ManagedThread::GetCurrent()->HasPendingException()); -+ ThrowNullPointerException(); -+} -+ -+extern "C" void ThrowNegativeArraySizeExceptionFromInterpreter(ssize_t size) -+{ -+ BEGIN_ENTRYPOINT(); -+ ASSERT(!ManagedThread::GetCurrent()->HasPendingException()); -+ ThrowNegativeArraySizeException(size); -+} -+ -+extern "C" void ThrowArrayIndexOutOfBoundsExceptionFromInterpreter(ssize_t idx, size_t length) -+{ -+ BEGIN_ENTRYPOINT(); -+ ASSERT(!ManagedThread::GetCurrent()->HasPendingException()); -+ ThrowArrayIndexOutOfBoundsException(idx, length); -+} -+ -+extern "C" uint8_t CheckStoreArrayReferenceFromInterpreter(coretypes::Array *array, 
ObjectHeader *store_obj) -+{ -+ BEGIN_ENTRYPOINT(); -+ ASSERT(array != nullptr); -+ ASSERT(store_obj != nullptr); -+ -+ // SUPPRESS_CSA_NEXTLINE(alpha.core.WasteObjHeader) -+ auto *array_class = array->ClassAddr(); -+ auto *element_class = array_class->GetComponentType(); -+ // SUPPRESS_CSA_NEXTLINE(alpha.core.WasteObjHeader) -+ if (!store_obj->IsInstanceOf(element_class)) { -+ // SUPPRESS_CSA_NEXTLINE(alpha.core.WasteObjHeader) -+ panda::ThrowArrayStoreException(array_class, store_obj->ClassAddr()); -+ return 1; -+ } -+ return 0; -+} -+ -+extern "C" uint32_t FindCatchBlockInIFramesStackless(ManagedThread **curr_thread, Frame **curr_frame, const uint8_t *pc) -+{ -+ uint32_t inst = pc - (*curr_frame)->GetInstruction(); -+ Frame *frame = *curr_frame; -+ -+ while (frame != nullptr) { -+ ManagedThread *thread = *curr_thread; -+ Frame *prev = frame->GetPrevFrame(); -+ ASSERT(thread->HasPendingException()); -+ -+ Method *method = frame->GetMethod(); -+ uint32_t pc_offset = interpreter::RuntimeInterface::FindCatchBlock(*method, thread->GetException(), inst); -+ if (pc_offset != panda_file::INVALID_OFFSET) { -+ return pc_offset; -+ } -+ -+ if (!frame->IsStackless() || prev == nullptr || StackWalker::IsBoundaryFrame(prev)) { -+ return pc_offset; -+ } -+ -+ EVENT_METHOD_EXIT(method->GetFullName(), events::MethodExitKind::INTERP, thread->RecordMethodExit()); -+ -+ Runtime::GetCurrent()->GetNotificationManager()->MethodExitEvent(thread, method); -+ -+ inst = prev->GetBytecodeOffset(); -+ *curr_frame = prev; -+ -+ thread->GetVM()->HandleReturnFrame(); -+ -+ interpreter::RuntimeInterface::SetCurrentFrame(thread, prev); -+ -+ ASSERT(thread->HasPendingException()); -+ -+ if (frame->IsInitobj()) { -+ (*curr_frame)->GetAcc() = prev->GetAcc(); -+ } -+ interpreter::RuntimeInterface::FreeFrame(*curr_thread, frame); -+ -+ LOG(DEBUG, INTERPRETER) << "Exit: Runtime Call."; -+ -+ frame = prev; -+ } -+ -+ return panda_file::INVALID_OFFSET; -+} -+ -+extern "C" const uint8_t 
*FindCatchBlockInIFrames(ManagedThread *curr_thread, Frame *curr_frame, const uint8_t *pc) -+{ -+ BEGIN_ENTRYPOINT(); -+ -+ uint32_t pc_offset = panda_file::INVALID_OFFSET; -+ -+ pc_offset = FindCatchBlockInIFramesStackless(&curr_thread, &curr_frame, pc); -+ -+ if (pc_offset == panda_file::INVALID_OFFSET) { -+ if constexpr (RUNTIME_ARCH == Arch::AARCH64 || RUNTIME_ARCH == Arch::AARCH32 || RUNTIME_ARCH == Arch::X86_64) { -+ panda::FindCatchBlockInCallStack(curr_thread); -+ } -+ return pc; -+ } -+ -+ Method *method = curr_frame->GetMethod(); -+ ASSERT(method != nullptr); -+ LanguageContext ctx = interpreter::RuntimeInterface::GetLanguageContext(*method); -+ ObjectHeader *exception_object = curr_thread->GetException(); -+ ctx.SetExceptionToVReg(curr_frame->GetAcc(), exception_object); -+ -+ curr_thread->ClearException(); -+ Span sp(curr_frame->GetMethod()->GetInstructions(), pc_offset); -+ return sp.cend(); -+} -+ - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg) - extern "C" void TraceEntrypoint(size_t pid, ...) 
- { -diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt -index a3a8eb4148..c88dc8a022 100644 ---- a/tests/CMakeLists.txt -+++ b/tests/CMakeLists.txt -@@ -697,8 +697,8 @@ add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/math-43.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/math-44.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/math-45.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/math-46.pa") --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/math-47.pa" EXPECTED_STDOUT "/ by zero" SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/math-48.pa" EXPECTED_STDOUT "/ by zero" SKIP_INTERPRETER_IRTOC) -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/math-47.pa" EXPECTED_STDOUT "/ by zero") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/math-48.pa" EXPECTED_STDOUT "/ by zero") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/math-49.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/math-50.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/math-51.pa") -@@ -760,15 +760,15 @@ add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-05.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-06.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-07.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-08.pa") --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-09.pa" VERIFIER_FAIL_TEST VERIFIER_OPTIONS "--log-level=warning" SEARCH_STDERR "Verifier warning 4" SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-10.pa" VERIFIER_FAIL_TEST VERIFIER_OPTIONS "--log-level=warning" SEARCH_STDERR "Verifier warning 4" SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-11.pa" SKIP_INTERPRETER_IRTOC) 
--add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-12.pa" EXPECTED_STDOUT "idx = 2; length = 1" SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-13.pa" EXPECTED_STDOUT "idx = 2; length = 1" SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-14.pa" EXPECTED_STDOUT "size = -1" SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/exceptions-02.pa" SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/exceptions-03.pa" SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/exceptions-04.pa" SKIP_INTERPRETER_IRTOC) -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-09.pa" VERIFIER_FAIL_TEST VERIFIER_OPTIONS "--log-level=warning" SEARCH_STDERR "Verifier warning 4") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-10.pa" VERIFIER_FAIL_TEST VERIFIER_OPTIONS "--log-level=warning" SEARCH_STDERR "Verifier warning 4") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-11.pa") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-12.pa" EXPECTED_STDOUT "idx = 2; length = 1") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-13.pa" EXPECTED_STDOUT "idx = 2; length = 1") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/arrays-14.pa" EXPECTED_STDOUT "size = -1") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/exceptions-02.pa") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/exceptions-03.pa") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/exceptions-04.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/far-jump-01.pa" COMPILER_OPTIONS --compiler-max-bytecode-size=40000) - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/far-jump-02.pa" COMPILER_OPTIONS --compiler-max-bytecode-size=40000) - 
add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/far-jump-03.pa" COMPILER_OPTIONS --compiler-max-bytecode-size=40000) -@@ -806,18 +806,18 @@ add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-15.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-16.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-17.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-18.pa") --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-23.pa") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-23.pa" SKIP_INTERPRETER_IRTOC) - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-24.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-25.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-26.pa" VERIFIER_FAIL_TEST VERIFIER_OPTIONS "--log-level=warning" SEARCH_STDERR "Verifier warning 4" SKIP_INTERPRETER_IRTOC) - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-27.pa" VERIFIER_FAIL_TEST VERIFIER_OPTIONS "--log-level=warning" SEARCH_STDERR "Verifier warning 4" SKIP_INTERPRETER_IRTOC) - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-28.pa" EXPECTED_STDOUT "R1 cannot be cast to R2" VERIFIER_OPTIONS "--log-level=warning" SEARCH_STDERR "Possibly incompatible accumulator type" SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-29.pa" SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-30.pa" EXPECTED_STDOUT "abstract method \"R.foo\"" SKIP_INTERPRETER_IRTOC) -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-29.pa") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-30.pa" EXPECTED_STDOUT "abstract method \"R.foo\"") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-31.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-32.pa") - add_test_file(FILE 
"${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-33.pa" SKIP_CROSS "arm32" SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-34.pa") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-34.pa" SKIP_INTERPRETER_IRTOC) - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-35.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/obj-36.pa" VERIFIER_OPTIONS "--log-level=warning" SEARCH_STDERR "Possibly incompatible accumulator type" SKIP_INTERPRETER_IRTOC) - -@@ -860,7 +860,7 @@ add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/initobj-04.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/initobj-05.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/initobj-06.pa") - --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/initobj-bad-02.pa" SKIP_VERIFICATION SKIP_INTERPRETER_IRTOC) -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/initobj-bad-02.pa" SKIP_VERIFICATION) - - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/cts-assembly/big_ark_option_value.pa" RUNTIME_OPTIONS --code-cache-size-limit=3221225472) - -@@ -887,8 +887,8 @@ endif() - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/regression/loop-peeling.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/regression/shr32.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/regression/deopt_false_test.pa" COMPILER_OPTIONS --compiler-inlining=false) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/regression/deopt_true_test.pa" COMPILER_OPTIONS --compiler-inlining=false SKIP_INTERPRETER_IRTOC) --add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/regression/deopt_true_call_test.pa" COMPILER_OPTIONS --compiler-inlining=true SKIP_INTERPRETER_IRTOC) -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/regression/deopt_true_test.pa" COMPILER_OPTIONS --compiler-inlining=false) -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/regression/deopt_true_call_test.pa" COMPILER_OPTIONS 
--compiler-inlining=true) - - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/sum.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/signed_imms.pa") -@@ -901,6 +901,7 @@ add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/call2.pa - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/call.acc.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/initobj.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/call.virt.pa") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/arithmetic_exception.pa") - # Lots of runtime calls with enabled '--verify-entrypoints' option cause lots of standard allocations - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/barriers.pa" DISABLE_LIMIT_STD_ALLOC SKIP_CROSS "arm32") - -@@ -950,6 +951,7 @@ add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/irtoc-op - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/irtoc-opcode-suite/fmod2.64.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/irtoc-opcode-suite/fneg.pa") - add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/irtoc-opcode-suite/fneg.64.pa") -+add_test_file(FILE "${CMAKE_CURRENT_SOURCE_DIR}/irtoc-interpreter-tests/irtoc-opcode-suite/throw.pa") - - if (TARGET verifier-tests) - # Verifier regression tests -diff --git a/tests/irtoc-interpreter-tests/arithmetic_exception.pa b/tests/irtoc-interpreter-tests/arithmetic_exception.pa -new file mode 100644 -index 0000000000..ba0eee486b ---- /dev/null -+++ b/tests/irtoc-interpreter-tests/arithmetic_exception.pa -@@ -0,0 +1,35 @@ -+# Copyright (c) 2021 Huawei Device Co., Ltd. -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. 
-+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -+ -+.record panda.ArithmeticException -+ -+.function i32 main() { -+ ldai 10 -+ movi v0, 0 -+try_begin: -+ div2 v0 -+ ldai 2 -+ return -+try_end: -+ -+catch_block1_begin: -+ ldai 0 -+ return -+ -+catch_block2_begin: -+ ldai 10 -+ return -+ -+.catch panda.ArithmeticException, try_begin, try_end, catch_block1_begin -+.catchall try_begin, try_end, catch_block2_begin -+} -\ No newline at end of file -diff --git a/tests/irtoc-interpreter-tests/irtoc-opcode-suite/throw.pa b/tests/irtoc-interpreter-tests/irtoc-opcode-suite/throw.pa -new file mode 100644 -index 0000000000..72baf9925e ---- /dev/null -+++ b/tests/irtoc-interpreter-tests/irtoc-opcode-suite/throw.pa -@@ -0,0 +1,39 @@ -+# Copyright (c) 2021 Huawei Device Co., Ltd. -+# Licensed under the Apache License, Version 2.0 (the "License"); -+# you may not use this file except in compliance with the License. -+# You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. 
-+ -+.record E1 {} -+ -+.function i32 f1() { -+ newobj v0, E1 -+try_begin: -+ throw v0 -+ ldai 2 -+ return -+try_end: -+ -+catch_block1_begin: -+ ldai 0 -+ return -+ -+catch_block2_begin: -+ ldai 10 -+ return -+ -+.catch E1, try_begin, try_end, catch_block1_begin -+.catchall try_begin, try_end, catch_block2_begin -+} -+ -+.function i32 main() { -+ call f1 -+ return -+} -\ No newline at end of file --- -2.17.1 - diff --git a/platforms/unix/libpandabase/thread.cpp b/platforms/unix/libpandabase/thread.cpp index 760ccea203df0a21586f6a340797827a46f4216f..def45ee6d6a65cf33029659cac0fcd1a0768983e 100644 --- a/platforms/unix/libpandabase/thread.cpp +++ b/platforms/unix/libpandabase/thread.cpp @@ -18,6 +18,7 @@ #include "utils/span.h" #include "utils/logger.h" +#include #include #include @@ -94,6 +95,11 @@ void NativeSleep(unsigned int ms) std::this_thread::sleep_for(std::chrono::milliseconds(ms)); } +void NativeSleepUS(std::chrono::microseconds us) +{ + std::this_thread::sleep_for(us); +} + void ThreadDetach(native_handle_type pthread_handle) { pthread_detach(pthread_handle); diff --git a/quickener/CMakeLists.txt b/quickener/CMakeLists.txt index 9b8d25173fd888339afabb1bb0a83acdb3470677..6b6dc7fb5db2d72c3ffea6e9dbaffee05892fc35 100644 --- a/quickener/CMakeLists.txt +++ b/quickener/CMakeLists.txt @@ -15,14 +15,32 @@ cmake_minimum_required(VERSION 3.10) project(quickener) +set(GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated") +file(MAKE_DIRECTORY ${GENERATED_DIR}) +set(QUICKENER_GEN ${GENERATED_DIR}/quickener_gen.cpp) +panda_isa_gen( + TEMPLATES + "quickener_gen.cpp.erb" + "translation_table_gen.h.erb" + SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/templates + DESTINATION ${GENERATED_DIR} +) + set(QUICK_SOURCES quick.cpp + ${QUICKENER_GEN} ) panda_add_executable(arkquick ${QUICK_SOURCES}) target_link_libraries(arkquick arkbase arkfile) -add_dependencies(arkquick arkbase arkfile) +target_include_directories(arkquick + PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} + PUBLIC 
${CMAKE_CURRENT_SOURCE_DIR}/templates + PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/generated +) + +add_dependencies(arkquick arkbase arkfile isa_gen_${PROJECT_NAME}) panda_add_sanitizers(TARGET arkquick SANITIZERS ${PANDA_SANITIZERS_LIST}) diff --git a/quickener/quick.cpp b/quickener/quick.cpp index b4e446d874c7b4408cd8d8260a7ff38db4ded752..4ef6153ba8acbe83ee50d88394e0eb6b0d6e52e0 100644 --- a/quickener/quick.cpp +++ b/quickener/quick.cpp @@ -79,6 +79,11 @@ int main(int argc, const char **argv) } panda::panda_file::ItemContainer *container = reader.GetContainerPtr(); + + panda::quick::Quickener quickener(container, const_cast(reader.GetFilePtr()), + reader.GetItems()); + quickener.QuickContainer(); + auto writer = panda::panda_file::FileWriter(output.GetValue()); if (!writer) { PLOG(ERROR, QUICKENER) << "Cannot create file writer with path '" << output.GetValue() << "'"; diff --git a/quickener/quick.h b/quickener/quick.h index 5fb8babc2e22f67843d1f56ba51416795c409d60..d3b9e8cc59903416fcb51e0d17c58de47049cb53 100644 --- a/quickener/quick.h +++ b/quickener/quick.h @@ -15,3 +15,4 @@ #include "libpandafile/file.h" #include "libpandafile/file_reader.h" +#include "quickener.h" diff --git a/quickener/quickener.h b/quickener/quickener.h new file mode 100644 index 0000000000000000000000000000000000000000..20fa1759368db53b5fa6964ec7a97298ec93af6f --- /dev/null +++ b/quickener/quickener.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "file.h" +#include "file_item_container.h" +#include "file_items.h" +#include "libpandafile/class_data_accessor-inl.h" +#include "source_lang_enum.h" +#include "bytecode_instruction.h" +#include "bytecode_instruction-inl.h" +#include "bytecode_instruction_enum_gen.h" +#include "debug_data_accessor.h" +#include + +namespace panda::quick { +class Quickener final { +public: + Quickener(panda_file::ItemContainer *container, panda_file::File *file, + const std::map *items) + : container_(container), file_(file), items_(items) {}; + + void QuickContainer(); + +private: + panda_file::CodeItem *GetQuickenedCode(panda_file::CodeItem *code, + const std::unordered_map *translation_map); + + panda_file::DebugInfoItem *CreateDebugInfoItem(panda_file::File::EntityId debug_info_id); + + void UpdateDebugInfo(panda_file::DebugInfoItem *debug_info_item, panda_file::File::EntityId debug_info_id); + + panda_file::ItemContainer *container_; + + panda_file::File *file_; + + std::unordered_map ids_done; + + const std::map *items_; +}; + +#include +} // namespace panda::quick diff --git a/quickener/templates/quickener_gen.cpp.erb b/quickener/templates/quickener_gen.cpp.erb new file mode 100644 index 0000000000000000000000000000000000000000..bd0f2fcd0f7a2f1f1583e9c617d9e3d0dc5383f9 --- /dev/null +++ b/quickener/templates/quickener_gen.cpp.erb @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "quickener.h" +#include "libpandafile/debug_data_accessor-inl.h" +#include "libpandafile/line_number_program.h" +#include "libpandabase/utils/utf.h" +#include +#include + +namespace panda::quick { + +panda_file::DebugInfoItem *Quickener::CreateDebugInfoItem(panda_file::File::EntityId debug_info_id) +{ + auto it = ids_done.find(debug_info_id); + if (it != ids_done.end()) { + return static_cast(it->second); + } + + auto *lnp_item = container_->CreateLineNumberProgramItem(); + auto *debug_info_item = container_->CreateItem(lnp_item); + ids_done.insert({debug_info_id, static_cast(debug_info_item)}); + + panda_file::DebugInfoDataAccessor debug_acc(*file_, debug_info_id); + + debug_info_item->SetLineNumber(debug_acc.GetLineStart()); + debug_acc.EnumerateParameters([&](panda_file::File::EntityId param_id) { + auto data = file_->GetStringData(param_id); + std::string item_str(utf::Mutf8AsCString(data.data)); + auto *string_item = container_->GetOrCreateStringItem(item_str); + debug_info_item->AddParameter(string_item); + }); + + return debug_info_item; +} + +void Quickener::UpdateDebugInfo(panda_file::DebugInfoItem *debug_info_item, panda_file::File::EntityId debug_info_id) +{ + auto *lnp_item = debug_info_item->GetLineNumberProgram(); + panda_file::DebugInfoDataAccessor debug_acc(*file_, debug_info_id); + const uint8_t *program = debug_acc.GetLineNumberProgram(); + auto size = file_->GetSpanFromId(file_->GetIdFromPointer(program)).size(); + auto opcode_sp = Span(program, size); + + size_t i = 0; + panda_file::LineNumberProgramItem::Opcode opcode; + panda_file::LineProgramState state(*file_, panda_file::File::EntityId(0), debug_acc.GetLineStart(), + debug_acc.GetConstantPool()); + while ((opcode = panda_file::LineNumberProgramItem::Opcode(opcode_sp[i++])) != panda_file::LineNumberProgramItem::Opcode::END_SEQUENCE) { + switch (opcode) { + case 
panda_file::LineNumberProgramItem::Opcode::ADVANCE_PC: { + lnp_item->EmitAdvancePc(debug_info_item->GetConstantPool(), state.ReadULeb128()); + break; + } + case panda_file::LineNumberProgramItem::Opcode::ADVANCE_LINE: { + lnp_item->EmitAdvanceLine(debug_info_item->GetConstantPool(), state.ReadSLeb128()); + break; + } + case panda_file::LineNumberProgramItem::Opcode::START_LOCAL: { + auto [reg_number, n, is_full] = leb128::DecodeSigned(&opcode_sp[i]); + LOG_IF(!is_full, FATAL, COMMON) << "Cannot read a register number"; + i += n; + + auto name_id = panda_file::File::EntityId(state.ReadULeb128()); + std::string name = utf::Mutf8AsCString(file_->GetStringData(name_id).data); + auto *name_item = container_->GetOrCreateStringItem(name); + + auto type_id = panda_file::File::EntityId(state.ReadULeb128()); + std::string type_name = utf::Mutf8AsCString(file_->GetStringData(type_id).data); + auto *type_item = file_->IsExternal(type_id) + ? static_cast(container_->GetOrCreateForeignClassItem(type_name)) + : static_cast(container_->GetOrCreateClassItem(type_name)); + + lnp_item->EmitStartLocal(debug_info_item->GetConstantPool(), reg_number, name_item, + type_item->GetNameItem()); + break; + } + case panda_file::LineNumberProgramItem::Opcode::START_LOCAL_EXTENDED: { + auto [reg_number, n, is_full] = leb128::DecodeSigned(&opcode_sp[i]); + LOG_IF(!is_full, FATAL, COMMON) << "Cannot read a register number"; + i += n; + + auto name_id = panda_file::File::EntityId(state.ReadULeb128()); + std::string name = utf::Mutf8AsCString(file_->GetStringData(name_id).data); + auto *name_item = container_->GetOrCreateStringItem(name); + + auto type_id = panda_file::File::EntityId(state.ReadULeb128()); + std::string type_name = utf::Mutf8AsCString(file_->GetStringData(type_id).data); + auto *type_item = file_->IsExternal(type_id) + ? 
static_cast(container_->GetOrCreateForeignClassItem(type_name)) + : static_cast(container_->GetOrCreateClassItem(type_name)); + + auto type_signature_id = panda_file::File::EntityId(state.ReadULeb128()); + std::string type_signature = utf::Mutf8AsCString(file_->GetStringData(type_signature_id).data); + auto *type_signature_item = container_->GetOrCreateStringItem(type_signature); + + lnp_item->EmitStartLocalExtended(debug_info_item->GetConstantPool(), reg_number, name_item, + type_item->GetNameItem(), type_signature_item); + break; + } + case panda_file::LineNumberProgramItem::Opcode::END_LOCAL: { + auto [reg_number, n, is_full] = leb128::DecodeSigned(&opcode_sp[i]); + LOG_IF(!is_full, FATAL, COMMON) << "Cannot read a register number"; + i += n; + + lnp_item->EmitEndLocal(reg_number); + break; + } + case panda_file::LineNumberProgramItem::Opcode::RESTART_LOCAL: { + auto [reg_number, n, is_full] = leb128::DecodeSigned(&opcode_sp[i]); + LOG_IF(!is_full, FATAL, COMMON) << "Cannot read a register number"; + i += n; + + lnp_item->EmitRestartLocal(reg_number); + break; + } + case panda_file::LineNumberProgramItem::Opcode::SET_PROLOGUE_END: { + lnp_item->EmitPrologEnd(); + break; + } + case panda_file::LineNumberProgramItem::Opcode::SET_EPILOGUE_BEGIN: { + lnp_item->EmitEpilogBegin(); + break; + } + case panda_file::LineNumberProgramItem::Opcode::SET_FILE: { + auto source_file_id = panda_file::File::EntityId(state.ReadULeb128()); + std::string source_file = utf::Mutf8AsCString(file_->GetStringData(source_file_id).data); + auto *source_file_item = container_->GetOrCreateStringItem(source_file); + lnp_item->EmitSetFile(debug_info_item->GetConstantPool(), source_file_item); + break; + } + case panda_file::LineNumberProgramItem::Opcode::SET_SOURCE_CODE: { + auto source_code_id = panda_file::File::EntityId(state.ReadULeb128()); + std::string source_code = utf::Mutf8AsCString(file_->GetStringData(source_code_id).data); + auto *source_code_item = 
container_->GetOrCreateStringItem(source_code); + lnp_item->EmitSetFile(debug_info_item->GetConstantPool(), source_code_item); + break; + } + case panda_file::LineNumberProgramItem::Opcode::SET_COLUMN: { + lnp_item->EmitColumn(debug_info_item->GetConstantPool(), 0, state.ReadULeb128()); + break; + } + default: { + auto opcode_value = static_cast(opcode); + auto adjust_opcode = opcode_value - panda_file::LineNumberProgramItem::OPCODE_BASE; + uint32_t pc_diff = adjust_opcode / panda_file::LineNumberProgramItem::LINE_RANGE; + int32_t line_diff = + adjust_opcode % panda_file::LineNumberProgramItem::LINE_RANGE + panda_file::LineNumberProgramItem::LINE_BASE; + lnp_item->EmitSpecialOpcode(pc_diff, line_diff); + break; + } + } + } + lnp_item->EmitEnd(); +} + +void Quickener::QuickContainer() +{ + auto *class_map = container_->GetClassMap(); + + std::unordered_map new_info_map; + + for (const auto &it : *class_map) { + auto *base_class_item = it.second; + if (base_class_item->IsForeign()) { + continue; + } + + auto *class_item = static_cast(base_class_item); + + const std::unordered_map *translation_map = nullptr; + switch (class_item->GetSourceLang()) { +% Panda.quickened_plugins.each_key do |namespace| + case panda_file::SourceLang::<%= namespace.upcase %>: + translation_map = &<%= namespace.upcase %>_TRANSLATION_MAP; + break; +% end + default: + continue; + } + + if(translation_map == nullptr) { + continue; + } + + class_item->VisitMethods([&](panda_file::BaseItem *param_item) { + auto *method_item = static_cast(param_item); + auto *code_item = method_item->GetCode(); + if (code_item == nullptr) { + return true; + } + + method_item->SetCode(GetQuickenedCode(code_item, translation_map)); + auto* debug_info_item = static_cast(method_item->GetDebugInfo()); + for(const auto& item : *items_){ + if(item.second == debug_info_item){ + auto* new_debug_info = CreateDebugInfoItem(item.first); + new_info_map.emplace(new_debug_info, item.first); + 
method_item->SetDebugInfo(new_debug_info); + debug_info_item->SetNeedsEmit(false); + break; + } + } + + code_item->SetNeedsEmit(false); + + return true; + }); + } + + container_->ComputeLayout(); + + for(auto& inf : new_info_map){ + if(inf.first != nullptr){ + UpdateDebugInfo(inf.first, inf.second); + } + } + + container_->SetQuickened(); + container_->DeduplicateItems(true); +} + +panda_file::CodeItem *Quickener::GetQuickenedCode(panda_file::CodeItem *code, const std::unordered_map *translation_map) +{ + // branch original offset -> branch imm size in bytes + struct BranchInfo { + size_t offset; + size_t imm_size; + }; + // TODO(nsizov): refactor this function when BytecodeInstr api would be done for quickened instrs + std::unordered_map offset_map; + std::vector branches; + std::vector new_instructions; + std::vector new_try_blocks; + + size_t offset = 0; + offset_map.emplace(offset, 0); + BytecodeInstruction inst(code->GetInstructions()->data()); + while (offset < code->GetCodeSize()) { + const size_t pref_size = inst.IsPrefixed() ? 
2 : 1; + + // save offset to a jump instruction and its imm bytes count + if (inst.HasFlag(BytecodeInstruction::Flags::JUMP)) { + branches.emplace_back(BranchInfo {offset, inst.GetSize() - pref_size}); + } + + size_t copy_idx = 0; + // TODO(nsizov): deduct actual map depending on method sourcelang + auto it = translation_map->find(static_cast(inst.GetOpcode())); + if (it != translation_map->end()) { + new_instructions.push_back(it->second); + copy_idx = pref_size; + } + while (copy_idx < inst.GetSize()) { + new_instructions.push_back(inst.ReadByte(copy_idx++)); + } + + offset += inst.GetSize(); + inst = inst.GetNext(); + + offset_map.emplace(offset, new_instructions.size()); + } + + for (auto &block : code->GetTryBlocks()) { + std::vector new_catch_blocks; + for (auto &catch_block : block.GetCatchBlocks()) + { + auto size = offset_map[catch_block.GetHandlerPc() + catch_block.GetCodeSize()] - + offset_map[catch_block.GetHandlerPc()]; + + new_catch_blocks.emplace_back(panda::panda_file::CodeItem::CatchBlock( + catch_block.GetMethod(), catch_block.GetType(), offset_map[catch_block.GetHandlerPc()], size)); + catch_block.SetNeedsEmit(false); + } + + auto length = offset_map[block.GetStartPc() + block.GetLength()] - offset_map[block.GetStartPc()]; + + block.SetNeedsEmit(false); + + new_try_blocks.emplace_back( + panda::panda_file::CodeItem::TryBlock(offset_map[block.GetStartPc()], length, new_catch_blocks)); + } + + for (auto &branch : branches) { + auto it_branch_offset = offset_map.find(branch.offset); + LOG_IF(it_branch_offset == offset_map.end(), FATAL, QUICKENER) << "Invalid branch offset"; + + ssize_t old_jump_offset = 0; + for (size_t i = branch.imm_size; i > 0; i--) { + // NOLINTNEXTLINE(hicpp-signed-bitwise) + old_jump_offset <<= CHAR_BIT; + // NOLINTNEXTLINE(hicpp-signed-bitwise) + old_jump_offset |= (new_instructions[it_branch_offset->second + i]); + } + + // sign extend the offset + const uint8_t bytes_to_shift = CHAR_BIT * branch.imm_size - 1; + // 
NOLINTNEXTLINE(hicpp-signed-bitwise, clang-analyzer-core.UndefinedBinaryOperatorResult) + if (old_jump_offset >> bytes_to_shift == 1) { + // NOLINTNEXTLINE(hicpp-signed-bitwise) + old_jump_offset |= ((~old_jump_offset >> bytes_to_shift) << bytes_to_shift); + } + auto branch_target = offset_map.find(branch.offset + old_jump_offset); + LOG_IF(branch_target == offset_map.end(), FATAL, QUICKENER) << "Invalid jump target offset"; + + int64_t new_jump_offset = branch_target->second - it_branch_offset->second; + + static constexpr const uint8_t FIRST_BYTE_MASK = 0xff; + for (size_t i = 1; i <= branch.imm_size; i++) { + // NOLINTNEXTLINE(hicpp-signed-bitwise) + new_instructions[it_branch_offset->second + i] = (new_jump_offset & FIRST_BYTE_MASK); + // NOLINTNEXTLINE(hicpp-signed-bitwise) + new_jump_offset >>= CHAR_BIT; + } + } + + auto* new_code = container_->CreateItem(code->GetNumVregs(), code->GetNumArgs(), + std::move(new_instructions)); + + for(auto& block : new_try_blocks) { + new_code->AddTryBlock(block); + } + + return new_code; +} + +} // namespace panda::quick diff --git a/verification/type/type_param.h b/quickener/templates/translation_table_gen.h.erb similarity index 40% rename from verification/type/type_param.h rename to quickener/templates/translation_table_gen.h.erb index 0b6b773410930891ee33bccba3323b9a77f51ded..34b5a30fb469352d56cc161ffe2a3a45f0b0d9e8 100644 --- a/verification/type/type_param.h +++ b/quickener/templates/translation_table_gen.h.erb @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2021-2022 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,37 +13,16 @@ * limitations under the License. 
*/ -#ifndef _PANDA_TYPE_PARAM_HPP__ -#define _PANDA_TYPE_PARAM_HPP__ - -#include "type_index.h" -#include "type_systems.h" -#include "type_type.h" - -namespace panda::verifier { -class TypeParams; - -class TypeParam : public TypeParamIdx { -public: - TypeParam() = delete; - TypeParam(TypeParam &&) = default; - TypeParam(const TypeParam &) = default; - TypeParam &operator=(TypeParam &&) = default; - TypeParam &operator=(const TypeParam &) = default; - ~TypeParam() = default; - - TypeParams operator>>(const TypeParam &p) const; - operator TypeParams() const; - operator Type() const; - -private: - TypeSystemKind kind_; - ThreadNum threadnum_; - TypeParam(TypeVariance v, const Type &t); - TypeParam(TypeSystemKind kind, ThreadNum threadnum, const TypeParamIdx &p); - friend class Type; - friend class TypeParams; +% Quick::select.each do |ns, insns| +const std::unordered_map <%= ns.upcase %>_TRANSLATION_MAP = { +% insns.each_with_index do |i, index| +% ins = Panda::instructions.select {|insn| i.mnemonic == insn.mnemonic && (("pref_" + i.format.pretty == insn.format.pretty && i.namespace != "core")||(i.format.pretty == insn.format.pretty && i.namespace == "core"))} +% if index != insns.size - 1 + {<%= ins[0].opcode_idx %>, <%= index %>}, +% else + {<%= ins[0].opcode_idx %>, <%= index %>} +% end +% end }; -} // namespace panda::verifier -#endif // !_PANDA_TYPE_PARAM_HPP__ +% end diff --git a/runtime/BUILD.gn b/runtime/BUILD.gn index 8aa743dd1696576a23077f5e88ab9ef4026c7db8..36c30408d12fa7469ea6980305e93e223757112a 100644 --- a/runtime/BUILD.gn +++ b/runtime/BUILD.gn @@ -83,6 +83,19 @@ foreach(plugin, enabled_plugins) { src_scope = { } } + + if (defined(source_files.srcs_verification_path)) { + source_file = "$plugin_dir/${source_files.srcs_verification_path}" + src_scope = read_file(source_file, "scope") + if (defined(src_scope.srcs)) { + foreach(src, src_scope.srcs) { + verifier_sources += [ "$plugin_dir/verification/$src" ] + } + } + src_sope = { + } + } + source_files = 
[] } @@ -151,7 +164,7 @@ group("arkruntime_header_deps") { "$ark_root/verification/gen:isa_gen_verification_gen_abs_int_inl_gen_h", "$ark_root/verification/gen:isa_gen_verification_gen_cflow_iterate_inl_gen_h", "$ark_root/verification/gen:isa_gen_verification_gen_job_fill_gen_h", - "$ark_root/verification/gen:lang_specifics_h", + "$ark_root/verification/gen:verification_gen_plugins_gen_inc", "$ark_root/verification/gen:verification_abs_int_inl_compat_checks_h", "$ark_root/verification/gen:verification_verifier_messages_h", "$ark_root/verification/gen:verification_verifier_messages_data_cpp", @@ -264,6 +277,10 @@ source_set("libarkruntime_set_static") { "tooling/debug_inf.cpp", "tooling/debugger.cpp", "tooling/pt_thread.cpp", + "tooling/tools.cpp", + "tooling/sampler/sampling_profiler.cpp", + "tooling/sampler/sample_writer.cpp", + "tooling/sampler/thread_communicator.cpp", "vtable_builder.cpp", ] @@ -286,6 +303,7 @@ source_set("libarkruntime_set_static") { "bridge/arch/arm/compiled_code_to_interpreter_bridge_arm.S", "bridge/arch/arm/compiled_code_to_interpreter_bridge_dyn_arm.S", "bridge/arch/arm/compiled_code_to_runtime_bridge_arm.S", + "bridge/arch/arm/expand_compiled_code_args_dyn_arm.S", "bridge/arch/arm/deoptimization_arm.S", "bridge/arch/arm/interpreter_to_compiled_code_bridge_arm.S", "bridge/arch/arm/interpreter_to_compiled_code_bridge_dyn_arm.S", @@ -298,6 +316,7 @@ source_set("libarkruntime_set_static") { "bridge/arch/aarch64/compiled_code_to_interpreter_bridge_aarch64.S", "bridge/arch/aarch64/compiled_code_to_interpreter_bridge_dyn_aarch64.S", "bridge/arch/aarch64/compiled_code_to_runtime_bridge_aarch64.S", + "bridge/arch/aarch64/expand_compiled_code_args_dyn_aarch64.S", "bridge/arch/aarch64/deoptimization_aarch64.S", "bridge/arch/aarch64/interpreter_to_compiled_code_bridge_aarch64.S", "bridge/arch/aarch64/interpreter_to_compiled_code_bridge_dyn_aarch64.S", @@ -319,6 +338,7 @@ source_set("libarkruntime_set_static") { 
"bridge/arch/amd64/compiled_code_to_interpreter_bridge_amd64.S", "bridge/arch/amd64/compiled_code_to_interpreter_bridge_dyn_amd64.S", "bridge/arch/amd64/compiled_code_to_runtime_bridge_amd64.S", + "bridge/arch/amd64/expand_compiled_code_args_dyn_amd64.S", "bridge/arch/amd64/deoptimization_amd64.S", "bridge/arch/amd64/interpreter_to_compiled_code_bridge_amd64.S", "bridge/arch/amd64/interpreter_to_compiled_code_bridge_dyn_amd64.S", diff --git a/runtime/CMakeLists.txt b/runtime/CMakeLists.txt index 52eddff896bedc6aa7df264ffe081c1b8fdf4454..2bc9ca975153b9470dd869145277bb39c78a2e71 100644 --- a/runtime/CMakeLists.txt +++ b/runtime/CMakeLists.txt @@ -31,6 +31,10 @@ set(SOURCES tooling/debugger.cpp tooling/pt_thread.cpp tooling/debug_inf.cpp + tooling/tools.cpp + tooling/sampler/sampling_profiler.cpp + tooling/sampler/sample_writer.cpp + tooling/sampler/thread_communicator.cpp field.cpp gc_task.cpp dprofiler/dprofiler.cpp @@ -135,6 +139,7 @@ if(PANDA_TARGET_ARM32_ABI_SOFT OR PANDA_TARGET_ARM32_ABI_SOFTFP) bridge/arch/arm/compiled_code_to_interpreter_bridge_dyn_arm.S bridge/arch/arm/interpreter_to_compiled_code_bridge_dyn_arm.S bridge/arch/arm/compiled_code_to_runtime_bridge_arm.S + bridge/arch/arm/expand_compiled_code_args_dyn_arm.S bridge/arch/arm/deoptimization_arm.S arch/arm/osr_arm.S arch/arm/interpreter_support.S) @@ -145,6 +150,7 @@ elseif (PANDA_TARGET_ARM32_ABI_HARD) bridge/arch/arm/compiled_code_to_interpreter_bridge_dyn_arm.S bridge/arch/arm/interpreter_to_compiled_code_bridge_dyn_arm.S bridge/arch/arm/compiled_code_to_runtime_bridge_arm.S + bridge/arch/arm/expand_compiled_code_args_dyn_arm.S bridge/arch/arm/deoptimization_arm.S arch/arm/osr_arm.S arch/arm/interpreter_support.S) @@ -155,6 +161,7 @@ elseif(PANDA_TARGET_ARM64) bridge/arch/aarch64/compiled_code_to_interpreter_bridge_dyn_aarch64.S bridge/arch/aarch64/interpreter_to_compiled_code_bridge_dyn_aarch64.S bridge/arch/aarch64/compiled_code_to_runtime_bridge_aarch64.S + 
bridge/arch/aarch64/expand_compiled_code_args_dyn_aarch64.S bridge/arch/aarch64/deoptimization_aarch64.S arch/aarch64/osr_aarch64.S arch/aarch64/interpreter_support.S @@ -173,6 +180,7 @@ elseif(PANDA_TARGET_AMD64) bridge/arch/amd64/interpreter_to_compiled_code_bridge_amd64.S bridge/arch/amd64/interpreter_to_compiled_code_bridge_dyn_amd64.S bridge/arch/amd64/compiled_code_to_runtime_bridge_amd64.S + bridge/arch/amd64/expand_compiled_code_args_dyn_amd64.S bridge/arch/amd64/deoptimization_amd64.S arch/amd64/common_amd64.S arch/amd64/osr_amd64.S @@ -470,19 +478,22 @@ if(PANDA_WITH_TESTS AND NOT PANDA_QEMU_AARCH64_GCC_8) arkbase arkfile entrypoints_gen + runtime_language_context + runtime_intrinsics_inl + source_languages_h ) +endif() - if(PANDA_TARGET_ARM32_ABI_SOFT OR PANDA_TARGET_ARM32_ABI_SOFTFP) - SET(INVOKE_HELPER tests/arch/arm/invokation_helper.S) - elseif (PANDA_TARGET_ARM32_ABI_HARD) - SET(INVOKE_HELPER tests/arch/arm/invokation_helper_hf.S) - elseif(PANDA_TARGET_ARM64) - SET(INVOKE_HELPER tests/arch/aarch64/invokation_helper.S) - elseif(PANDA_TARGET_X86) - #SET(INVOKE_HELPER tests/arch/x86/invokation_helper.S) - elseif(PANDA_TARGET_AMD64) - SET(INVOKE_HELPER tests/arch/amd64/invokation_helper.S) - endif() +if(PANDA_TARGET_ARM32_ABI_SOFT OR PANDA_TARGET_ARM32_ABI_SOFTFP) +SET(INVOKE_HELPER tests/arch/arm/invokation_helper.S) +elseif (PANDA_TARGET_ARM32_ABI_HARD) +SET(INVOKE_HELPER tests/arch/arm/invokation_helper_hf.S) +elseif(PANDA_TARGET_ARM64) +SET(INVOKE_HELPER tests/arch/aarch64/invokation_helper.S) +elseif(PANDA_TARGET_X86) +#SET(INVOKE_HELPER tests/arch/x86/invokation_helper.S) +elseif(PANDA_TARGET_AMD64) +SET(INVOKE_HELPER tests/arch/amd64/invokation_helper.S) endif() function(add_gtests test_name) @@ -737,6 +748,7 @@ if (TARGET arkruntime_test_interpreter_impl) tests/interpreter/test_interpreter.cpp tests/interpreter/test_runtime_interface.cpp tests/interpreter_test.cpp + tests/interpreter_test_switch.cpp tests/invokation_helper.cpp $ ${INVOKE_HELPER} 
@@ -800,6 +812,7 @@ add_gtests( tests/math_helpers_test.cpp tests/stack_walker_test.cpp tests/time_utils_test.cpp + ${INVOKE_HELPER} ) include(intrinsics.cmake) diff --git a/runtime/arch/asm_support.h b/runtime/arch/asm_support.h index 2a7fc33b2adc4eb264af64b1cdd33703402ba5c5..e22f181e7f301bf7fb5399834186c9134d4e341c 100644 --- a/runtime/arch/asm_support.h +++ b/runtime/arch/asm_support.h @@ -96,12 +96,6 @@ #endif -//TODO(Denis Krylov) -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define JSFUNCTION_CONSTANT_POOL_OFFSET (0x50) -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define JSFUNCTION_LEXICAL_ENVIRONMENT_OFFSET (0x30) - // clang-format on #endif // PANDA_ASM_SUPPORT_H diff --git a/runtime/bridge/arch/aarch64/compiled_code_to_interpreter_bridge_dyn_aarch64.S b/runtime/bridge/arch/aarch64/compiled_code_to_interpreter_bridge_dyn_aarch64.S index 639d998d3a28615adf1ea0a280a1de0448988f7f..fb556cb0e97aa5e3df8cfebed30c83aa2dfd2df4 100644 --- a/runtime/bridge/arch/aarch64/compiled_code_to_interpreter_bridge_dyn_aarch64.S +++ b/runtime/bridge/arch/aarch64/compiled_code_to_interpreter_bridge_dyn_aarch64.S @@ -33,6 +33,10 @@ CompiledCodeToInterpreterBridgeDyn: CFI_STARTPROC CFI_DEF_CFA(sp, 0) + // method: x0 + // num_args: x1 + // arg_i: 24(fp, i, 8) + // Construct bridge frame: // lr // COMPILED_CODE_TO_INTERPRETER_BRIDGE @@ -109,126 +113,96 @@ CompiledCodeToInterpreterBridgeDyn: // Restore x0 and x1 args since they contain Method* and actual_num_args ldp x0, x1, [sp] - // Set Ecmascript Environment - ldp x2, x3, [sp, #16] - // size(x0..x8) + size(EcamscriptEnvironment) - add sp, sp, #96 - // x2 - thisFunc - ldr x16, [THREAD_REG, #MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET] - // Save prev EcamscriptEnvironment* (x16) and thisFunc* (x2) - stp x2, x16, [sp, #-16]! 
- // Get lexical environment from thisFunction - ldr x20, [x2, #JSFUNCTION_LEXICAL_ENVIRONMENT_OFFSET] - // Get constant pool from thisFunction - ldr x18, [x2, #JSFUNCTION_CONSTANT_POOL_OFFSET] - // Save lexical environment* (x20) and constant pool (x18) - stp x18, x20, [sp, #-16]! - // Save real laguage extension data offset in Thread Reg field + // x19 - func (stack arg0) + ldr x19, [fp, #24] +#if defined(PANDA_WITH_ECMASCRIPT) && defined(ARK_INTRINSIC_SET) + sub sp, sp, ECMASCRIPT_ENVIRONMENT_SIZE + ldr x20, [THREAD_REG, #MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET] + str x20, [sp, #ECMASCRIPT_ENVIRONMENT_PREV_ENVIRONMENT_OFFSET] + str x19, [sp, #ECMASCRIPT_ENVIRONMENT_THIS_FUNC_OFFSET] + ldr x20, [x19, #JSFUNCTION_LEXICAL_ENV_OFFSET] + str x20, [sp, #ECMASCRIPT_ENVIRONMENT_LEXICAL_ENV_OFFSET] + ldr x20, [x19, #JSFUNCTION_CONSTANT_POOL_OFFSET] + str x20, [sp, #ECMASCRIPT_ENVIRONMENT_CONSTANT_POOL_OFFSET] mov x20, sp str x20, [THREAD_REG, #MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET] - // normalize to state before EE prologue - sub sp, sp, #64 - // End ecmascript prologue - - // setup regs as follow to survive the call - // x19 - actual_num_args, x20 - method - mov x20, x0 - mov w23, w1 - mov w0, w1 +#endif + + // setup regs as follows to survive the call + // x19 - method* + // x20 - num_actual_args + mov x19, x0 + mov w20, w1 // create an interpreter frame - // get max(method->num_args, num_actual_args) in x0 - // and rest_num_args in w22 (rest_num_args = max(method->num_args - actual_num_args, 0)) - ldr w19, [x20, #METHOD_NUM_ARGS_OFFSET] - cmp w19, w0 - csel w19, w0, w19, ls - sub w22, w19, w0 - mov x1, x20 + mov w0, w1 + mov x1, x19 mov x2, fp - // Frame* CreateFrameForMethodWithActualArgsDyn(uint32_t num_actual_args, Method* method, Frame* prev); bl CreateFrameForMethodWithActualArgsDyn - - // setup regs as follow - // w0 - actual_num_args, x1 - args, x9, x10 - temp, - // x19 - iframe.vregs_ + num_vregs_, x20 - method, - // x21 - iframe, w22 - 
rest_num_args mov x21, x0 - ldr w9, [x21, #FRAME_NUM_VREGS_OFFSET] - sub w9, w9, w19 - mov w0, w23 - add x19, x21, #FRAME_VREGS_OFFSET - add x19, x19, x9, lsl 3 - add x1, fp, #24 - - // copy function object (value only) - ldp x9, x10, [sp, #16] - str x9, [x19], #8 - sub w0, w0, #1 - cbz w0, 2f - - str x10, [x19], #8 - sub w0, w0, #1 - cbz w0, 2f - - // copy x4, x5 - ldp x9, x10, [sp, #32] - str x9, [x19], #8 - sub w0, w0, #1 - cbz w0, 2f - - str x10, [x19], #8 - sub w0, w0, #1 - cbz w0, 2f - - // copy x6, x7 - ldp x9, x10, [sp, #48] - str x9, [x19], #8 - sub w0, w0, #1 - cbz w0, 2f - - str x10, [x19], #8 - sub w0, w0, #1 - cbz w0, 2f - - // copy stack arguments into the frame -1: cbz w0, 2f - ldp x9, x10, [x1], #16 - str x9, [x19], #8 - sub w0, w0, #1 - - cbz w0, 2f - str x10, [x19], #8 - sub w0, w0, #1 - - b 1b - - // fill in the rest args (the arguments which are declared but not specified in the call instruction) -2: cbz w22, 4f - // setup regs as follow - // x0, x1 - initial value, x19 - iframe.vregs_ + num_vregs_, x20 - method, - // x21 - iframe, w22 - rest_num_args - - // get initial value in x0, x1 - mov x0, x20 - bl GetInitialTaggedValue -3: cbz w22, 4f - str x0, [x19], #8 - sub w22, w22, #1 - b 3b - -4: // call InterpreterEntryPoint - mov x0, x20 + + // setup regs as follows + // x19 - method* + // w20 - num_actual_args + // x21 - iframe* + + // w22 - num_iframe_args = max(num_actual_args, mehtod->num_args_) + // x23 - iframe.vregs_ + num_vregs_ + ldr w22, [x19, #METHOD_NUM_ARGS_OFFSET] + cmp w22, w20 + csel w22, w22, w20, hs + ldr w23, [x21, #FRAME_NUM_VREGS_OFFSET] + sub w23, w23, w22 + add x23, x21, x23, lsl 3 + add x23, x23, FRAME_VREGS_OFFSET + + cbz w20, .Linit_rest + + // copy actual args + // x0 - incoming stack arguments + add x0, fp, 24 + sub w1, w20, 1 + lsl w1, w1, 3 +.Lloop_actual: + ldr x2, [x0, x1] + str x2, [x23, x1] + subs w1, w1, 8 + bhs .Lloop_actual + +.Linit_rest: + // w22 - num_rest_args = num_iframe_args - num_actual_args + subs w22, 
w22, w20 + beq .Lcall_interpreter + + // init rest args + // %x0 - iframe.vregs_ + num_vregs_ + num_actual_args + // %x23 - initial tagged value (TAGGED_VALUE_UNDEFINED) + lsl w0, w20, 3 + add x0, x0, x23 + mov x23, TAGGED_VALUE_UNDEFINED + sub w22, w22, 1 + lsl w22, w22, 3 +.Lloop_rest: + str x23, [x0, x22] + subs x22, x22, FRAME_VREGISTER_SIZE + bhs .Lloop_rest + +.Lcall_interpreter: + // call InterpreterEntryPoint + mov x0, x19 mov x1, x21 bl InterpreterEntryPoint // handle the result - // setup regs as follow - // x0 - iframe, x19, x20 - result, x21 - iframe.acc_ - mov x0, x21 - add x21, x21, #FRAME_ACC_OFFSET - ldp x19, x20, [x21] + // setup regs as follows + // x21 - iframe + // x19, x20 - result + add x20, x21, #FRAME_ACC_OFFSET + ldp x19, x20, [x20] + mov x0, x21 bl FreeFrame + mov x0, x19 mov x1, x20 @@ -251,12 +225,12 @@ CompiledCodeToInterpreterBridgeDyn: CFI_RESTORE(x20) CFI_RESTORE(x19) - // Ecmascript epilogue - // Get EcamscriptEnvironment* - ldr x16, [THREAD_REG, #MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET] - ldr x17, [x16, #24] - str x17, [THREAD_REG, #MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET] - // End Ecmascript epilogue +#if defined(PANDA_WITH_ECMASCRIPT) && defined(ARK_INTRINSIC_SET) + // Destroy EcmascriptEnvironment + ldr x2, [THREAD_REG, #MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET] + ldr x2, [x2, ECMASCRIPT_ENVIRONMENT_PREV_ENVIRONMENT_OFFSET] + str x2, [THREAD_REG, #MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET] +#endif ldr fp, [sp, #8] CFI_RESTORE(fp) diff --git a/runtime/bridge/arch/aarch64/expand_compiled_code_args_dyn_aarch64.S b/runtime/bridge/arch/aarch64/expand_compiled_code_args_dyn_aarch64.S new file mode 100644 index 0000000000000000000000000000000000000000..274a6761465c53fd39509a78546e865218de22a7 --- /dev/null +++ b/runtime/bridge/arch/aarch64/expand_compiled_code_args_dyn_aarch64.S @@ -0,0 +1,59 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "arch/asm_support.h" +#include "arch/amd64/helpers_amd64.S" + +.global ExpandCompiledCodeArgsDyn +TYPE_FUNCTION(ExpandCompiledCodeArgsDyn) +ExpandCompiledCodeArgsDyn: + //x0 - method, preserved + //x1 - num_actual, preserved + //x2 - num_expected, tmp1 + //x3 - tmp2 + //x4 - tmp3 + //x5 - tmp4 + + // x2 - 8 * (aligned(2) num_expected - num_actual) + // w3 - num_actual_args + 2 (for fp, lr) + // x4 - sp before moving + sub x2, x1, x2 + and x2, x2, -2 + lsl x2, x2, 3 + add w3, w1, 2 + mov x4, sp + + // Move sp and fp + add sp, sp, x2 + add fp, fp, x2 + +.Lloop_move: + ldr x5, [x4] + str x5, [x4, x2] + add x4, x4, 8 + subs w3, w3, 1 + bhi .Lloop_move + + mov w5, TAGGED_VALUE_UNDEFINED + // Use loop counter as index + add x4, x4, x2 + sub x4, x4, 8 + neg x2, x2 +.Lloop_init: + str x5, [x4, x2] + subs w2, w2, 8 + bhi .Lloop_init + + ret diff --git a/runtime/bridge/arch/aarch64/handle_call_imm16_v16_aarch64.S b/runtime/bridge/arch/aarch64/handle_call_imm16_v16_aarch64.S index fc3a1419503da392765309e85a16a7807c7c8725..78e959ec52e1c343a3ed5ffadc15a456276318c9 100644 --- a/runtime/bridge/arch/aarch64/handle_call_imm16_v16_aarch64.S +++ b/runtime/bridge/arch/aarch64/handle_call_imm16_v16_aarch64.S @@ -18,66 +18,28 @@ // x0-x7 - arguments, x9 - frame.vregs, x10 - insn_ptr, x12 - method, x19 - frame // x13, x14, x15 - temp - // Prepare to set up ABI regs 0 and 1: - ldrb w0, [x10], 1 - ldrb w1, [x10], 1 - - // 
ABI arg reg 1 (x1) <- num_args - lsl w1, w1, 8 - orr w1, w0, w1 - // ABI arg reg 0 (x0) <- panda::Method* mov x0, x12 - cbz w1, .Linvoke - - // x9 <- frame.vregs + vreg_idx (copy base) - ldrb w2, [x10], 1 - ldrb w3, [x10] - lsl w3, w3, 8 - orr w2, w2, w3 - add x9, x9, x2, lsl 3 - - // w10 <- counter of args - mov w10, w1 - - // ABI arg reg 2 (r2) <- boxed arg0 from user's code - ldr x2, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke - - // ABI arg reg 3 (r3) <- boxed arg1 from user's code - ldr x3, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke - - // ABI arg reg 4 (r4) <- boxed arg2 from user's code - ldr x4, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke + // ABI arg reg 1 (x1) <- num_args + ldrh w1, [x10] - // ABI arg reg 5 (r5) <- boxed arg3 from user's code - ldr x5, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke + cbz w1, .Linvoke - // ABI arg reg 6 (r6) <- boxed arg4 from user's code - ldr x6, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke + // x9 <- range in frame.vregs + ldrh w13, [x10, 2] + add x9, x9, x13, lsl 3 - // ABI arg reg 7 (r7) <- boxed arg5 from user's code - ldr x7, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke + // Reserve stack args + sub x13, sp, x1, lsl 3 + and sp, x13, -16 - // Reserve stack space for the arguments, starting from x13: - sub sp, sp, x10, lsl 3 - mov x13, sp + lsl w13, w1, 3 + sub w13, w13, 8 +1: + ldr x14, [x9, x13] + str x14, [sp, x13] + subs w13, w13, 8 + bhs 1b - // Push the rest arguments to the stack (r14 is temp): -1: ldr x14, [x9], FRAME_VREGISTER_SIZE - str x14, [x13], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbnz w10, 1b b .Linvoke diff --git a/runtime/bridge/arch/aarch64/handle_call_imm4_v4_v4_v4_aarch64.S b/runtime/bridge/arch/aarch64/handle_call_imm4_v4_v4_v4_aarch64.S index 672586ee8806d6d0a2cb82c006e22dcefb7aeb24..57673166c29c8450df43788f7669144f0592e4c8 100644 --- 
a/runtime/bridge/arch/aarch64/handle_call_imm4_v4_v4_v4_aarch64.S +++ b/runtime/bridge/arch/aarch64/handle_call_imm4_v4_v4_v4_aarch64.S @@ -21,52 +21,40 @@ // ABI arg reg 0 (x0) <- panda::Method* mov x0, x12 + // load imm4,v4,v4,v4,v4,v4 into w13 + ldr w13, [x10] + // ABI arg reg 1 (x1/w1) <- num_args - ldrb w13, [x10], 1 - and w1, w13, 0xF + and w1, w13, 0x000F cbz w1, .Linvoke - // ABI arg reg 2 (x2) <- boxed arg0 from user's code - and w2, w13, -16 - lsr w2, w2, #1 - add x2, x9, x2 - ldr x2, [x2] - cmp w1, 1 - beq .Linvoke // No more args from user's code? + // Reserve stack args + sub x14, sp, x1, lsl 3 + and sp, x14, -16 + +.macro load_calli_dyn_short_arg i + lsr w14, w13, (4 * (\i + 1) - 3) + and w14, w14, (0xF << 3) + ldr x14, [x9, x14] + str x14, [sp, 8 * \i] +.endm - // Read next insn byte for user's arg1 and arg2: - ldrb w13, [x10], 1 + load_calli_dyn_short_arg 0 + cmp w1, 1 + beq .Linvoke - // ABI arg reg 3 (x3) <- boxed arg1 from user's code - and w3, w13, 0xF - add x3, x9, x3, lsl 3 - ldr x3, [x3] + load_calli_dyn_short_arg 1 cmp w1, 2 - beq .Linvoke // No more args from user's code? + beq .Linvoke - // ABI arg reg 4 (x4) <- boxed arg2 from user's code - and w4, w13, -16 - lsr w4, w4, #1 - add x4, x9, x4 - ldr x4, [x4] + load_calli_dyn_short_arg 2 cmp w1, 3 - beq .Linvoke // No more args from user's code? - - // Read next insn byte for user's arg3 and arg4: - ldrb w13, [x10], 1 + beq .Linvoke - // ABI arg reg 5 (x5) <- boxed arg3 from user's code - and w5, w13, 0xF - add x5, x9, x5, lsl 3 - ldr x5, [x5] + load_calli_dyn_short_arg 3 cmp w1, 4 - beq .Linvoke // No more args from user's code? 
- - // ABI arg reg 6 (x6) <- boxed arg4 from user's code - and w6, w13, -16 - lsr w6, w6, #1 - add x6, x9, x6 - ldr x6, [x6] + beq .Linvoke + load_calli_dyn_short_arg 4 b .Linvoke diff --git a/runtime/bridge/arch/aarch64/interpreter_to_compiled_code_bridge_dyn_aarch64.S b/runtime/bridge/arch/aarch64/interpreter_to_compiled_code_bridge_dyn_aarch64.S index 8f1bd7db96d0438ccf6ee20113b869a074942cbf..81a8774b86b2bb0e01cef2d24810a89d46e29c58 100644 --- a/runtime/bridge/arch/aarch64/interpreter_to_compiled_code_bridge_dyn_aarch64.S +++ b/runtime/bridge/arch/aarch64/interpreter_to_compiled_code_bridge_dyn_aarch64.S @@ -76,10 +76,13 @@ InterpreterToCompiledCodeBridgeDyn: // new call format was introduced and you have to implement the corresponding handler. #include "bridge_dispatch_dyn_aarch64.S" + // invoke the method + // since the first argument is Method* it must be in x0 .Linvoke: // invoke the entrypoint - ldr lr, [x12, METHOD_COMPILED_ENTRY_POINT_OFFSET] + ldr lr, [x0, METHOD_COMPILED_ENTRY_POINT_OFFSET] blr lr + // sp may be modified by call // handle the result // setup regs as follow: @@ -146,53 +149,23 @@ InvokeCompiledCodeWithArgArrayDyn: // No arguments passed to the callee cbz w1, .Linvoke_ - mov w10, w1 - - // ABI arg reg 2 <- boxed arg0 from user's code - ldr x2, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke_ - - // ABI arg reg 3 <- boxed arg1 from user's code - ldr x3, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke_ - - // ABI arg reg 4 <- boxed arg2 from user's code - ldr x4, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke_ - - // ABI arg reg 5 <- boxed arg3 from user's code - ldr x5, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke_ - - // ABI arg reg 6 <- boxed arg4 from user's code - ldr x6, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke_ - - // ABI arg reg 7 <- boxed arg5 from user's code - ldr x7, [x9], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbz w10, .Linvoke_ - 
- // setup stack args - // reserve stack space - // x13 - stack pointer - sub x13, sp, x10, lsl 3 - and x13, x13, -16 - mov sp, x13 - // copy arguments to the stack -1: ldr x11, [x9], FRAME_VREGISTER_SIZE - str x11, [x13], FRAME_VREGISTER_SIZE - sub w10, w10, 1 - cbnz w10, 1b + + // Reserve stack args + sub x10, sp, x1, lsl 3 + and sp, x10, -16 + + lsl w10, w1, 3 + sub w10, w10, 8 +.Lloop_: + ldr x11, [x9, x10] + str x11, [sp, x10] + subs w10, w10, 8 + bhs .Lloop_ .Linvoke_: // invoke the entrypoint blr lr + // sp may be modified by call sub sp, fp, 32 ldp x19, THREAD_REG, [sp], 48 diff --git a/runtime/bridge/arch/amd64/compiled_code_to_interpreter_bridge_dyn_amd64.S b/runtime/bridge/arch/amd64/compiled_code_to_interpreter_bridge_dyn_amd64.S index e8ffe5c67992d150391984b032f1ef57957427ad..9887ac5542a5fa3d83a19f8842d9d086a92c0079 100644 --- a/runtime/bridge/arch/amd64/compiled_code_to_interpreter_bridge_dyn_amd64.S +++ b/runtime/bridge/arch/amd64/compiled_code_to_interpreter_bridge_dyn_amd64.S @@ -27,7 +27,7 @@ // DecodedTaggedValue GetInitialTaggedValue(Method*) .extern GetInitialTaggedValue -// CompiledCodeToInterpreterBridgeDyn(Method* method, uint32_t num_args, int64_t func_obj, int64_t func_tag, int64_t arg_i, int64_t tag_i, ...) 
+// CompiledCodeToInterpreterBridgeDyn, follows DynamicMethod calling convention .global CompiledCodeToInterpreterBridgeDyn TYPE_FUNCTION(CompiledCodeToInterpreterBridgeDyn) CompiledCodeToInterpreterBridgeDyn: @@ -36,10 +36,7 @@ CompiledCodeToInterpreterBridgeDyn: // method: %rdi // num_args: %rsi - // func_obj: %rdx - // arg_0: %r8 - // arg_i: 8*i(%rbp) - // tag_i: 8*(i+1)(%rbp) + // arg_i: 24(%rbp, i, 8) // Save return address to the TLS field movq (%rsp), %rax @@ -105,137 +102,90 @@ CompiledCodeToInterpreterBridgeDyn: POP_GENERAL_REGS - // Set Ecmascript Environment - // %rdx - thisFunc* - // Save EcamscriptEnvironment* in %r12 - movq MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET(%THREAD_REG), %r12 - // Save prev EcamscriptEnvironment* in current Ecmascript Environment - pushq %r12 - // Save thisFunc* in current Ecmascript Environment - pushq %rdx - // Get lexical environment from thisFunction //TODO(dkrylov): GetOffset lexical environment - mov JSFUNCTION_LEXICAL_ENVIRONMENT_OFFSET(%rdx), %r14 - // Save lexical environment* in current Ecmascript Environment - pushq %r14 - // Get constant pool from thisFunction //TODO(dkrylov): GetOffset constant pool + // %rdx - func (stack arg0) + movq 24(%rbp), %rdx + +#if defined(PANDA_WITH_ECMASCRIPT) && defined(ARK_INTRINSIC_SET) + // Setup EcmascriptEnvironment + subq $ECMASCRIPT_ENVIRONMENT_SIZE, %rsp + movq MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET(%THREAD_REG), %r14 + movq %r14, ECMASCRIPT_ENVIRONMENT_PREV_ENVIRONMENT_OFFSET(%rsp) + movq %rdx, ECMASCRIPT_ENVIRONMENT_THIS_FUNC_OFFSET(%rsp) + mov JSFUNCTION_LEXICAL_ENV_OFFSET(%rdx), %r14 + movq %r14, ECMASCRIPT_ENVIRONMENT_LEXICAL_ENV_OFFSET(%rsp) mov JSFUNCTION_CONSTANT_POOL_OFFSET(%rdx), %r14 - // Save constant pool in current Ecmascript Environment - pushq %r14 - // Save real laguage extension data offset in Thread Reg field + movq %r14, ECMASCRIPT_ENVIRONMENT_CONSTANT_POOL_OFFSET(%rsp) mov %rsp, MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET(%THREAD_REG) - // End 
ecmascript prologue +#endif // save arguments to the stack PUSH_GENERAL_REGS - // %rsp % 16 == 0 here - // save method* before call - movq %rdi, %r14 - - // create an interpreter frame - movq %rsi, %rdi - - // get num args in r15d - // num_args = max(num_args, method->num_args_) - movl METHOD_NUM_ARGS_OFFSET(%r14), %r15d - cmpl %r15d, %edi - cmovge %edi, %r15d - // get number of rest arguments in r13 - movl %r15d, %r13d - subl %edi, %r13d - - movq %r14, %rsi - leaq (%rbp), %rdx + // Create an interpreter frame + // %r12 - iframe* + xchgq %rsi, %rdi + movq %rbp, %rdx // Frame* CreateFrameForMethodWithActualArgsDyn(uint32_t num_actual_args, Method* method, Frame* prev); callq CreateFrameForMethodWithActualArgsDyn@plt - movq %rax, %r12 // iframe* + movq %rax, %r12 POP_GENERAL_REGS - // setup regs as follow - // %rax - iframe.vregs_ + num_vregs_ - // %rdi - stack args - // %rsi - num args - // %rdx - func obj - // %r8 - arg0 - // %rbx - method* + // setup regs as follows // %r12 - iframe* - // %r13 - num of rest arguments - movl FRAME_NUM_VREGS_OFFSET(%rax), %ebx - subl %r15d, %ebx - shll $3, %ebx - leaq FRAME_VREGS_OFFSET(%r12,%rbx), %rax - movq %rdi, %rbx // method* - leaq 24(%rbp), %rdi - - movq %rdx, (%rax) - addq $FRAME_VREGISTER_SIZE, %rax - - subl $1, %esi - je .Lrest_args - - movq %rcx, (%rax) - addq $FRAME_VREGISTER_SIZE, %rax - subl $1, %esi - je .Lrest_args - - movq %r8, (%rax) - addq $FRAME_VREGISTER_SIZE, %rax - subl $1, %esi - je .Lrest_args - - movq %r9, (%rax) - addq $FRAME_VREGISTER_SIZE, %rax - subl $1, %esi - je .Lrest_args - -.Lloop: - movq (%rdi), %rdx - addq $FRAME_VREGISTER_SIZE, %rdi - - movq %rdx, (%rax) - addq $FRAME_VREGISTER_SIZE, %rax - - subl $1, %esi - ja .Lloop - -.Lrest_args: - // mov rax to r14 to survive the call - movq %rax, %r14 - // set the first arg Method* - movq %rbx, %rdi - callq GetInitialTaggedValue@plt - - // setup regs as follow - // rax / rdx - initial value / tag - // %rbx - method* - // %r12 - iframe - // %r13 - num rest 
args - // %r14 - pointer to the current arg vreg - -1: // fill in the reset args - test %r13, %r13 - je .Lcall_interpreter - movq %rax, (%r14) - addq $FRAME_VREGISTER_SIZE, %r14 - subl $1, %r13d - jmp 1b + // %rdi - method* + // %rsi - num_actual_args + + // %r15d - num_iframe_args = max(num_actual_args, mehtod->num_args_) + // %rax - iframe.vregs_ + num_vregs_ + movl METHOD_NUM_ARGS_OFFSET(%rdi), %r15d + cmpl %r15d, %esi + cmovge %esi, %r15d + movl FRAME_NUM_VREGS_OFFSET(%r12), %eax + subl %r15d, %eax + leaq FRAME_VREGS_OFFSET(%r12, %rax, FRAME_VREGISTER_SIZE), %rax + + test %rsi, %rsi + jz .Linit_rest + + // copy actual args + // %rdx - incoming stack arguments + leaq 24(%rbp), %rdx + leal -FRAME_VREGISTER_SIZE(, %esi, FRAME_VREGISTER_SIZE), %r14d +.Lloop_actual: + movq (%rdx, %r14), %rcx + movq %rcx, (%rax, %r14) + subl $FRAME_VREGISTER_SIZE, %r14d + jae .Lloop_actual + +.Linit_rest: + // r15d - num_rest_args = num_iframe_args - num_actual_args + subl %esi, %r15d + jz .Lcall_interpreter + + // init rest args + // %r14 - iframe.vregs_ + num_vregs_ + num_actual_args + // %rax - initial tagged value (TAGGED_VALUE_UNDEFINED) + leaq (%rax, %rsi, FRAME_VREGISTER_SIZE), %r14 + movq $TAGGED_VALUE_UNDEFINED, %rax + leal -FRAME_VREGISTER_SIZE(, %r15d, FRAME_VREGISTER_SIZE), %r15d +.Lloop_rest: + movq %rax, (%r14, %r15) + subl $FRAME_VREGISTER_SIZE, %r15d + jae .Lloop_rest .Lcall_interpreter: // call InterpreterEntryPoint - movq %rbx, %rdi movq %r12, %rsi // void InterpreterEntryPoint(Method *method, Frame* frame); callq InterpreterEntryPoint@plt // handle the result - // setup regs as follow - // %rax - &iframe.acc_ + // setup regs as follows // %r12 - iframe* // %r13/%r14 - result leaq FRAME_ACC_OFFSET(%r12), %rax - movq (%rax), %r13 movq FRAME_ACC_MIRROR_OFFSET(%rax), %r14 @@ -259,14 +209,12 @@ CompiledCodeToInterpreterBridgeDyn: CFI_RESTORE(r15) addq $8, %rsp - // Ecmascript epilogue - // Get EcamscriptEnvironment* +#if defined(PANDA_WITH_ECMASCRIPT) && 
defined(ARK_INTRINSIC_SET) + // Destroy EcmascriptEnvironment movq MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET(%THREAD_REG), %r12 - // Get previous ecmascript environment - mov 0x18(%r12), %r13 - // Save previous ecmascript environment - mov %r13, MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET(%THREAD_REG) - // End Ecmascript epilogue + movq ECMASCRIPT_ENVIRONMENT_PREV_ENVIRONMENT_OFFSET(%r12), %r12 + movq %r12, MANAGED_THREAD_LANGUAGE_EXTENSION_DATA_OFFSET(%THREAD_REG) +#endif popq %rbp CFI_RESTORE(rbp) diff --git a/runtime/interpreter/dispatch_table.h b/runtime/bridge/arch/amd64/expand_compiled_code_args_dyn_amd64.S similarity index 37% rename from runtime/interpreter/dispatch_table.h rename to runtime/bridge/arch/amd64/expand_compiled_code_args_dyn_amd64.S index fb95c594be698d0df704bb7edcd9fb0281044512..7236e03388ea8e1568bc78fcc7055acf1090ac35 100644 --- a/runtime/interpreter/dispatch_table.h +++ b/runtime/bridge/arch/amd64/expand_compiled_code_args_dyn_amd64.S @@ -12,42 +12,48 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#ifndef PANDA_INTERPRETER_DISPATCH_TABLE_H_ -#define PANDA_INTERPRETER_DISPATCH_TABLE_H_ -#include -#include -#include - -#ifdef PANDA_ENABLE_GLOBAL_REGISTER_VARIABLES -#include "arch/global_regs.h" -#endif - -namespace panda::interpreter { - -#ifdef PANDA_ENABLE_GLOBAL_REGISTER_VARIABLES - -ALWAYS_INLINE inline void SetDispatchTable(const void *const *dispatch_table) -{ - arch::regs::SetDispatchTable(dispatch_table); -} - -ALWAYS_INLINE inline const void *const *GetDispatchTable([[maybe_unused]] const void *const *dispatch_table) -{ - return arch::regs::GetDispatchTable(); -} - -#else - -ALWAYS_INLINE inline void SetDispatchTable([[maybe_unused]] const void *const *dispatch_table) {} - -ALWAYS_INLINE inline const void *const *GetDispatchTable(const void *const *dispatch_table) -{ - return dispatch_table; -} - -#endif // PANDA_ENABLE_GLOBAL_REGISTER_VARIABLES - -} // namespace panda::interpreter - -#endif // PANDA_INTERPRETER_DISPATCH_TABLE_H_ +#include "arch/asm_support.h" +#include "arch/amd64/helpers_amd64.S" + +.global ExpandCompiledCodeArgsDyn +TYPE_FUNCTION(ExpandCompiledCodeArgsDyn) +ExpandCompiledCodeArgsDyn: + // %edi - method, preserved + // %esi - num_actual, preserved + // %edx - num_expected, tmp1 + // %rcx - tmp2 + // %r8 - tmp3 + // %r9 - tmp4 + + // %rdx - 8 * (aligned(2) num_expected - num_actual) + // %rcx - num_actual_args + 3 (for fp, lr, lr) + // %r8 - %rsp before moving + subl %esi, %edx + negq %rdx + andq $-2, %rdx + shlq $3, %rdx + leal 3(%esi), %ecx + movq %rsp, %r8 + + // Move sp and fp + addq %rdx, %rsp + addq %rdx, %rbp + +.Lloop_move: + movq (%r8), %r9 + movq %r9, (%r8, %rdx) + addq $8, %r8 + subl $1, %ecx + ja .Lloop_move + + movl $TAGGED_VALUE_UNDEFINED, %r9d + // Use loop counter as index + leaq -8(%r8, %rdx), %r8 + negl %edx +.Lloop_init: + movq %r9, (%r8, %rdx) + subl $8, %edx + ja .Lloop_init + + ret diff --git a/runtime/bridge/arch/amd64/handle_call_imm16_v16_amd64.S b/runtime/bridge/arch/amd64/handle_call_imm16_v16_amd64.S 
index d4a845d7cd455c87cedd70566d81f165a8e6cd04..bef350055883a07ac0250505f6f4decd4e899d6a 100644 --- a/runtime/bridge/arch/amd64/handle_call_imm16_v16_amd64.S +++ b/runtime/bridge/arch/amd64/handle_call_imm16_v16_amd64.S @@ -23,54 +23,23 @@ // ABI arg reg 1 (rsi) <- num_args movzwl (%rax), %esi - cmpl $0, %esi + test %esi, %esi je .Linvoke - // rbx <- vreg pointed by the insn (user's first argument) - movzwl 2(%rax), %eax - shll $3, %eax - addq %rax, %rbx + // rbx <- range in frame.vregs + movzwl 2(%rax), %r14d + leaq (%rbx, %r14, FRAME_VREGISTER_SIZE), %rbx - // rax <- counter of args - movl %esi, %eax - - // ABI arg reg 2 (rdx) <- boxed arg0 from user's code - movq (%rbx), %rdx - addq $FRAME_VREGISTER_SIZE, %rbx - subl $1, %eax - je .Linvoke - - // ABI arg reg 3 (rcx) <- boxed arg1 from user's code - movq (%rbx), %rcx - addq $FRAME_VREGISTER_SIZE, %rbx - subl $1, %eax - je .Linvoke - - // ABI arg reg 4 (r8) <- boxed arg2 from user's code - movq (%rbx), %r8 - addq $FRAME_VREGISTER_SIZE, %rbx - subl $1, %eax - je .Linvoke - - // ABI arg reg 5 (r9) <- boxed arg3 from user's code - movq (%rbx), %r9 - addq $FRAME_VREGISTER_SIZE, %rbx - subl $1, %eax - je .Linvoke - - // Reserve stack space for the arguments, starting from r13: - movl %eax, %r13d - shll $3, %r13d - subq %r13, %rsp + // Reserve stack args + leal (, %esi, 8), %r14d + subq %r14, %rsp andq $-16, %rsp - movq %rsp, %r13 - // Push the rest arguments to the stack (r14 is temp): -1: movq (%rbx), %r14 - movq %r14, (%r13) - addq $FRAME_VREGISTER_SIZE, %r13 - addq $FRAME_VREGISTER_SIZE, %rbx - subl $1, %eax - ja 1b + leal -FRAME_VREGISTER_SIZE(, %esi, FRAME_VREGISTER_SIZE), %r14d +1: + movq (%rbx, %r14), %r13 + movq %r13, (%rsp, %r14) + subl $FRAME_VREGISTER_SIZE, %r14d + jae 1b jmp .Linvoke diff --git a/runtime/bridge/arch/amd64/handle_call_imm4_v4_v4_v4_amd64.S b/runtime/bridge/arch/amd64/handle_call_imm4_v4_v4_v4_amd64.S index de327b6e4692a45f419c8d521e5eaad347a98264..89ee3fa4dd7fed581754a9b87f2c3f9034f9b5c0 
100644 --- a/runtime/bridge/arch/amd64/handle_call_imm4_v4_v4_v4_amd64.S +++ b/runtime/bridge/arch/amd64/handle_call_imm4_v4_v4_v4_amd64.S @@ -20,60 +20,44 @@ // ABI arg reg 0 (rdi) <- panda::Method* movq %r12, %rdi + // load imm4,v4,v4,v4,v4,v4 into %r13d + movl (%rax), %r13d + // ABI arg reg 1 (rsi) <- num_args - // load num args into %rsi and function object into %rdx - movzbl (%rax), %r13d - addq $1, %rax // advance insn_ptr movl %r13d, %esi - andl $0x0F, %esi + andl $0x000F, %esi - cmpl $0, %esi + test %esi, %esi je .Linvoke - // ABI arg reg 2 (rdx) <- boxed arg0 from user's code - andl $0xF0, %r13d - shrl $1, %r13d - movq (%rbx, %r13), %rdx - - cmpl $1, %esi // No more args from user's code? + // Reserve stack args + leal (, %esi, 8), %r14d + subq %r14, %rsp + andq $-16, %rsp + +.macro load_calli_dyn_short_arg i + movl %r13d, %r14d + shrl $(4 * (\i + 1) - 3), %r14d + andl $(0xF << 3), %r14d + movq (%rbx, %r14), %r14 + movq %r14, (8 * \i)(%rsp) +.endm + + load_calli_dyn_short_arg 0 + cmpl $1, %esi je .Linvoke - // Read next insn byte for user's arg1 and arg2: - movzbl (%rax), %r14d - movl %r14d, %r13d - addq $1, %rax // advance insn_ptr - - // ABI arg reg 3 (rcx) <- boxed arg1 from user's code - andl $0x0F, %r14d - shll $3, %r14d - movq (%rbx, %r14), %rcx - cmpl $2, %esi // No more args from user's code? + load_calli_dyn_short_arg 1 + cmpl $2, %esi je .Linvoke - // ABI arg reg 4 (r8) <- boxed arg2 from user's code - andl $0xF0, %r13d - shrl $1, %r13d - movq (%rbx, %r13), %r8 - cmpl $3, %esi // No more args from user's code? + load_calli_dyn_short_arg 2 + cmpl $3, %esi je .Linvoke - // Read next insn byte for user's arg3 and arg4: - movzbl (%rax), %r14d - movl %r14d, %r13d - addq $1, %rax // advance insn_ptr - - // ABI arg reg 5 (r9) <- boxed arg3 from user's code - andl $0x0F, %r14d - shll $3, %r14d - movq (%rbx, %r14), %r9 - cmpl $4, %esi // No more args from user's code? 
+ load_calli_dyn_short_arg 3 + cmpl $4, %esi je .Linvoke - // Stack slot <- boxed arg4 from user's code - subq $8, %rsp - andl $0xF0, %r13d - shrl $1, %r13d - movq (%rbx, %r13), %r13 - movq %r13, (%rsp) - + load_calli_dyn_short_arg 4 jmp .Linvoke diff --git a/runtime/bridge/arch/amd64/interpreter_to_compiled_code_bridge_dyn_amd64.S b/runtime/bridge/arch/amd64/interpreter_to_compiled_code_bridge_dyn_amd64.S index 7bfb217baa6abe9306ef281d4244a221ba0a7347..fdd31a4bba809f3f9fe9e3dccb331377e33b33bb 100644 --- a/runtime/bridge/arch/amd64/interpreter_to_compiled_code_bridge_dyn_amd64.S +++ b/runtime/bridge/arch/amd64/interpreter_to_compiled_code_bridge_dyn_amd64.S @@ -94,6 +94,7 @@ InterpreterToCompiledCodeBridgeDyn: movq METHOD_COMPILED_ENTRY_POINT_OFFSET(%rdi), %rax movq -64(%rbp), %THREAD_REG callq *%rax + // sp may be modified by call // handle the result // setup registers as follow @@ -182,62 +183,29 @@ InvokeCompiledCodeWithArgArrayDyn: movq %rcx, %rdi // No arguments passed to the callee - cmpq $0, %rsi - je .Linvoke_ - - // ebx <- temporary arg counter - movl %esi, %ebx - - // ABI arg reg 2 (rdx) <- boxed arg0 from user's code - movq (%rax), %rdx - addq $FRAME_VREGISTER_SIZE, %rax - subl $1, %ebx - cmpl $0, %ebx - je .Linvoke_ - - // ABI arg reg 3 (rcx) <- boxed arg1 from user's code - movq (%rax), %rcx - addq $FRAME_VREGISTER_SIZE, %rax - subl $1, %ebx - cmpl $0, %ebx - je .Linvoke_ - - // ABI arg reg 4 (r8) <- boxed arg2 from user's code - movq (%rax), %r8 - addq $FRAME_VREGISTER_SIZE, %rax - subl $1, %ebx - cmpl $0, %ebx - je .Linvoke_ - - // ABI arg reg 5 (r9) <- boxed arg3 from user's code - movq (%rax), %r9 - addq $FRAME_VREGISTER_SIZE, %rax - subl $1, %ebx - cmpl $0, %ebx - je .Linvoke_ - - // setup stack args - // reserve stack space - // %r12 - stack poionter - movl %ebx, %r12d - shll $3, %r12d + test %esi, %esi + jz .Linvoke_ + + // Allocate stack args + leal (, %esi, FRAME_VREGISTER_SIZE), %r12d subq %r12, %rsp andq $-16, %rsp - movq %rsp, %r12 // Copy 
boxed arguments to the stack + // ebx <- loop counter + // rdx <- temp register + leal -FRAME_VREGISTER_SIZE(, %esi, FRAME_VREGISTER_SIZE), %ebx .Lloop_: - movq (%rax), %r10 - addq $FRAME_VREGISTER_SIZE, %rax - movq %r10, (%r12) - addq $FRAME_VREGISTER_SIZE, %r12 - subl $1, %ebx - ja .Lloop_ + movq (%rax, %rbx), %rdx + movq %rdx, (%rsp, %rbx) + subl $FRAME_VREGISTER_SIZE, %ebx + jae .Lloop_ .Linvoke_: // invoke the entrypoint movq METHOD_COMPILED_ENTRY_POINT_OFFSET(%rdi), %rax callq *%rax + // sp may be modified by call leaq -40(%rbp), %rsp popq %rbx diff --git a/verification/type/type_param.cpp b/runtime/bridge/arch/arm/expand_compiled_code_args_dyn_arm.S similarity index 43% rename from verification/type/type_param.cpp rename to runtime/bridge/arch/arm/expand_compiled_code_args_dyn_arm.S index 386aa8a03c350a8e806c60b1c9c395e38ce6b82f..fc37cbf5718e1d53558e820dad45b79a47c94809 100644 --- a/verification/type/type_param.cpp +++ b/runtime/bridge/arch/arm/expand_compiled_code_args_dyn_arm.S @@ -13,35 +13,9 @@ * limitations under the License. 
*/ -#include "type_param.h" -#include "type_system.h" -#include "type_type.h" +#include "arch/asm_support.h" -namespace panda::verifier { - -TypeParam::TypeParam(TypeVariance v, const Type &t) - : TypeParamIdx {v, t.Number()}, kind_ {t.GetTypeSystemKind()}, threadnum_ {t.GetThreadNum()} -{ -} - -TypeParam::TypeParam(TypeSystemKind kind, ThreadNum threadnum, const TypeParamIdx &p) - : TypeParamIdx {p}, kind_ {kind}, threadnum_ {threadnum} -{ -} - -TypeParams TypeParam::operator>>(const TypeParam &p) const -{ - return TypeParams {kind_, threadnum_} >> *this >> p; -} - -TypeParam::operator TypeParams() const // NOLINT(google-explicit-constructor) -{ - return TypeParams {kind_, threadnum_} >> *this; -} - -TypeParam::operator Type() const -{ - return {kind_, threadnum_, TypeParamIdx::GetInt()}; -} - -} // namespace panda::verifier +.global ExpandCompiledCodeArgsDyn +TYPE_FUNCTION(ExpandCompiledCodeArgsDyn) +ExpandCompiledCodeArgsDyn: + bx lr diff --git a/runtime/compiler.cpp b/runtime/compiler.cpp index c88d6c450f9f42300737eaeaf2d817be217ac17d..0a7c1d8654424c0ed993244c03ec247a1a2d1fd7 100644 --- a/runtime/compiler.cpp +++ b/runtime/compiler.cpp @@ -395,6 +395,7 @@ panda::pandasm::LiteralArray PandaRuntimeInterface::GetLiteralArray(MethodPtr m, { auto method = MethodCast(m); auto pfile = method->GetPandaFile(); + id = pfile->GetLiteralArrays()[id]; pandasm::LiteralArray lit_array; panda_file::LiteralDataAccessor lit_array_accessor(*pfile, pfile->GetLiteralArraysId()); @@ -558,14 +559,14 @@ uint32_t PandaRuntimeInterface::GetArrayElementSize(MethodPtr method, IdType id) uintptr_t PandaRuntimeInterface::GetPointerToConstArrayData(MethodPtr method, IdType id) const { auto *pf = MethodCast(method)->GetPandaFile(); - return Runtime::GetCurrent()->GetPointerToConstArrayData(*pf, id); + return Runtime::GetCurrent()->GetPointerToConstArrayData(*pf, pf->GetLiteralArrays()[id]); } size_t PandaRuntimeInterface::GetOffsetToConstArrayData(MethodPtr method, IdType id) const { auto *pf 
= MethodCast(method)->GetPandaFile(); - auto offset = - Runtime::GetCurrent()->GetPointerToConstArrayData(*pf, id) - reinterpret_cast(pf->GetBase()); + auto offset = Runtime::GetCurrent()->GetPointerToConstArrayData(*pf, pf->GetLiteralArrays()[id]) - + reinterpret_cast(pf->GetBase()); return static_cast(offset); } diff --git a/runtime/entrypoints/entrypoints.cpp b/runtime/entrypoints/entrypoints.cpp index 86dc5bc07708ba66ba60f02c000a6b5d29c052a5..fc4498f3991b7a2873b609053fa979d04f518e03 100644 --- a/runtime/entrypoints/entrypoints.cpp +++ b/runtime/entrypoints/entrypoints.cpp @@ -151,7 +151,7 @@ extern "C" void WriteTlabStatsEntrypoint(size_t size) { LOG_ENTRYPOINT(); - ASSERT(size <= mem::PANDA_TLAB_SIZE); + ASSERT(size <= Thread::GetCurrent()->GetVM()->GetHeapManager()->GetTLABMaxAllocSize()); if (mem::PANDA_TRACK_TLAB_ALLOCATIONS) { auto mem_stats = Thread::GetCurrent()->GetVM()->GetHeapManager()->GetMemStats(); if (mem_stats == nullptr) { @@ -265,14 +265,14 @@ extern "C" ObjectHeader *CloneObjectEntrypoint(ObjectHeader *obj) { BEGIN_ENTRYPOINT(); - uint32_t flags = obj->ClassAddr()->GetFlags(); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + uint32_t flags = obj->ClassAddr()->GetFlags(); if (UNLIKELY((flags & Class::IS_CLONEABLE) == 0)) { ASSERT(!ManagedThread::GetCurrent()->HasPendingException()); ThrowCloneNotSupportedException(); HandlePendingException(UnwindPolicy::SKIP_INLINED); return nullptr; } - return ObjectHeader::Clone(obj); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + return ObjectHeader::Clone(obj); } extern "C" ObjectHeader *PostBarrierWriteEntrypoint(ObjectHeader *obj, size_t size) @@ -298,7 +298,7 @@ extern "C" void CheckCastEntrypoint(const ObjectHeader *obj, Class *klass) // Don't use obj after ClassLinker call because GC can move it. // Since we need only class and class in a non-movalble object // it is ok to get it here. - Class *obj_klass = obj == nullptr ? 
nullptr : obj->ClassAddr(); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + Class *obj_klass = obj == nullptr ? nullptr : obj->ClassAddr(); if (UNLIKELY(obj_klass != nullptr && !klass->IsAssignableFrom(obj_klass))) { panda::ThrowClassCastException(klass, obj_klass); HandlePendingException(); @@ -313,7 +313,7 @@ extern "C" uint8_t IsInstanceEntrypoint(ObjectHeader *obj, Class *klass) // Don't use obj after ClassLinker call because GC can move it. // Since we need only class and class in a non-movalble object // it is ok to get it here. - Class *obj_klass = obj == nullptr ? nullptr : obj->ClassAddr(); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + Class *obj_klass = obj == nullptr ? nullptr : obj->ClassAddr(); if (UNLIKELY(obj_klass != nullptr && klass->IsAssignableFrom(obj_klass))) { return 1; } @@ -607,7 +607,7 @@ extern "C" uintptr_t NO_ADDRESS_SANITIZE ResolveVirtualCallEntrypoint(const Meth HandlePendingException(); UNREACHABLE(); } - auto *resolved = obj->ClassAddr()->ResolveVirtualMethod(callee); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + auto *resolved = obj->ClassAddr()->ResolveVirtualMethod(callee); ASSERT(resolved != nullptr); return reinterpret_cast(resolved); @@ -621,7 +621,7 @@ extern "C" uintptr_t NO_ADDRESS_SANITIZE ResolveVirtualCallAotEntrypoint(const M // Don't use obj after ClassLinker call because GC can move it. // Since we need only class and class in a non-movalble object // it is ok to get it here. 
- auto *obj_klass = obj->ClassAddr(); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + auto *obj_klass = obj->ClassAddr(); Method *method = Runtime::GetCurrent()->GetClassLinker()->GetMethod(*caller, panda_file::File::EntityId(callee_id)); if (UNLIKELY(method == nullptr)) { HandlePendingException(); @@ -754,7 +754,7 @@ extern "C" NO_ADDRESS_SANITIZE void ThrowExceptionEntrypoint(ObjectHeader *excep NullPointerExceptionEntrypoint(); UNREACHABLE(); } - ManagedThread::GetCurrent()->SetException(exception); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + ManagedThread::GetCurrent()->SetException(exception); SetExceptionEvent(events::ExceptionType::THROW); HandlePendingException(UnwindPolicy::SKIP_INLINED); @@ -841,7 +841,7 @@ extern "C" NO_ADDRESS_SANITIZE void ClassCastExceptionEntrypoint(Class *inst_cla LOG(DEBUG, INTEROP) << "ClassCastExceptionEntrypoint \n"; ASSERT(!ManagedThread::GetCurrent()->HasPendingException()); ASSERT(src_obj != nullptr); - ThrowClassCastException(inst_class, src_obj->ClassAddr()); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + ThrowClassCastException(inst_class, src_obj->ClassAddr()); SetExceptionEvent(events::ExceptionType::CAST_CHECK); HandlePendingException(UnwindPolicy::SKIP_INLINED); } @@ -895,13 +895,13 @@ extern "C" DecodedTaggedValue GetInitialTaggedValue(Method *method) extern "C" void LockObjectEntrypoint(ObjectHeader *obj) { BEGIN_ENTRYPOINT(); - panda::intrinsics::ObjectMonitorEnter(obj); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + panda::intrinsics::ObjectMonitorEnter(obj); } extern "C" void LockObjectSlowPathEntrypoint(ObjectHeader *obj) { BEGIN_ENTRYPOINT(); - panda::intrinsics::ObjectMonitorEnter(obj); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + panda::intrinsics::ObjectMonitorEnter(obj); if (!ManagedThread::GetCurrent()->HasPendingException()) { return; } @@ -913,13 +913,13 @@ extern "C" void LockObjectSlowPathEntrypoint(ObjectHeader *obj) extern "C" void UnlockObjectEntrypoint(ObjectHeader *obj) { BEGIN_ENTRYPOINT(); - 
panda::intrinsics::ObjectMonitorExit(obj); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + panda::intrinsics::ObjectMonitorExit(obj); } extern "C" void UnlockObjectSlowPathEntrypoint(ObjectHeader *obj) { BEGIN_ENTRYPOINT(); - panda::intrinsics::ObjectMonitorExit(obj); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + panda::intrinsics::ObjectMonitorExit(obj); if (!ManagedThread::GetCurrent()->HasPendingException()) { return; } @@ -1076,13 +1076,16 @@ extern "C" ObjectHeader *CreateObjectByIdEntrypoint(ManagedThread *thread, const extern "C" uint8_t CheckCastByBCIDEntrypoint(const Method *caller, ObjectHeader *obj, uint32_t type_id) { CHECK_STACK_WALKER; + auto thread = ManagedThread::GetCurrent(); + [[maybe_unused]] HandleScope scope(thread); + VMHandle handle_obj(thread, obj); auto klass = interpreter::RuntimeInterface::ResolveClass(ManagedThread::GetCurrent(), *caller, BytecodeId(type_id)); if (UNLIKELY(klass == nullptr)) { return 1; } - Class *obj_klass = obj == nullptr ? nullptr : obj->ClassAddr(); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + Class *obj_klass = handle_obj.GetPtr() == nullptr ? 
nullptr : handle_obj.GetPtr()->ClassAddr(); if (UNLIKELY(obj_klass != nullptr && !klass->IsAssignableFrom(obj_klass))) { panda::ThrowClassCastException(klass, obj_klass); return 1; @@ -1094,7 +1097,8 @@ extern "C" uint8_t IsInstanceByBCIDEntrypoint(const Method *caller, ObjectHeader { CHECK_STACK_WALKER; auto thread = ManagedThread::GetCurrent(); - VMHandle handle_obj(thread, obj); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + [[maybe_unused]] HandleScope scope(thread); + VMHandle handle_obj(thread, obj); auto klass = interpreter::RuntimeInterface::ResolveClass(thread, *caller, BytecodeId(type_id)); if (UNLIKELY(klass == nullptr)) { return 0; @@ -1171,7 +1175,7 @@ extern "C" void ThrowExceptionFromInterpreter(ObjectHeader *exception) { CHECK_STACK_WALKER; ASSERT(!ManagedThread::GetCurrent()->HasPendingException()); - ManagedThread::GetCurrent()->SetException(exception); // SUPPRESS_CSA(alpha.core.WasteObjHeader) + ManagedThread::GetCurrent()->SetException(exception); } extern "C" void ThrowArithmeticExceptionFromInterpreter() diff --git a/runtime/entrypoints/entrypoints.yaml b/runtime/entrypoints/entrypoints.yaml index 373419923d04616be2066eb9a723835a22115a77..7bd954513ac368c5388c881399e8e8cc8137d8e1 100644 --- a/runtime/entrypoints/entrypoints.yaml +++ b/runtime/entrypoints/entrypoints.yaml @@ -164,7 +164,7 @@ entrypoints: signature: - panda::coretypes::Array* - const panda::Method* - - panda::FileEntityId + - uint32_t - name: CreateObjectByClass entrypoint: CreateObjectByClassEntrypoint @@ -592,3 +592,13 @@ entrypoints: signature: - int32_t - uint64_t + +- name: ExpandCompiledCodeArgsDyn + entrypoint: ExpandCompiledCodeArgsDyn + bridge: none + properties: [] + signature: + - void + - panda::Method* + - uint32_t + - uint32_t diff --git a/runtime/include/cframe_iterators.h b/runtime/include/cframe_iterators.h index 31c12a048bdcf907d1ce90c2601035f42f77f548..d8b16001519d9156c72d10963ea2304581604024 100644 --- a/runtime/include/cframe_iterators.h +++ 
b/runtime/include/cframe_iterators.h @@ -389,42 +389,31 @@ class CFrameDynamicNativeMethodIterator { public: static auto MakeRange(CFrame *cframe) { - size_t arg_regs_count = arch::ExtArchTraits::NUM_GP_ARG_REGS; + size_t constexpr GPR_ARGS_MAX = arch::ExtArchTraits::NUM_GP_ARG_REGS; + size_t constexpr GPR_ARGS_INTERNAL = 2U; // Depends on dyn callconv + size_t constexpr GPR_FN_ARGS_NUM = 0U; // Depends on dyn callconv + CFrameLayout cframe_layout(arch, 0); + + ptrdiff_t const gpr_end_slot = cframe_layout.GetCallerRegsStartSlot() - 1 - cframe_layout.GetStackStartSlot(); + ptrdiff_t const gpr_start_slot = gpr_end_slot + GPR_ARGS_MAX; + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) - Span callers(cframe->GetCallerSaveStack() - arg_regs_count, arg_regs_count); - // In dynamic methods the first two args are Method* method and uint32_t num_args - // Read num_args - auto num_args = static_cast(callers[1]); - size_t num_arg_slots = num_args * sizeof(interpreter::VRegister) / sizeof(SlotType); + Span gpr_slots(cframe->GetValuePtrFromSlot(gpr_start_slot), GPR_ARGS_MAX); + + auto const actual_args_num = static_cast(gpr_slots[1]); + auto const stack_args_num = actual_args_num - GPR_FN_ARGS_NUM; + + ptrdiff_t const gpr_tagged_end_slot = gpr_end_slot + GPR_ARGS_INTERNAL; + ptrdiff_t const gpr_tagged_start_slot = gpr_tagged_end_slot + GPR_FN_ARGS_NUM; + + ptrdiff_t const stack_start_slot = cframe_layout.GetStackArgsStartSlot() - cframe_layout.GetStackStartSlot(); + ptrdiff_t const stack_end_slot = stack_start_slot - stack_args_num; - CFrameLayout cframe_layout(arch, 0); - size_t caller_end_slot = cframe_layout.GetCallerRegsStartSlot(); - size_t caller_start_slot = caller_end_slot + arg_regs_count; - size_t gpr_arg_start_slot = caller_start_slot - 2; // skip Method and num_args, 2 - offset - // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon) - if constexpr (arch != Arch::X86_64) { - gpr_arg_start_slot = 
RoundDown(gpr_arg_start_slot, sizeof(interpreter::VRegister) / sizeof(SlotType)); - } - size_t gpr_arg_end_slot = std::max(caller_end_slot, gpr_arg_start_slot - num_arg_slots); - size_t num_gpr_arg_slots = std::min(gpr_arg_start_slot - caller_end_slot, num_arg_slots); - - size_t num_stack_arg_slots = num_arg_slots - num_gpr_arg_slots; - ptrdiff_t stack_arg_start_slot = cframe_layout.GetStackArgsStartSlot(); - ptrdiff_t stack_arg_end_slot = stack_arg_start_slot - num_stack_arg_slots; - - // Since all stack slots are calculated relative STACK_START_SLOT - // subtract it from each value - ptrdiff_t stack_start_slot = cframe_layout.GetStackStartSlot(); - gpr_arg_start_slot -= stack_start_slot; - gpr_arg_end_slot -= stack_start_slot; - caller_end_slot -= stack_start_slot; - stack_arg_start_slot -= stack_start_slot; - stack_arg_end_slot -= stack_start_slot; return Range( - CFrameDynamicNativeMethodIterator(cframe, gpr_arg_start_slot - 1, caller_end_slot - 1, stack_arg_start_slot, - stack_arg_end_slot), - CFrameDynamicNativeMethodIterator(cframe, gpr_arg_end_slot - 1, caller_end_slot - 1, stack_arg_end_slot, - stack_arg_end_slot)); + CFrameDynamicNativeMethodIterator(cframe, gpr_tagged_start_slot, gpr_tagged_end_slot, stack_start_slot, + stack_end_slot), + CFrameDynamicNativeMethodIterator(cframe, gpr_tagged_end_slot, gpr_tagged_end_slot, stack_end_slot, + stack_end_slot)); } VRegInfo operator*() diff --git a/runtime/include/managed_thread.h b/runtime/include/managed_thread.h index 5c825a907dbc90dc8110986d5451aa319d37d015..73d56c0d49ca8da1956f2827d1759257439398ea 100644 --- a/runtime/include/managed_thread.h +++ b/runtime/include/managed_thread.h @@ -587,24 +587,28 @@ public: return STACK_OVERFLOW_RESERVED_SIZE; } + template void *const *GetDebugDispatchTable() const { - return debug_dispatch_table_; + return const_cast(GetOrSetInnerDebugDispatchTable()); } + template void SetDebugDispatchTable(const void *const *dispatch_table) { - debug_dispatch_table_ = 
const_cast(dispatch_table); + GetOrSetInnerDebugDispatchTable(true, dispatch_table); } + template void *const *GetCurrentDispatchTable() const { - return current_dispatch_table_; + return const_cast(GetOrSetInnerDispatchTable()); } + template void SetCurrentDispatchTable(const void *const *dispatch_table) { - current_dispatch_table_ = const_cast(dispatch_table); + GetOrSetInnerDispatchTable(true, dispatch_table); } void SuspendImpl(bool internal_suspend = false); @@ -788,6 +792,28 @@ private: } } + template + NO_OPTIMIZE const void *const *GetOrSetInnerDebugDispatchTable(bool set = false, + const void *const *dispatch_table = nullptr) const + { + thread_local static const void *const *current_debug_dispatch_table = nullptr; + if (set) { + current_debug_dispatch_table = dispatch_table; + } + return current_debug_dispatch_table; + } + + template + NO_OPTIMIZE const void *const *GetOrSetInnerDispatchTable(bool set = false, + const void *const *dispatch_table = nullptr) const + { + thread_local static const void *const *current_dispatch_table = nullptr; + if (set) { + current_dispatch_table = dispatch_table; + } + return current_dispatch_table; + } + virtual bool TestLockState() const; static constexpr uint32_t THREAD_STATUS_OFFSET = 16; @@ -873,10 +899,6 @@ private: // Used in mathod events uint32_t call_depth_ {0}; - void *const *debug_dispatch_table_ {nullptr}; - - void *const *current_dispatch_table_ {nullptr}; - NO_COPY_SEMANTIC(ManagedThread); NO_MOVE_SEMANTIC(ManagedThread); }; diff --git a/runtime/include/mem/allocator.h b/runtime/include/mem/allocator.h index 6e2d5288fe40f8d057970eac927c5b7db2e620ee..ec8f1e1114786aa26f5248f402c7a277b93b1ea7 100644 --- a/runtime/include/mem/allocator.h +++ b/runtime/include/mem/allocator.h @@ -766,7 +766,7 @@ private: template class ObjectAllocatorGen final : public ObjectAllocatorGenBase { // TODO(dtrubenkov): create a command line argument for this - static constexpr size_t YOUNG_TLAB_SIZE = 4_KB; // TLAB size for young gen + 
static constexpr size_t DEFAULT_YOUNG_TLAB_SIZE = 4_KB; // TLAB size for young gen using YoungGenAllocator = BumpPointerAllocator, true>; @@ -886,6 +886,7 @@ private: MemStatsType *mem_stats_ = nullptr; ObjectAllocator *non_movable_object_allocator_ = nullptr; LargeObjectAllocator *large_non_movable_object_allocator_ = nullptr; + size_t tlab_size_ = DEFAULT_YOUNG_TLAB_SIZE; template void *AllocateTenuredImpl(size_t size); diff --git a/runtime/include/runtime.h b/runtime/include/runtime.h index 0870745498ea38ecd5edbc6109fd29a53e3d4e36..699630ea54fa4415aa18c2a1afaa7f55800aac11 100644 --- a/runtime/include/runtime.h +++ b/runtime/include/runtime.h @@ -51,6 +51,7 @@ #include "libpandabase/os/native_stack.h" #include "libpandabase/os/library_loader.h" #include "runtime/include/loadable_agent.h" +#include "runtime/tooling/tools.h" namespace panda { @@ -416,6 +417,11 @@ public: return relayout_profiler_; } + inline tooling::Tools &GetTools() + { + return tools_; + } + private: void NotifyAboutLoadedModules(); @@ -521,6 +527,7 @@ private: UnwindStackFn unwind_stack_fn_ {nullptr}; RelayoutProfiler *relayout_profiler_ {nullptr}; + tooling::Tools tools_; NO_COPY_SEMANTIC(Runtime); NO_MOVE_SEMANTIC(Runtime); diff --git a/runtime/interpreter/instruction_handler_base.h b/runtime/interpreter/instruction_handler_base.h index afa3b11d4e57e5c7f57e7e7ec3056d1ffb342222..d6e528e13f15df6f0d68cc01ece535c1625b08d5 100644 --- a/runtime/interpreter/instruction_handler_base.h +++ b/runtime/interpreter/instruction_handler_base.h @@ -211,6 +211,11 @@ public: return state_->GetThread(); } + ALWAYS_INLINE const void *const *GetDispatchTable() const + { + return state_->GetDispatchTable(); + } + protected: template ALWAYS_INLINE void MoveToNextInst() @@ -258,11 +263,6 @@ protected: state_->SetInst(inst); } - ALWAYS_INLINE const void *const *GetDispatchTable() const - { - return state_->GetDispatchTable(); - } - ALWAYS_INLINE void SetDispatchTable(const void *const *dispatch_table) { return 
state_->SetDispatchTable(dispatch_table); diff --git a/runtime/interpreter/instruction_handler_state.h b/runtime/interpreter/instruction_handler_state.h index 045e0d954209649fea32c3175f46f77ff899fdc4..a1d12679caed9d029662357d0eaa9149c0a3fcac 100644 --- a/runtime/interpreter/instruction_handler_state.h +++ b/runtime/interpreter/instruction_handler_state.h @@ -24,8 +24,9 @@ namespace panda::interpreter { class InstructionHandlerState { public: // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) - ALWAYS_INLINE InstructionHandlerState(ManagedThread *thread, const uint8_t *pc, Frame *frame) - : state_(thread, pc, frame) + ALWAYS_INLINE InstructionHandlerState(ManagedThread *thread, const uint8_t *pc, Frame *frame, + const void *const *dispatch_table) + : state_(thread, pc, frame, dispatch_table) { instructions_ = GetFrame()->GetInstruction(); } diff --git a/runtime/interpreter/interpreter-inl.h b/runtime/interpreter/interpreter-inl.h index 89bc297cf16bd74f2e19d8250b0dc28b19c6fd63..f2aac7ef800a5b7580b9a040c5417785c9e97b88 100644 --- a/runtime/interpreter/interpreter-inl.h +++ b/runtime/interpreter/interpreter-inl.h @@ -51,7 +51,6 @@ #include "runtime/include/value-inl.h" #include "runtime/interpreter/acc_vregister.h" #include "runtime/interpreter/arch/macros.h" -#include "runtime/interpreter/dispatch_table.h" #include "runtime/interpreter/frame.h" #include "runtime/interpreter/instruction_handler_base.h" #include "runtime/interpreter/math_helpers.h" @@ -109,7 +108,7 @@ public: } }; -template +template class InstructionHandler : public InstructionHandlerBase { public: ALWAYS_INLINE inline explicit InstructionHandler(InstructionHandlerState *state) @@ -2015,7 +2014,8 @@ public: // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) prev->GetInstruction() + prev->GetBytecodeOffset(), prev); - this->SetDispatchTable(this->GetThread()->GetCurrentDispatchTable()); + this->SetDispatchTable( + this->GetThread()->template GetCurrentDispatchTable()); 
RuntimeIfaceT::SetCurrentFrame(thread, prev); if (UNLIKELY(this->GetThread()->HasPendingException())) { @@ -3055,7 +3055,8 @@ public: frame->SetInitobj(); } frame->SetInstruction(instructions); - this->SetDispatchTable(this->GetThread()->GetCurrentDispatchTable()); + this->SetDispatchTable( + this->GetThread()->template GetCurrentDispatchTable()); this->template MoveToNextInst(); this->GetFrame()->SetNextInstruction(this->GetInst()); this->GetInstructionHandlerState()->UpdateInstructionHandlerState(instructions, frame); diff --git a/runtime/interpreter/runtime_interface.h b/runtime/interpreter/runtime_interface.h index f729ff651bfc611009e5e096d24675613033b081..03756dd8161824fa2b47130ca9c2bf45d8fd037f 100644 --- a/runtime/interpreter/runtime_interface.h +++ b/runtime/interpreter/runtime_interface.h @@ -122,7 +122,7 @@ public: static coretypes::Array *ResolveLiteralArray(PandaVM *vm, const Method &caller, BytecodeId id) { - return Runtime::GetCurrent()->ResolveLiteralArray(vm, caller, id.AsFileId().GetOffset()); + return Runtime::GetCurrent()->ResolveLiteralArray(vm, caller, id.AsIndex()); } static uint32_t GetCompilerHotnessThreshold() diff --git a/runtime/interpreter/state.h b/runtime/interpreter/state.h index 64263a2c8bab50a13b0edfb87b8029ec00dfcead..a87ff6d348ba39ce0641e951129f5459d4715883 100644 --- a/runtime/interpreter/state.h +++ b/runtime/interpreter/state.h @@ -89,11 +89,14 @@ private: class State : public StateIface { public: - ALWAYS_INLINE inline State(ManagedThread *thread, const uint8_t *pc, Frame *frame) : StateIface(frame) + ALWAYS_INLINE inline State(ManagedThread *thread, const uint8_t *pc, Frame *frame, + const void *const *dispatch_table) + : StateIface(frame) { SetInst(BytecodeInstruction(pc)); SetFrame(frame); SetThread(thread); + SetDispatchTable(dispatch_table); } ALWAYS_INLINE inline void UpdateState(const uint8_t *pc, Frame *frame) @@ -174,11 +177,14 @@ private: class State : public StateIface { public: - ALWAYS_INLINE inline 
State(ManagedThread *thread, const uint8_t *pc, Frame *frame) : StateIface(frame) + ALWAYS_INLINE inline State(ManagedThread *thread, const uint8_t *pc, Frame *frame, + const void *const *dispatch_table) + : StateIface(frame) { SetInst(BytecodeInstruction(pc)); SetFrame(frame); SetThread(thread); + SetDispatchTable(dispatch_table); } ALWAYS_INLINE inline void UpdateState(const uint8_t *pc, Frame *frame) @@ -209,7 +215,7 @@ public: ALWAYS_INLINE inline const void *const *GetDispatchTable() const { - return nullptr; + return dispatch_table_; } ALWAYS_INLINE inline ManagedThread *GetThread() const @@ -222,7 +228,10 @@ public: thread_ = thread; } - void SetDispatchTable([[maybe_unused]] const void *const *dispatch_table) {} + void SetDispatchTable(const void *const *dispatch_table) + { + dispatch_table_ = dispatch_table; + } void SaveState() {} @@ -232,6 +241,7 @@ private: BytecodeInstruction inst_; Frame *frame_ {nullptr}; ManagedThread *thread_ {nullptr}; + const void *const *dispatch_table_ {nullptr}; }; #endif // PANDA_ENABLE_GLOBAL_REGISTER_VARIABLES diff --git a/runtime/interpreter/templates/debug_test_interpreter-inl_gen.cpp.erb b/runtime/interpreter/templates/debug_test_interpreter-inl_gen.cpp.erb index 39839a3b7580d92876ac176d5866fd4321425fb1..970921bc533568b10e50c044ac1856c70c972ad8 100644 --- a/runtime/interpreter/templates/debug_test_interpreter-inl_gen.cpp.erb +++ b/runtime/interpreter/templates/debug_test_interpreter-inl_gen.cpp.erb @@ -33,14 +33,14 @@ namespace panda::interpreter { % end % mnemonic = i.mnemonic.split('.').map { |p| p == '64' ? 
'Wide' : p.capitalize }.join -void CSA_BUILD_ANALYZE_ONLY_DO_NOT_CALL_DYNAMIC_<%= i.handler_name %>(ManagedThread *thread, const uint8_t *pc, Frame *frame) { - InstructionHandlerState state(thread, pc, frame); +void CSA_BUILD_ANALYZE_ONLY_DO_NOT_CALL_DYNAMIC_<%= i.handler_name %>(ManagedThread *thread, const uint8_t *pc, Frame *frame, const void *const *dispatch_table) { + InstructionHandlerState state(thread, pc, frame, dispatch_table); <%= namespace %>InstructionHandler handler(&state); handler.template Handle<%= mnemonic %>>(); } -void CSA_BUILD_ANALYZE_ONLY_DO_NOT_CALL_STATIC_<%= i.handler_name %>(ManagedThread *thread, const uint8_t *pc, Frame *frame) { - InstructionHandlerState state(thread, pc, frame); +void CSA_BUILD_ANALYZE_ONLY_DO_NOT_CALL_STATIC_<%= i.handler_name %>(ManagedThread *thread, const uint8_t *pc, Frame *frame, const void *const *dispatch_table) { + InstructionHandlerState state(thread, pc, frame, dispatch_table); <%= namespace %>InstructionHandler handler(&state); handler.template Handle<%= mnemonic %>>(); } diff --git a/runtime/interpreter/templates/interpreter-inl_gen.h.erb b/runtime/interpreter/templates/interpreter-inl_gen.h.erb index 46e47c4c5e078f065faa4069fecc7992db4d34b1..bdce47a073b1351709f646b0928cf788edc21ec9 100644 --- a/runtime/interpreter/templates/interpreter-inl_gen.h.erb +++ b/runtime/interpreter/templates/interpreter-inl_gen.h.erb @@ -21,12 +21,10 @@ namespace panda::interpreter { % [[:nodebug, 'HANDLE_'], [:debug, 'DEBUG_HANDLE_']].each do |mode, prefix| template // NOLINTNEXTLINE(readability-function-size) -% if mode == :nodebug -void ExecuteImpl(ManagedThread *thread, const uint8_t *pc, Frame *frame) { -% else +% if mode == :debug void ExecuteImplDebug(ManagedThread *thread, const uint8_t *pc, Frame *frame) { -% end -% if mode == :nodebug +% else +void ExecuteImpl(ManagedThread *thread, const uint8_t *pc, Frame *frame) { if (UNLIKELY(Runtime::GetCurrent()->IsDebugMode())) { ExecuteImplDebug(thread, pc, frame); return; @@ 
-63,6 +61,35 @@ void ExecuteImplDebug(ManagedThread *thread, const uint8_t *pc, Frame *frame) { static constexpr uint32_t DISPATCH_TABLE_LEN = 256 + NUM_PREFIXED + 1; +% Panda.quickened_plugins.each_key do |namespace| +% if mode == :nodebug + static std::array quick_<%= namespace %>_inst_dispatch_table { +% Quick::select[namespace].each do |ins| + &&HANDLE_<%= ins.handler_name %>, +% end +% n = Panda::dispatch_table.handler_names.size - Quick::select[namespace].size +% (0..n-1).each do || + &&HANDLE_INVALID, +% end + &&EXCEPTION_HANDLER, + }; + + static std::array quick_<%= namespace %>_debug_dispatch_table { +% Quick::select[namespace].each do |ins| + &&HANDLE_DEBUG_SWITCH, +% end + &&EXCEPTION_HANDLER, + }; +% else + static std::array quick_<%= namespace %>_inst_dispatch_table { +% Quick::select[namespace].each do |ins| + &&DEBUG_HANDLE_<%= ins.handler_name %>, +% end + &&DEBUG_EXCEPTION_HANDLER, + }; +% end +% end + % if mode == :nodebug static std::array inst_dispatch_table { % Panda::dispatch_table.handler_names.each do |name| @@ -70,6 +97,14 @@ void ExecuteImplDebug(ManagedThread *thread, const uint8_t *pc, Frame *frame) { % end &&EXCEPTION_HANDLER, }; + + static std::array debug_dispatch_table { +% Panda::dispatch_table.handler_names.each do || + &&HANDLE_DEBUG_SWITCH, +% end + &&EXCEPTION_HANDLER, + }; + % else static std::array inst_dispatch_table { % Panda::dispatch_table.handler_names.each do |name| @@ -79,21 +114,48 @@ void ExecuteImplDebug(ManagedThread *thread, const uint8_t *pc, Frame *frame) { }; % end - auto *dispatch_table = inst_dispatch_table.data(); - SetDispatchTable(dispatch_table); - thread->SetCurrentDispatchTable(dispatch_table); + constexpr bool is_debug = <%= mode == :debug %>; + + if (UNLIKELY((thread->GetCurrentDispatchTable()) == nullptr)) { + if (frame->GetMethod()->GetPandaFile()->GetHeader()->quickened_flag) { + switch (frame->GetMethod()->GetClass()->GetSourceLang()) { +% Panda.quickened_plugins.each_key do |namespace| + case 
panda_file::SourceLang::<%= namespace.upcase %>: + thread->SetCurrentDispatchTable(quick_<%= namespace %>_inst_dispatch_table.data()); % if mode == :debug - thread->SetDebugDispatchTable(inst_dispatch_table.data()); + thread->SetDebugDispatchTable(quick_<%= namespace %>_inst_dispatch_table.data()); +% else + thread->SetDebugDispatchTable(quick_<%= namespace %>_debug_dispatch_table.data()); +% end + break; % end + default: + thread->SetCurrentDispatchTable(inst_dispatch_table.data()); +% if mode == :debug + thread->SetCurrentDispatchTable(inst_dispatch_table.data()); +% else + thread->SetDebugDispatchTable(debug_dispatch_table.data()); +% end + break; + } + } else { + thread->SetCurrentDispatchTable(inst_dispatch_table.data()); +% if mode == :debug + thread->SetDebugDispatchTable(inst_dispatch_table.data()); +% else + thread->SetDebugDispatchTable(debug_dispatch_table.data()); +% end + } + } - InstructionHandlerState state(thread, pc, frame); + InstructionHandlerState state(thread, pc, frame, thread->GetCurrentDispatchTable()); if constexpr (jump_to_eh) { - goto* dispatch_table[DISPATCH_TABLE_LEN - 1]; + goto* state.GetDispatchTable()[DISPATCH_TABLE_LEN - 1]; } - ASSERT(state.IsPrimaryOpcodeValid()); + ASSERT(frame->GetMethod()->GetPandaFile()->GetHeader()->quickened_flag || state.IsPrimaryOpcodeValid()); const void *label; - DISPATCH(GetDispatchTable(dispatch_table), state.GetPrimaryOpcode(), label); + DISPATCH(state.GetDispatchTable(), state.GetPrimaryOpcode(), label); % Panda::instructions.each do |i| % mnemonic = i.mnemonic.split('.').map { |p| p == '64' ? 
'Wide' : p.capitalize }.join @@ -102,7 +164,7 @@ void ExecuteImplDebug(ManagedThread *thread, const uint8_t *pc, Frame *frame) { % if i.namespace != 'core' #ifdef PANDA_WITH_<%= i.namespace.upcase %> % end - <%= namespace %>InstructionHandler handler(&state); + <%= namespace %>InstructionHandler handler(&state); % if mode == :debug if (handler.GetFrame()->IsForcePop() || handler.GetFrame()->IsRetryInstruction()) { goto INSTRUMENT_FRAME_HANDLER; @@ -115,18 +177,21 @@ void ExecuteImplDebug(ManagedThread *thread, const uint8_t *pc, Frame *frame) { % if i.properties.include?('return') if (handler.GetFrame()->IsStackless()) { handler.HandleReturnStackless(); - ASSERT(handler.IsPrimaryOpcodeValid() || (handler.GetExceptionOpcode() == UINT8_MAX + NUM_PREFIXED + 1)); - DISPATCH(GetDispatchTable(dispatch_table), handler.GetExceptionOpcode(), label); + ASSERT(frame->GetMethod()->GetPandaFile()->GetHeader()->quickened_flag + || handler.IsPrimaryOpcodeValid() || (handler.GetExceptionOpcode() == UINT8_MAX + NUM_PREFIXED + 1)); + DISPATCH(state.GetDispatchTable(), handler.GetExceptionOpcode(), label); } else { return; } % else % if !i.exceptions.include?('x_none') - ASSERT(handler.IsPrimaryOpcodeValid() || (handler.GetExceptionOpcode() == UINT8_MAX + NUM_PREFIXED + 1)); - DISPATCH(GetDispatchTable(dispatch_table), handler.GetExceptionOpcode(), label); + ASSERT(frame->GetMethod()->GetPandaFile()->GetHeader()->quickened_flag + || handler.IsPrimaryOpcodeValid() || (handler.GetExceptionOpcode() == UINT8_MAX + NUM_PREFIXED + 1)); + DISPATCH(state.GetDispatchTable(), handler.GetExceptionOpcode(), label); % else - ASSERT(handler.IsPrimaryOpcodeValid()); - DISPATCH(GetDispatchTable(dispatch_table), handler.GetPrimaryOpcode(), label); + ASSERT(frame->GetMethod()->GetPandaFile()->GetHeader()->quickened_flag + || handler.IsPrimaryOpcodeValid()); + DISPATCH(state.GetDispatchTable(), handler.GetPrimaryOpcode(), label); % end % end % if i.namespace != 'core' @@ -146,9 +211,53 @@ void 
ExecuteImplDebug(ManagedThread *thread, const uint8_t *pc, Frame *frame) { ASSERT(secondary_opcode <= <%= Panda::dispatch_table.secondary_opcode_bound(p) %>); const size_t dispatch_idx = <%= Panda::dispatch_table.secondary_opcode_offset(p) %> + secondary_opcode; ASSERT(dispatch_idx < DISPATCH_TABLE_LEN); - DISPATCH(GetDispatchTable(dispatch_table), dispatch_idx, label); + DISPATCH(state.GetDispatchTable(), dispatch_idx, label); +} +% end + +% # quickened handlers +% Panda.quickened_plugins.each_key do |namespace| +% Quick.select[namespace].select { |instr| instr.namespace == namespace }.each do |i| +% mnemonic = i.mnemonic.split('.').map { |p| p == '64' ? 'Wide' : p.capitalize }.join +<%= prefix %><%= i.handler_name %>: { +% namespace = i.namespace == 'core' ? '' : "#{i.namespace}::" +% if i.namespace != 'core' +#ifdef PANDA_WITH_<%= i.namespace.upcase %> +% end + <%= namespace %>InstructionHandler handler(&state); +% if mode == :debug + INSTRUMENT_FRAME(); + handler.InstrumentInstruction(); + handler.GetFrame()->GetAcc() = handler.GetAcc(); +% end + handler.DumpVRegs(); + handler.template Handle<%= mnemonic %>>(); +% if i.properties.include?('return') + if (handler.GetFrame()->IsStackless()) { + handler.HandleReturnStackless(); + ASSERT(frame->GetMethod()->GetPandaFile()->GetHeader()->quickened_flag + || handler.IsPrimaryOpcodeValid() || (handler.GetExceptionOpcode() == UINT8_MAX + NUM_PREFIXED + 1)); + DISPATCH(state.GetDispatchTable(), handler.GetExceptionOpcode(), label); + } else { + return; + } +% else +% if !i.exceptions.include?('x_none') + ASSERT(frame->GetMethod()->GetPandaFile()->GetHeader()->quickened_flag + || handler.IsPrimaryOpcodeValid() || (handler.GetExceptionOpcode() == UINT8_MAX + NUM_PREFIXED + 1)); + DISPATCH(state.GetDispatchTable(), handler.GetExceptionOpcode(), label); +% else + ASSERT(frame->GetMethod()->GetPandaFile()->GetHeader()->quickened_flag + || handler.IsPrimaryOpcodeValid()); + DISPATCH(state.GetDispatchTable(), 
handler.GetPrimaryOpcode(), label); +% end +% end +% if i.namespace != 'core' +#endif // PANDA_WITH_<%= i.namespace.upcase %> +% end } % end +% end % if mode == :debug DEBUG_EXCEPTION_HANDLER: { @@ -157,7 +266,7 @@ EXCEPTION_HANDLER: { % end ASSERT(thread->HasPendingException()); - InstructionHandler handler(&state); + InstructionHandler handler(&state); uint32_t pc_offset = panda_file::INVALID_OFFSET; % if mode == :debug @@ -184,15 +293,15 @@ EXCEPTION_HANDLER: { % end Span sp(handler.GetFrame()->GetMethod()->GetInstructions(), pc_offset); - state = InstructionHandlerState(thread, sp.cend(), handler.GetFrame()); + state = InstructionHandlerState(thread, sp.cend(), handler.GetFrame(), handler.GetDispatchTable()); - ASSERT(state.IsPrimaryOpcodeValid()); - goto* dispatch_table[state.GetPrimaryOpcode()]; + ASSERT(frame->GetMethod()->GetPandaFile()->GetHeader()->quickened_flag || state.IsPrimaryOpcodeValid()); + goto* state.GetDispatchTable()[state.GetPrimaryOpcode()]; } % if mode == :debug INSTRUMENT_FRAME_HANDLER: { - InstructionHandler handler(&state); + InstructionHandler handler(&state); ASSERT(handler.GetFrame()->IsForcePop() || handler.GetFrame()->IsRetryInstruction()); @@ -201,8 +310,8 @@ INSTRUMENT_FRAME_HANDLER: { handler.InstrumentForceReturn(); if (handler.GetFrame()->IsStackless()) { handler.HandleInstrumentForceReturn(); - ASSERT(handler.IsPrimaryOpcodeValid()); - DISPATCH(GetDispatchTable(dispatch_table), handler.GetPrimaryOpcode(), label); + ASSERT(frame->GetMethod()->GetPandaFile()->GetHeader()->quickened_flag || handler.IsPrimaryOpcodeValid()); + DISPATCH(handler.GetDispatchTable(), handler.GetPrimaryOpcode(), label); } else { return; } @@ -210,9 +319,18 @@ INSTRUMENT_FRAME_HANDLER: { handler.GetFrame()->ClearRetryInstruction(); auto* method = handler.GetFrame()->GetMethod(); - state = InstructionHandlerState(thread, method->GetInstructions() + handler.GetFrame()->GetBytecodeOffset(), handler.GetFrame()); - ASSERT(state.IsPrimaryOpcodeValid()); - 
goto* dispatch_table[state.GetPrimaryOpcode()]; + state = InstructionHandlerState(thread, method->GetInstructions() + handler.GetFrame()->GetBytecodeOffset(), handler.GetFrame(), handler.GetDispatchTable()); + ASSERT(frame->GetMethod()->GetPandaFile()->GetHeader()->quickened_flag || state.IsPrimaryOpcodeValid()); + goto* state.GetDispatchTable()[state.GetPrimaryOpcode()]; +} +% end + +% if mode == :nodebug +HANDLE_DEBUG_SWITCH: { + state.GetFrame()->GetAcc() = state.GetAcc(); + ExecuteImplDebug(state.GetThread(), state.GetInst().GetAddress(), + state.GetFrame()); + return; } % end diff --git a/runtime/mem/allocator.cpp b/runtime/mem/allocator.cpp index 473a01ea4ca1203877b66f67620e7d52307a7309..09b61de183d87a0334d47f97af641a2a18123869 100644 --- a/runtime/mem/allocator.cpp +++ b/runtime/mem/allocator.cpp @@ -425,10 +425,17 @@ ObjectAllocatorGen::ObjectAllocatorGen(MemStatsType *mem_stats, bool cr size_t young_space_size = heap_spaces_.GetCurrentMaxYoungSize(); auto young_shared_space_size = Runtime::GetOptions().GetYoungSharedSpaceSize(); ASSERT(young_space_size >= young_shared_space_size); - auto tlabs_count_in_young_gen = (young_space_size - young_shared_space_size) / YOUNG_TLAB_SIZE; - ASSERT(((young_space_size - young_shared_space_size) % YOUNG_TLAB_SIZE) == 0); - ASSERT(YOUNG_ALLOC_MAX_SIZE <= YOUNG_TLAB_SIZE); - ASSERT(tlabs_count_in_young_gen * YOUNG_TLAB_SIZE <= young_space_size); + size_t tlabs_count_in_young_gen; + if constexpr (MT_MODE == MT_MODE_SINGLE) { + // For single-threaded VMs allocate whole private young space for TLAB + tlab_size_ = young_space_size - young_shared_space_size; + tlabs_count_in_young_gen = 1; + } else { + tlabs_count_in_young_gen = (young_space_size - young_shared_space_size) / DEFAULT_YOUNG_TLAB_SIZE; + ASSERT(((young_space_size - young_shared_space_size) % DEFAULT_YOUNG_TLAB_SIZE) == 0); + } + ASSERT(YOUNG_ALLOC_MAX_SIZE <= tlab_size_); + ASSERT(tlabs_count_in_young_gen * tlab_size_ <= young_space_size); // TODO(aemelenko): 
Missed an allocator pointer // because we construct BumpPointer Allocator after calling AllocArena method @@ -533,7 +540,7 @@ void ObjectAllocatorGen::ResetYoungAllocator() template TLAB *ObjectAllocatorGen::CreateNewTLAB([[maybe_unused]] panda::ManagedThread *thread) { - return young_gen_allocator_->CreateNewTLAB(YOUNG_TLAB_SIZE); + return young_gen_allocator_->CreateNewTLAB(tlab_size_); } template diff --git a/runtime/mem/allocator_adapter.h b/runtime/mem/allocator_adapter.h index 552fd17dde75bde7f3b792f2ea6f4c9306e83a18..442dd7b8d016fd1d1d4fc34ddfed492256956024 100644 --- a/runtime/mem/allocator_adapter.h +++ b/runtime/mem/allocator_adapter.h @@ -25,10 +25,10 @@ class AllocatorAdapter; template class AllocatorAdapter { public: - // NOLINTNEXTLINE(readability-identifier-naming) - using value_type = void; - using Pointer = void *; - using ConstPointer = const void *; + // Naming is not by code style because we need to have allocator traits compatibility. Don't change it. + using value_type = void; // NOLINT(readability-identifier-naming) + using pointer = void *; // NOLINT(readability-identifier-naming) + using const_pointer = const void *; // NOLINT(readability-identifier-naming) template struct Rebind { @@ -76,14 +76,14 @@ private: template class AllocatorAdapter { public: - // NOLINTNEXTLINE(readability-identifier-naming) - using value_type = T; - using Pointer = T *; - using Reference = T &; - using ConstPointer = const T *; - using ConstReference = const T &; - using SizeType = size_t; - using DifferenceType = ptrdiff_t; + // Naming is not by code style because we need to have allocator traits compatibility. Don't change it. 
+ using value_type = T; // NOLINT(readability-identifier-naming) + using pointer = T *; // NOLINT(readability-identifier-naming) + using reference = T &; // NOLINT(readability-identifier-naming) + using const_pointer = const T *; // NOLINT(readability-identifier-naming) + using const_reference = const T &; // NOLINT(readability-identifier-naming) + using size_type = size_t; // NOLINT(readability-identifier-naming) + using difference_type = ptrdiff_t; // NOLINT(readability-identifier-naming) template struct Rebind { @@ -122,7 +122,7 @@ public: ~AllocatorAdapter() = default; // NOLINTNEXTLINE(readability-identifier-naming) - Pointer allocate(SizeType size, [[maybe_unused]] const void *hint = nullptr) + pointer allocate(size_type size, [[maybe_unused]] const void *hint = nullptr) { // NOLINTNEXTLINE(bugprone-suspicious-semicolon, readability-braces-around-statements) if constexpr (ALLOC_SCOPE_T == AllocScope::GLOBAL) { @@ -134,7 +134,7 @@ public: } // NOLINTNEXTLINE(readability-identifier-naming) - void deallocate(Pointer ptr, [[maybe_unused]] SizeType size) + void deallocate(pointer ptr, [[maybe_unused]] size_type size) { allocator_->Free(ptr); } diff --git a/runtime/mem/gc/g1/g1-allocator.cpp b/runtime/mem/gc/g1/g1-allocator.cpp index 81802a527f4a5ec8070128f65c49c4d2024b752b..691474da00b466d61b9107f325939dd575198e43 100644 --- a/runtime/mem/gc/g1/g1-allocator.cpp +++ b/runtime/mem/gc/g1/g1-allocator.cpp @@ -95,14 +95,23 @@ std::vector &ObjectAllocatorG1::GetYoungSpaceBitmaps() template TLAB *ObjectAllocatorG1::CreateNewTLAB([[maybe_unused]] panda::ManagedThread *thread) { - // TODO(dtrubenkov): fix this - return object_allocator_->CreateNewTLAB(thread, TLAB_SIZE); + if constexpr (MT_MODE == MT_MODE_SINGLE) { + // For single-threaded VMs allocate a whole region for TLAB + return object_allocator_->CreateRegionSizeTLAB(); + } else { + return object_allocator_->CreateTLAB(TLAB_SIZE); + } } template size_t ObjectAllocatorG1::GetTLABMaxAllocSize() { - return 
PANDA_TLAB_MAX_ALLOC_SIZE; + if constexpr (MT_MODE == MT_MODE_SINGLE) { + // For single-threaded VMs we can allocate objects of size up to region size in TLABs. + return GetYoungAllocMaxSize(); + } else { + return PANDA_TLAB_MAX_ALLOC_SIZE; + } } template diff --git a/runtime/mem/gc/g1/g1-gc.cpp b/runtime/mem/gc/g1/g1-gc.cpp index 09705f63bd47b3387302c278244b1dc9151d68bf..aa795fc81d665484a1070fc6b362a2409329f6d1 100644 --- a/runtime/mem/gc/g1/g1-gc.cpp +++ b/runtime/mem/gc/g1/g1-gc.cpp @@ -13,6 +13,7 @@ * limitations under the License. */ +#include "include/mem/panda_string.h" #include "runtime/include/panda_vm.h" #include "runtime/mem/gc/card_table-inl.h" #include "runtime/mem/gc/dynamic/gc_marker_dynamic-inl.h" @@ -280,13 +281,18 @@ void G1GC::CollectRefsFromMemRange(const MemRange &mem_range, Re ToVoidPtr(mem_range.GetEndAddress()), visitor); } -template -template -void G1GC::CollectNonRegularObjects(GCTask &task) -{ - size_t delete_size = 0; - size_t delete_count = 0; - auto death_checker = [&delete_size, &delete_count](ObjectHeader *object_header) { +template +class NonRegularObjectsDeathChecker { +public: + NonRegularObjectsDeathChecker(size_t *delete_size, size_t *delete_count) + : delete_size_(delete_size), delete_count_(delete_count) + { + } + + ~NonRegularObjectsDeathChecker() = default; + + ObjectStatus operator()(ObjectHeader *object_header) + { // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon) if constexpr (CONCURRENTLY) { // We may face a newly created object without live bitmap initialization. 
@@ -299,18 +305,48 @@ void G1GC::CollectNonRegularObjects(GCTask &task) if (live_bitmap->AtomicTest(object_header)) { return ObjectStatus::ALIVE_OBJECT; } + if constexpr (!COLLECT_CLASSES) { + if (ObjectHelpers::IsClassObject(object_header)) { + LOG_DEBUG_OBJECT_EVENTS << "DELETE NON MOVABLE class object " << object_header + << " but don't free memory"; + return ObjectStatus::ALIVE_OBJECT; + } + } if (region->HasFlag(RegionFlag::IS_LARGE_OBJECT)) { LOG_DEBUG_OBJECT_EVENTS << "DELETE HUMONGOUS object " << object_header; // humongous allocator increases size by region size - delete_size += region->Size(); - ++delete_count; + *delete_size_ += region->Size(); + ++(*delete_count_); } else { ASSERT(region->HasFlag(RegionFlag::IS_NONMOVABLE)); LOG_DEBUG_OBJECT_EVENTS << "DELETE NON MOVABLE object " << object_header; } return ObjectStatus::DEAD_OBJECT; - }; + } + + DEFAULT_COPY_SEMANTIC(NonRegularObjectsDeathChecker); + DEFAULT_MOVE_SEMANTIC(NonRegularObjectsDeathChecker); + +private: + size_t *delete_size_; + size_t *delete_count_; +}; + +template +template +void G1GC::CollectNonRegularObjects(GCTask &task) +{ + size_t delete_size = 0; + size_t delete_count = 0; + // Don't collect classes if --g1-track-free-objects is enabled. + // We need to know size of objects while iterating over all objects in the collected region. + auto death_checker = + g1_track_freed_objects_ + ? GCObjectVisitor( + NonRegularObjectsDeathChecker(&delete_size, &delete_count)) + : GCObjectVisitor( + NonRegularObjectsDeathChecker(&delete_size, &delete_count)); auto region_visitor = [this](PandaVector ®ions) { update_remset_thread_->InvalidateRegions(®ions); }; @@ -944,8 +980,14 @@ void G1GC::VerifyCollectAndMove(HeapVerifierIntoGCGetSettings()->FailOnHeapVerification() && fails_count > 0U) { + PandaStringStream log_stream; + log_stream << "Collection set size: " << collection_set.size() << "\n"; + for (const auto r : collection_set) { + log_stream << *r << (r->HasFlag(RegionFlag::IS_PROMOTED) ? 
" was promoted\n" : "\n"); + } LOG(FATAL, GC) << "Heap was corrupted during CollectAndMove GC phase, HeapVerifier found " << fails_count - << " corruptions"; + << " corruptions\n" + << log_stream.str(); } } } diff --git a/runtime/mem/gc/gc.h b/runtime/mem/gc/gc.h index b6c375291d81609103e09d3eeb0c249d1264214a..728c06b4e00ab9b80aefc6aa93c7b3a69df3f901 100644 --- a/runtime/mem/gc/gc.h +++ b/runtime/mem/gc/gc.h @@ -263,6 +263,12 @@ public: return phase_; } + inline GCTaskCause GetLastGCCause() const + { + // Atomic with acquire order reason: data race with another threads which can update the variable + return last_cause_.load(std::memory_order_acquire); + } + inline bool IsGCRunning() { // Atomic with seq_cst order reason: data race with gc_running_ with requirement for sequentially consistent diff --git a/runtime/mem/gc/hybrid-gc/hybrid_object_allocator.cpp b/runtime/mem/gc/hybrid-gc/hybrid_object_allocator.cpp index e1f254d43e5ddc6d90c3f8633fcc9a55025df76c..5e27fbc756854031bc245742a5d24650bafb33fe 100644 --- a/runtime/mem/gc/hybrid-gc/hybrid_object_allocator.cpp +++ b/runtime/mem/gc/hybrid-gc/hybrid_object_allocator.cpp @@ -124,9 +124,9 @@ size_t HybridObjectAllocator::VerifyAllocatorStatus() return 0; } -TLAB *HybridObjectAllocator::CreateNewTLAB(ManagedThread *thread) +TLAB *HybridObjectAllocator::CreateNewTLAB([[maybe_unused]] ManagedThread *thread) { - return object_allocator_->CreateNewTLAB(thread); + return object_allocator_->CreateTLAB(object_allocator_->GetMaxRegularObjectSize()); } size_t HybridObjectAllocator::GetTLABMaxAllocSize() diff --git a/runtime/mem/heap_manager.cpp b/runtime/mem/heap_manager.cpp index 46de7f85cabc7290e2f70a3603b40352a1f7eba3..a1c0d1cb3e899df82f56825c904a0e42b864aff4 100644 --- a/runtime/mem/heap_manager.cpp +++ b/runtime/mem/heap_manager.cpp @@ -61,7 +61,7 @@ bool HeapManager::Initialize(GCType gc_type, bool single_threaded, bool use_tlab break; } #undef FWD_GC_INIT - if (!object_allocator_.AsObjectAllocator()->IsTLABSupported() 
|| single_threaded) { + if (!object_allocator_.AsObjectAllocator()->IsTLABSupported()) { use_tlab = false; } use_tlab_for_allocations_ = use_tlab; diff --git a/runtime/mem/heap_verifier.cpp b/runtime/mem/heap_verifier.cpp index 3949dd3ed97b30fedd0c78ee72efe77a915f038e..c6c394ae6fe3ed5cb73ce25aac6ea76c38927c79 100644 --- a/runtime/mem/heap_verifier.cpp +++ b/runtime/mem/heap_verifier.cpp @@ -280,8 +280,10 @@ void HeapVerifierIntoGC::CollectVerificationInfo(PandaVector::TraverseAllObjects(object, refs_collector); + if (object->IsMarkedForGC()) { + ref_number = 0; + ObjectHelpers::TraverseAllObjects(object, refs_collector); + } }; obj_allocator->IterateOverObjects(collect_functor); } @@ -329,7 +331,7 @@ size_t HeapVerifierIntoGC::VerifyAll(PandaVector &&ali // Check references in alive objects ObjectVisitor traverse_alive_obj = [&non_young_checker, &same_obj_checker, &ref_number, this, &it](ObjectHeader *object) { - if (this->InCollectableSpace(object) && !this->InAliveSpace(object)) { + if (!object->IsMarkedForGC() || (this->InCollectableSpace(object) && !this->InAliveSpace(object))) { return; } it = this->permanent_verification_info_.find(object); diff --git a/runtime/mem/object_helpers-inl.h b/runtime/mem/object_helpers-inl.h index b99b464e613e3f8367b590a4430a5f65b2fa9d58..7f8503383facb1da7f9d21e71a718096a9b39978 100644 --- a/runtime/mem/object_helpers-inl.h +++ b/runtime/mem/object_helpers-inl.h @@ -26,6 +26,11 @@ namespace panda::mem { +bool GCStaticObjectHelpers::IsClassObject(ObjectHeader *obj) +{ + return obj->ClassAddr()->IsClassClass(); +} + template void GCStaticObjectHelpers::TraverseClass(Class *cls, Handler &handler) { @@ -122,6 +127,11 @@ void GCStaticObjectHelpers::TraverseAllObjectsWithInfo(ObjectHeader *object_head } } +bool GCDynamicObjectHelpers::IsClassObject(ObjectHeader *obj) +{ + return obj->ClassAddr()->IsHClass(); +} + template void GCDynamicObjectHelpers::TraverseClass(coretypes::DynClass *dyn_class, Handler &handler) { diff --git 
a/runtime/mem/object_helpers.h b/runtime/mem/object_helpers.h index dba6ab3720cc81cc71ad56b8ffd808660915a2b5..100ef9eeab4bfabba34379a2e24ddca1d445221c 100644 --- a/runtime/mem/object_helpers.h +++ b/runtime/mem/object_helpers.h @@ -107,6 +107,11 @@ size_t GetDynClassInstanceSize(coretypes::DynClass *object); class GCStaticObjectHelpers { public: + /** + * Check the object is an instance of class + */ + static inline bool IsClassObject(ObjectHeader *obj); + /** * Traverse all kinds of object_header and call obj_visitor for each reference field. */ @@ -141,6 +146,11 @@ private: class GCDynamicObjectHelpers { public: + /** + * Check the object is an instance of class + */ + static inline bool IsClassObject(ObjectHeader *obj); + /** * Traverse all kinds of object_header and call obj_visitor for each reference field. */ diff --git a/runtime/mem/region_allocator-inl.h b/runtime/mem/region_allocator-inl.h index b6593d748a178dac98517e5ce72866a117a70765..6c6f3c262404f53e9705084685b8f9eb0cca0562 100644 --- a/runtime/mem/region_allocator-inl.h +++ b/runtime/mem/region_allocator-inl.h @@ -216,8 +216,7 @@ void *RegionAllocator::Alloc(size_t size, Alignment a } template -TLAB *RegionAllocator::CreateNewTLAB([[maybe_unused]] panda::ManagedThread *thread, - size_t size) +TLAB *RegionAllocator::CreateTLAB(size_t size) { ASSERT(size <= GetMaxRegularObjectSize()); ASSERT(AlignUp(size, GetAlignmentInBytes(DEFAULT_ALIGNMENT)) == size); @@ -227,15 +226,12 @@ TLAB *RegionAllocator::CreateNewTLAB([[maybe_unused]] os::memory::LockHolder lock(this->region_lock_); Region *region = nullptr; // first search in partial tlab map - // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon) - if constexpr (USE_PARTIAL_TLAB) { - auto largest_tlab = retained_tlabs_.begin(); - if (largest_tlab != retained_tlabs_.end() && largest_tlab->first >= size) { - LOG(DEBUG, ALLOC) << "Use retained tlabs region " << region; - region = largest_tlab->second; - 
retained_tlabs_.erase(largest_tlab); - ASSERT(region->HasFlag(RegionFlag::IS_EDEN)); - } + auto largest_tlab = retained_tlabs_.begin(); + if (largest_tlab != retained_tlabs_.end() && largest_tlab->first >= size) { + LOG(DEBUG, ALLOC) << "Use retained tlabs region " << region; + region = largest_tlab->second; + retained_tlabs_.erase(largest_tlab); + ASSERT(region->HasFlag(RegionFlag::IS_EDEN)); } // allocate a free region if none partial tlab has enough space @@ -246,27 +242,12 @@ TLAB *RegionAllocator::CreateNewTLAB([[maybe_unused]] } } if (region != nullptr) { - // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon) - if constexpr (!USE_PARTIAL_TLAB) { - // We don't reuse the same region for different TLABs. - // Therefore, update the size - size = region->GetRemainingSizeForTLABs(); - } - tlab = region->CreateTLAB(size); - ASSERT(tlab != nullptr); - ASAN_UNPOISON_MEMORY_REGION(tlab->GetStartAddr(), tlab->GetSize()); - AllocConfigT::MemoryInit(tlab->GetStartAddr(), tlab->GetSize()); - ASAN_POISON_MEMORY_REGION(tlab->GetStartAddr(), tlab->GetSize()); - LOG(DEBUG, ALLOC) << "Found a region " << region << " and create tlab " << tlab << " with memory starts at " - << tlab->GetStartAddr() << " and with size " << tlab->GetSize(); - // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon) - if constexpr (USE_PARTIAL_TLAB) { - auto remaining_size = region->GetRemainingSizeForTLABs(); - if (remaining_size >= size) { - LOG(DEBUG, ALLOC) << "Add a region " << region << " with remained size " << remaining_size - << " to retained_tlabs"; - retained_tlabs_.insert(std::make_pair(remaining_size, region)); - } + tlab = CreateTLABInRegion(region, size); + auto remaining_size = region->GetRemainingSizeForTLABs(); + if (remaining_size >= size) { + LOG(DEBUG, ALLOC) << "Add a region " << region << " with remained size " << remaining_size + << " to retained_tlabs"; + retained_tlabs_.insert(std::make_pair(remaining_size, 
region)); } } } @@ -274,6 +255,37 @@ TLAB *RegionAllocator::CreateNewTLAB([[maybe_unused]] return tlab; } +template +TLAB *RegionAllocator::CreateRegionSizeTLAB() +{ + TLAB *tlab = nullptr; + + os::memory::LockHolder lock(this->region_lock_); + Region *region = this->template CreateAndSetUpNewRegion(REGION_SIZE, RegionFlag::IS_EDEN); + if (LIKELY(region != nullptr)) { + region->CreateTLABSupport(); + size_t size = region->GetRemainingSizeForTLABs(); + tlab = CreateTLABInRegion(region, size); + } + + return tlab; +} + +template +TLAB *RegionAllocator::CreateTLABInRegion(Region *region, size_t size) +{ + // We don't reuse the same region for different TLABs. + // Therefore, update the size + TLAB *tlab = region->CreateTLAB(size); + ASSERT(tlab != nullptr); + ASAN_UNPOISON_MEMORY_REGION(tlab->GetStartAddr(), tlab->GetSize()); + AllocConfigT::MemoryInit(tlab->GetStartAddr(), tlab->GetSize()); + ASAN_POISON_MEMORY_REGION(tlab->GetStartAddr(), tlab->GetSize()); + LOG(DEBUG, ALLOC) << "Found a region " << region << " and create tlab " << tlab << " with memory starts at " + << tlab->GetStartAddr() << " and with size " << tlab->GetSize(); + return tlab; +} + template // TODO(agrebenkin) add set of flags from which to pick the regions and make it compile time template diff --git a/runtime/mem/region_allocator.h b/runtime/mem/region_allocator.h index b772d210b72b48ad9ec528943263f288db6a593f..4c505a738ad541731475bdd1f9222f0d5a634752 100644 --- a/runtime/mem/region_allocator.h +++ b/runtime/mem/region_allocator.h @@ -191,12 +191,17 @@ public: void Free([[maybe_unused]] void *mem) {} /** - * \brief Create new region allocator as thread local allocator buffer. - * @param thread - pointer to thread + * \brief Create a TLAB of the specified size * @param size - required size of tlab * @return newly allocated TLAB, TLAB is set to Empty is allocation failed. 
*/ - TLAB *CreateNewTLAB(panda::ManagedThread *thread, size_t size = GetMaxRegularObjectSize()); + TLAB *CreateTLAB(size_t size); + + /** + * \brief Create a TLAB in a new region. TLAB will occupy the whole region. + * @return newly allocated TLAB, TLAB is set to Empty is allocation failed. + */ + TLAB *CreateRegionSizeTLAB(); /** * \brief Iterates over all objects allocated by this allocator. @@ -486,6 +491,7 @@ private: template void *AllocRegular(size_t align_size); + TLAB *CreateTLABInRegion(Region *region, size_t size); Region full_region_; Region *eden_current_region_; diff --git a/runtime/mem/vm_handle.h b/runtime/mem/vm_handle.h index cd06e63276d42c816de47a5eacb0c6bf3cfbf74b..7583d1a41f3e19501182d9f2bc777773cfedfb21 100644 --- a/runtime/mem/vm_handle.h +++ b/runtime/mem/vm_handle.h @@ -34,7 +34,9 @@ public: explicit VMHandle(ManagedThread *thread, ObjectHeader *object) { if (object != nullptr) { - address_ = thread->GetTopScope()->NewHandle(object); + auto scope = thread->GetTopScope(); + ASSERT(scope != nullptr); + address_ = scope->NewHandle(object); } else { address_ = reinterpret_cast(nullptr); } diff --git a/runtime/options.yaml b/runtime/options.yaml index 099ef46e357b12056784b5f57069ea2570810f59..7a3b12350072748eb73c97390513d1c7ce0efb30 100644 --- a/runtime/options.yaml +++ b/runtime/options.yaml @@ -103,6 +103,21 @@ options: default: false description: Is debugger enabled on runtime create +- name: sampling-profiler-enable + type: bool + default: false + description: Is the sampling profiler enabled during execution time + +- name: sampling-profiler-interval + type: uint32_t + default: 500 + description: Sampling profiler interval in microseconds + +- name: sampling-profiler-output-file + type: std::string + default: "" + description: Name of file to collect trace in .aspt format + - name: debugger-port type: uint32_t default: 19015 @@ -321,7 +336,7 @@ options: - default - 'true' - 'false' - description: Specify if we need to track removing objects 
(i.e. update objects count in memstats and log removed objects) during the G1GC collection or not. Default means true on the Release and false otherwise. + description: Specify whether G1GC tracks object removing (i.e. update objects count in memstats and log removed objects). Default means true on Release and false otherwise. If the value is true G1GC doesn't delete classes in concurrent phase because it needs to know object size. - name: gc-root-marking-stack-max-size type: uint32_t diff --git a/runtime/runtime.cpp b/runtime/runtime.cpp index 9c52ae18455d53e062aec7a53e669617425d6932..f46b00cd6f66c5c3dc748994a82db63f7e9d1285 100644 --- a/runtime/runtime.cpp +++ b/runtime/runtime.cpp @@ -349,6 +349,12 @@ bool Runtime::Create(const RuntimeOptions &options) instance->GetNotificationManager()->VmInitializationEvent(thread); instance->GetNotificationManager()->ThreadStartEvent(thread); + if (options.IsSamplingProfilerEnable()) { + instance->GetTools().CreateSamplingProfiler(); + instance->GetTools().StartSamplingProfiler(options.GetSamplingProfilerOutputFile(), + options.GetSamplingProfilerInterval()); + } + return true; } @@ -398,6 +404,10 @@ bool Runtime::Destroy() trace::ScopedTrace scoped_trace("Runtime shutdown"); + if (instance->GetOptions().IsSamplingProfilerEnable()) { + instance->GetTools().StopSamplingProfiler(); + } + // when signal start, but no signal stop tracing, should stop it if (Trace::is_tracing) { Trace::StopTracing(); @@ -1096,6 +1106,18 @@ Expected Runtime::ExecutePandaFile(std::string_view filenam class_linker_->GetAotManager()->VerifyClassHierarchy(); } + // Check if all input files are either quickened or not + uint32_t quickened_files = 0; + uint32_t panda_files = 0; + class_linker_->EnumeratePandaFiles([&quickened_files, &panda_files](const panda_file::File &pf) { + quickened_files += pf.GetHeader()->quickened_flag; + panda_files++; + return true; + }); + LOG_IF(quickened_files != 0 && quickened_files != panda_files, FATAL, RUNTIME) + << "All 
input files should be either quickened or not. Got " << quickened_files << " quickened files of " + << panda_files << " input files."; + return Execute(entry_point, args); } @@ -1182,6 +1204,7 @@ coretypes::String *Runtime::ResolveString(PandaVM *vm, const uint8_t *mutf8, uin coretypes::Array *Runtime::ResolveLiteralArray(PandaVM *vm, const Method &caller, uint32_t id) { auto *pf = caller.GetPandaFile(); + id = pf->GetLiteralArrays()[id]; LanguageContext ctx = GetLanguageContext(caller); return ResolveLiteralArray(vm, *pf, id, ctx); } @@ -1394,7 +1417,9 @@ void Runtime::InitNonZygoteOrPostFork(bool is_system_server, [[maybe_unused]] co // TODO(00510180): wait ResetGcPerformanceInfo() ready panda_vm_->PreStartup(); + init_hook(); + mem::GcHung::InitPostFork(is_system_server); } diff --git a/runtime/stack_walker.cpp b/runtime/stack_walker.cpp index fb767d1868fd58627a6f26e6b685d84df95c2a37..bace0c3818a9c2ba87de886a1250a80aeadca2d4 100644 --- a/runtime/stack_walker.cpp +++ b/runtime/stack_walker.cpp @@ -81,7 +81,10 @@ Method *StackWalker::GetMethod() } auto &cframe = GetCFrame(); if (!cframe.IsNative()) { - ASSERT(stackmap_.IsValid()); + // TODO(m.strizhak): replace this condition with assert after fixing JIT trampolines for sampler + if (!stackmap_.IsValid()) { + return nullptr; + } if (IsInlined()) { auto method_variant = code_info_.GetMethod(stackmap_, inline_depth_); if (std::holds_alternative(method_variant)) { @@ -112,6 +115,10 @@ StackWalker::CFrameType StackWalker::CreateCFrame(SlotType *ptr, uintptr_t npc, CalleeStorage *prev_callees) { CFrameType cframe(ptr); + // TODO(m.strizhak): replace this condition with assert after fixing JIT trampolines for sampler + if (cframe.GetMethod() == nullptr) { + return cframe; + } if (cframe.IsNativeMethod()) { return cframe; } diff --git a/runtime/templates/intrinsics.yaml.erb b/runtime/templates/intrinsics.yaml.erb index bc4b2b5ebda094a08492de0bb3c0be34a2ddc056..b77a0a8a256821648b7d18270cffe270d9039c16 100644 --- 
a/runtime/templates/intrinsics.yaml.erb +++ b/runtime/templates/intrinsics.yaml.erb @@ -19,19 +19,22 @@ safepoint_after_call: <%= intrinsic.respond_to?(:safepoint_after_call) && intrinsic.safepoint_after_call %> private: <%= intrinsic.respond_to?(:private) %> signature: +% stackrange = intrinsic.signature.respond_to?(:stackrange) && intrinsic.signature.stackrange +% full_args = intrinsic.signature.args + (stackrange ? ['ptr'] : []) ret: "<%= intrinsic.signature.ret %>" - args: <%= intrinsic.signature.args %> + stackrange: <%= stackrange %> + args: <%= full_args %> static: <%= intrinsic.respond_to?(:static) && intrinsic.static %> % if intrinsic.has_impl? impl: <%= intrinsic.wrapper_impl %> impl_signature: ret: "<%= get_ret_effective_type(intrinsic.signature.ret) %>" - args: <%= (!intrinsic.static ? [get_effective_type(intrinsic.class_name)] : []) + intrinsic.signature.args.map { |t| get_effective_type(t) }.flatten %> + args: <%= (!intrinsic.static ? [get_effective_type(intrinsic.class_name)] : []) + full_args.map { |t| get_effective_type(t) }.flatten %> % if intrinsic.need_abi_wrapper? orig_impl: <%= intrinsic.impl %> orig_impl_signature: ret: "<%= get_ret_type(intrinsic.signature.ret) %>" - args: <%= (!intrinsic.static ? [get_type(intrinsic.class_name)] : []) + intrinsic.signature.args.map { |t| get_type(t) }.flatten %> + args: <%= (!intrinsic.static ? 
[get_type(intrinsic.class_name)] : []) + full_args.map { |t| get_type(t) }.flatten %> % end need_decl: <%= intrinsic.impl.start_with?(Runtime::intrinsics_namespace) %> % end diff --git a/runtime/templates/intrinsics_gen.h.erb b/runtime/templates/intrinsics_gen.h.erb index 8b2a1345843d69712dcfc15ef98e22d21b3906c3..c474eaaa86e4f9b7c12d73fe0bedf408ff613e78 100644 --- a/runtime/templates/intrinsics_gen.h.erb +++ b/runtime/templates/intrinsics_gen.h.erb @@ -77,7 +77,7 @@ bool Initialize(panda::panda_file::SourceLang vm_lang) { return true; } -% Runtime::intrinsics.select(&:has_impl?).each do |intrinsic| +% Runtime::intrinsics.select{ |i| !i.signature.stackrange }.select(&:has_impl?).each do |intrinsic| % if intrinsic.private #ifndef PANDA_PRODUCT_BUILD % end diff --git a/runtime/templates/runtime.rb b/runtime/templates/runtime.rb index 7ed1123e476af2d70e25bcc3899ca3e7186bc7c6..8cdcc447fd9366b2f2e645d340244d0a24905f36 100755 --- a/runtime/templates/runtime.rb +++ b/runtime/templates/runtime.rb @@ -43,6 +43,7 @@ def get_type(type) 'f32' => ['float'], 'f64' => ['double'], 'any' => ['uint64_t'], + 'ptr' => ['void *'], 'acc' => ['uint64_t'], 'string_id' => ['uint32_t'], 'method_id' => ['uint32_t'], @@ -65,6 +66,7 @@ def get_ret_type(type) 'f32' => 'float', 'f64' => 'double', 'any' => 'uint64_t', + 'ptr' => 'void *', 'acc' => 'DecodedTaggedValue', 'string_id' => 'uint32_t', 'method_id' => 'uint32_t', diff --git a/runtime/tests/allocator_test_base.h b/runtime/tests/allocator_test_base.h index eff8041d88e93a12a18ede399278b01a4be6d638..9e325e3b6c2f3d9bb80de20a52ddf21e05048032 100644 --- a/runtime/tests/allocator_test_base.h +++ b/runtime/tests/allocator_test_base.h @@ -234,7 +234,7 @@ protected: * order too */ template - void AllocateFreeDifferentSizesTest(size_t elements_count, size_t pools_count, AllocatorArgs &&...allocatorArgs); + void AllocateFreeDifferentSizesTest(size_t elements_count, size_t pools_count, AllocatorArgs &&...allocator_args); /** * \brief Try to allocate 
too big object, must not allocate memory @@ -268,7 +268,7 @@ protected: * * Allocate and free memory and later reuse. Checking for two start addresses */ - template + template void AllocateReuseTest(size_t alignment_mask, size_t elements_count = 100); /** @@ -361,7 +361,7 @@ protected: * @param max_elements_count - maximum elements which will be allocated during test for each thread */ template - void MT_AllocTest(Allocator *allocator, size_t min_elements_count, size_t max_elements_count); + void MtAllocTest(Allocator *allocator, size_t min_elements_count, size_t max_elements_count); /** * \brief Simultaneously allocate/free objects in different threads @@ -373,7 +373,7 @@ protected: * @param free_granularity - granularity for objects free before total free */ template - void MT_AllocFreeTest(size_t min_elements_count, size_t max_elements_count, size_t free_granularity = 4); + void MtAllocFreeTest(size_t min_elements_count, size_t max_elements_count, size_t free_granularity = 4); /** * \brief Simultaneously allocate objects and iterate over objects (in range too) in different threads @@ -385,7 +385,7 @@ protected: * @param range_iteration_size - size of a iteration range during test. 
Must be a power of two */ template - void MT_AllocIterateTest(size_t min_elements_count, size_t max_elements_count, size_t range_iteration_size); + void MtAllocIterateTest(size_t min_elements_count, size_t max_elements_count, size_t range_iteration_size); /** * \brief Simultaneously allocate and collect objects in different threads @@ -397,7 +397,7 @@ protected: * @param max_thread_with_collect - maximum threads which will call collect simultaneously */ template - void MT_AllocCollectTest(size_t min_elements_count, size_t max_elements_count, size_t max_thread_with_collect = 1); + void MtAllocCollectTest(size_t min_elements_count, size_t max_elements_count, size_t max_thread_with_collect = 1); private: /** @@ -424,23 +424,22 @@ private: */ void MTTestPrologue(Allocator &allocator, size_t alloc_size); - static void MT_AllocRun(AllocatorTest *allocator_test_instance, Allocator *allocator, - std::atomic *num_finished, size_t min_alloc_size, size_t max_alloc_size, - size_t min_elements_count, size_t max_elements_count); + static void MtAllocRun(AllocatorTest *allocator_test_instance, Allocator *allocator, + std::atomic *num_finished, size_t min_alloc_size, size_t max_alloc_size, + size_t min_elements_count, size_t max_elements_count); - static void MT_AllocFreeRun(AllocatorTest *allocator_test_instance, Allocator *allocator, - std::atomic *num_finished, size_t free_granularity, size_t min_alloc_size, - size_t max_alloc_size, size_t min_elements_count, size_t max_elements_count); + static void MtAllocFreeRun(AllocatorTest *allocator_test_instance, Allocator *allocator, + std::atomic *num_finished, size_t free_granularity, size_t min_alloc_size, + size_t max_alloc_size, size_t min_elements_count, size_t max_elements_count); - static void MT_AllocIterateRun(AllocatorTest *allocator_test_instance, Allocator *allocator, - std::atomic *num_finished, size_t range_iteration_size, - size_t min_alloc_size, size_t max_alloc_size, size_t min_elements_count, - size_t 
max_elements_count); + static void MtAllocIterateRun(AllocatorTest *allocator_test_instance, Allocator *allocator, + std::atomic *num_finished, size_t range_iteration_size, size_t min_alloc_size, + size_t max_alloc_size, size_t min_elements_count, size_t max_elements_count); - static void MT_AllocCollectRun(AllocatorTest *allocator_test_instance, Allocator *allocator, - std::atomic *num_finished, size_t min_alloc_size, size_t max_alloc_size, - size_t min_elements_count, size_t max_elements_count, - uint32_t max_thread_with_collect, std::atomic *thread_with_collect); + static void MtAllocCollectRun(AllocatorTest *allocator_test_instance, Allocator *allocator, + std::atomic *num_finished, size_t min_alloc_size, size_t max_alloc_size, + size_t min_elements_count, size_t max_elements_count, + uint32_t max_thread_with_collect, std::atomic *thread_with_collect); static std::unordered_set objects_set_; @@ -726,13 +725,13 @@ inline void AllocatorTest::AllocateTooMuchTest(size_t alloc_size, siz template inline void AllocatorTest::AllocateVectorTest(size_t elements_count) { - using element_type = size_t; + using ElementType = size_t; static constexpr size_t MAGIC_CONST = 3; mem::MemStatsType *mem_stats = new mem::MemStatsType(); Allocator allocator(mem_stats); AddMemoryPoolToAllocatorProtected(allocator); - using adapter_type = typename decltype(allocator.Adapter())::template rebind::other; - std::vector vec(allocator.Adapter()); + using AdapterType = typename decltype(allocator.Adapter())::template Rebind::other; + std::vector vec(allocator.Adapter()); for (size_t i = 0; i < elements_count; i++) { vec.push_back(i * MAGIC_CONST); @@ -753,10 +752,10 @@ inline void AllocatorTest::AllocateVectorTest(size_t elements_count) } template -template +template inline void AllocatorTest::AllocateReuseTest(size_t alignmnent_mask, size_t elements_count) { - static constexpr size_t SIZE_1 = sizeof(element_type); + static constexpr size_t SIZE_1 = sizeof(ElementType); static constexpr 
size_t SIZE_2 = SIZE_1 * 3; mem::MemStatsType *mem_stats = new mem::MemStatsType(); @@ -809,8 +808,8 @@ inline void AllocatorTest::ObjectIteratingSetUp(size_t free_granulari { AddMemoryPoolToAllocator(allocator); size_t allocated_pools = 1; - auto doAllocations = [pools_count]([[maybe_unused]] size_t allocated_pools_count, - [[maybe_unused]] size_t count) -> bool { + auto do_allocations = [pools_count]([[maybe_unused]] size_t allocated_pools_count, + [[maybe_unused]] size_t count) -> bool { if constexpr (ELEMENTS_COUNT_FOR_NOT_POOL_ALLOCATOR == 0) { return allocated_pools_count < pools_count; } else { @@ -820,7 +819,7 @@ inline void AllocatorTest::ObjectIteratingSetUp(size_t free_granulari }; // Allocations - while (doAllocations(allocated_pools, elements_count)) { + while (do_allocations(allocated_pools, elements_count)) { size_t size = RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE); size_t align = RandFromRange(LOG_ALIGN_MIN_VALUE, LOG_ALIGN_MAX_VALUE); void *mem = allocator.Alloc(size, Alignment(align)); @@ -955,8 +954,8 @@ template template inline void AllocatorTest::AsanTest(size_t free_granularity, size_t pools_count) { - using element_type = uint64_t; - static constexpr size_t ALLOC_SIZE = sizeof(element_type); + using ElementType = uint64_t; + static constexpr size_t ALLOC_SIZE = sizeof(ElementType); static constexpr size_t ALLOCATIONS_COUNT = ELEMENTS_COUNT; if (free_granularity == 0) { @@ -984,7 +983,7 @@ inline void AllocatorTest::AsanTest(size_t free_granularity, size_t p if (i % free_granularity == 0) { #ifdef PANDA_ASAN_ON EXPECT_DEATH(DeathWriteUint64(allocated_elements[i]), "") - << "Write " << sizeof(element_type) << " bytes at address " << std::hex << allocated_elements[i]; + << "Write " << sizeof(ElementType) << " bytes at address " << std::hex << allocated_elements[i]; #else continue; #endif // PANDA_ASAN_ON @@ -1032,9 +1031,9 @@ inline void AllocatorTest::AllocatedByThisAllocatorTest(Allocator &al } template -void 
AllocatorTest::MT_AllocRun(AllocatorTest *allocator_test_instance, Allocator *allocator, - std::atomic *num_finished, size_t min_alloc_size, - size_t max_alloc_size, size_t min_elements_count, size_t max_elements_count) +void AllocatorTest::MtAllocRun(AllocatorTest *allocator_test_instance, Allocator *allocator, + std::atomic *num_finished, size_t min_alloc_size, + size_t max_alloc_size, size_t min_elements_count, size_t max_elements_count) { size_t elements_count = allocator_test_instance->RandFromRange(min_elements_count, max_elements_count); std::unordered_set used_indexes; @@ -1080,10 +1079,10 @@ void AllocatorTest::MT_AllocRun(AllocatorTest *allocator_t } template -void AllocatorTest::MT_AllocFreeRun(AllocatorTest *allocator_test_instance, Allocator *allocator, - std::atomic *num_finished, size_t free_granularity, - size_t min_alloc_size, size_t max_alloc_size, size_t min_elements_count, - size_t max_elements_count) +void AllocatorTest::MtAllocFreeRun(AllocatorTest *allocator_test_instance, Allocator *allocator, + std::atomic *num_finished, size_t free_granularity, + size_t min_alloc_size, size_t max_alloc_size, size_t min_elements_count, + size_t max_elements_count) { size_t elements_count = allocator_test_instance->RandFromRange(min_elements_count, max_elements_count); std::unordered_set used_indexes; @@ -1153,11 +1152,11 @@ void AllocatorTest::MT_AllocFreeRun(AllocatorTest *allocat } template -void AllocatorTest::MT_AllocIterateRun(AllocatorTest *allocator_test_instance, - Allocator *allocator, std::atomic *num_finished, - size_t range_iteration_size, size_t min_alloc_size, - size_t max_alloc_size, size_t min_elements_count, - size_t max_elements_count) +void AllocatorTest::MtAllocIterateRun(AllocatorTest *allocator_test_instance, + Allocator *allocator, std::atomic *num_finished, + size_t range_iteration_size, size_t min_alloc_size, + size_t max_alloc_size, size_t min_elements_count, + size_t max_elements_count) { static constexpr size_t 
ITERATION_IN_RANGE_COUNT = 100; size_t elements_count = allocator_test_instance->RandFromRange(min_elements_count, max_elements_count); @@ -1210,12 +1209,12 @@ void AllocatorTest::MT_AllocIterateRun(AllocatorTest *allo } template -void AllocatorTest::MT_AllocCollectRun(AllocatorTest *allocator_test_instance, - Allocator *allocator, std::atomic *num_finished, - size_t min_alloc_size, size_t max_alloc_size, - size_t min_elements_count, size_t max_elements_count, - uint32_t max_thread_with_collect, - std::atomic *thread_with_collect) +void AllocatorTest::MtAllocCollectRun(AllocatorTest *allocator_test_instance, + Allocator *allocator, std::atomic *num_finished, + size_t min_alloc_size, size_t max_alloc_size, + size_t min_elements_count, size_t max_elements_count, + uint32_t max_thread_with_collect, + std::atomic *thread_with_collect) { size_t elements_count = allocator_test_instance->RandFromRange(min_elements_count, max_elements_count); @@ -1277,8 +1276,8 @@ void AllocatorTest::MTTestPrologue(Allocator &allocator, size_t alloc template template -inline void AllocatorTest::MT_AllocTest(Allocator *allocator, size_t min_elements_count, - size_t max_elements_count) +inline void AllocatorTest::MtAllocTest(Allocator *allocator, size_t min_elements_count, + size_t max_elements_count) { #if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32) // We have an issue with QEMU during MT tests. 
Issue 2852 @@ -1286,7 +1285,7 @@ inline void AllocatorTest::MT_AllocTest(Allocator *allocator, size_t #endif std::atomic num_finished = 0; for (size_t i = 0; i < THREADS_COUNT; i++) { - auto tid = os::thread::ThreadStart(&MT_AllocRun, this, allocator, &num_finished, MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, + auto tid = os::thread::ThreadStart(&MtAllocRun, this, allocator, &num_finished, MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, min_elements_count, max_elements_count); os::thread::ThreadDetach(tid); } @@ -1303,8 +1302,8 @@ inline void AllocatorTest::MT_AllocTest(Allocator *allocator, size_t template template -inline void AllocatorTest::MT_AllocFreeTest(size_t min_elements_count, size_t max_elements_count, - size_t free_granularity) +inline void AllocatorTest::MtAllocFreeTest(size_t min_elements_count, size_t max_elements_count, + size_t free_granularity) { #if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32) // We have an issue with QEMU during MT tests. Issue 2852 @@ -1319,7 +1318,7 @@ inline void AllocatorTest::MT_AllocFreeTest(size_t min_elements_count for (size_t i = 0; i < THREADS_COUNT; i++) { (void)free_granularity; - auto tid = os::thread::ThreadStart(&MT_AllocFreeRun, this, &allocator, &num_finished, free_granularity, + auto tid = os::thread::ThreadStart(&MtAllocFreeRun, this, &allocator, &num_finished, free_granularity, MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, min_elements_count, max_elements_count); os::thread::ThreadDetach(tid); } @@ -1337,8 +1336,8 @@ inline void AllocatorTest::MT_AllocFreeTest(size_t min_elements_count template template -inline void AllocatorTest::MT_AllocIterateTest(size_t min_elements_count, size_t max_elements_count, - size_t range_iteration_size) +inline void AllocatorTest::MtAllocIterateTest(size_t min_elements_count, size_t max_elements_count, + size_t range_iteration_size) { #if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32) // We have an issue with QEMU during MT tests. 
Issue 2852 @@ -1353,7 +1352,7 @@ inline void AllocatorTest::MT_AllocIterateTest(size_t min_elements_co for (size_t i = 0; i < THREADS_COUNT; i++) { (void)range_iteration_size; - auto tid = os::thread::ThreadStart(&MT_AllocIterateRun, this, &allocator, &num_finished, range_iteration_size, + auto tid = os::thread::ThreadStart(&MtAllocIterateRun, this, &allocator, &num_finished, range_iteration_size, MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, min_elements_count, max_elements_count); os::thread::ThreadDetach(tid); } @@ -1377,8 +1376,8 @@ inline void AllocatorTest::MT_AllocIterateTest(size_t min_elements_co template template -inline void AllocatorTest::MT_AllocCollectTest(size_t min_elements_count, size_t max_elements_count, - size_t max_thread_with_collect) +inline void AllocatorTest::MtAllocCollectTest(size_t min_elements_count, size_t max_elements_count, + size_t max_thread_with_collect) { #if defined(PANDA_TARGET_ARM64) || defined(PANDA_TARGET_32) // We have an issue with QEMU during MT tests. Issue 2852 @@ -1393,7 +1392,7 @@ inline void AllocatorTest::MT_AllocCollectTest(size_t min_elements_co MTTestPrologue(allocator, RandFromRange(MIN_ALLOC_SIZE, MAX_ALLOC_SIZE)); for (size_t i = 0; i < THREADS_COUNT; i++) { - auto tid = os::thread::ThreadStart(&MT_AllocCollectRun, this, &allocator, &num_finished, MIN_ALLOC_SIZE, + auto tid = os::thread::ThreadStart(&MtAllocCollectRun, this, &allocator, &num_finished, MIN_ALLOC_SIZE, MAX_ALLOC_SIZE, min_elements_count, max_elements_count, max_thread_with_collect, &thread_with_collect); os::thread::ThreadDetach(tid); diff --git a/runtime/tests/arch/aarch64/invokation_helper.S b/runtime/tests/arch/aarch64/invokation_helper.S index 5f687770e4c47cb5d53d905ec08e5c19b0214f93..6b5b541200ffdfda687c9dcdfe8246195875e6a2 100644 --- a/runtime/tests/arch/aarch64/invokation_helper.S +++ b/runtime/tests/arch/aarch64/invokation_helper.S @@ -70,3 +70,34 @@ InvokeHelper: CFI_DEF_CFA(sp, 0) ret CFI_ENDPROC + +// I2CBridgeTestDynFn *I2CBridgeTestDynCallee 
+.comm I2CBridgeTestDynCallee, 8, 8 + +// I2CBridgeTestDynWrapper, follows DynamicMethod calling convention +.global I2CBridgeTestDynWrapper +TYPE_FUNCTION(I2CBridgeTestDynWrapper) +I2CBridgeTestDynWrapper: + CFI_STARTPROC + CFI_DEF_CFA(sp, 0) + stp fp, lr, [sp, -16]! + CFI_ADJUST_CFA_OFFSET(2 * 8) + CFI_REL_OFFSET(lr, 8) + CFI_REL_OFFSET(fp, 0) + mov fp, sp + CFI_DEF_CFA_REGISTER(fp) + + // load pointer to args + add x2, fp, 16 + + adrp lr, I2CBridgeTestDynCallee + ldr lr, [lr, #:lo12:I2CBridgeTestDynCallee] + blr lr + + mov sp, fp + ldp fp, lr, [sp], #16 + CFI_RESTORE(lr) + CFI_RESTORE(fp) + CFI_DEF_CFA(sp, 0) + ret + CFI_ENDPROC diff --git a/runtime/tests/arch/amd64/invokation_helper.S b/runtime/tests/arch/amd64/invokation_helper.S index be1f165a9e4e99c7a91d2a82c853d65abee8324f..8ed34d4a83f2508fbaf99c8427278feed8f04727 100644 --- a/runtime/tests/arch/amd64/invokation_helper.S +++ b/runtime/tests/arch/amd64/invokation_helper.S @@ -93,3 +93,33 @@ InvokeHelper: CFI_DEF_CFA(rsp, (1 * 8)) retq CFI_ENDPROC + + +// I2CBridgeTestDynFn *I2CBridgeTestDynCallee +.comm I2CBridgeTestDynCallee, 8, 8 + +// I2CBridgeTestDynWrapper, follows DynamicMethod calling convention +.global I2CBridgeTestDynWrapper +TYPE_FUNCTION(I2CBridgeTestDynWrapper) +I2CBridgeTestDynWrapper: + CFI_STARTPROC + CFI_DEF_CFA(rsp, 8) + // setup stack frame + pushq %rbp + CFI_ADJUST_CFA_OFFSET(8) + CFI_REL_OFFSET(rbp, 0) + movq %rsp, %rbp + CFI_DEF_CFA_REGISTER(rbp) + + // load pointer to args + leaq 16(%rbp), %rdx + + movq I2CBridgeTestDynCallee(%rip), %rax + callq *%rax + + movq %rbp, %rsp + popq %rbp + CFI_RESTORE(rbp) + CFI_DEF_CFA(rsp, 8) + retq + CFI_ENDPROC diff --git a/runtime/tests/bitmap_clear_range_test.cpp b/runtime/tests/bitmap_clear_range_test.cpp index 64193cf9f6c38593bdfdb0b48807cf7f1edb52c7..2900d7c99ca03b0c03575eb7bc8569877ae3ba31 100644 --- a/runtime/tests/bitmap_clear_range_test.cpp +++ b/runtime/tests/bitmap_clear_range_test.cpp @@ -31,15 +31,15 @@ TEST_F(BitmapTest, ClearRange) 
std::make_unique((HEAP_CAPACITY >> Bitmap::LOG_BITSPERWORD) / DEFAULT_ALIGNMENT_IN_BYTES); MemBitmap bm(ToVoidPtr(heap_begin), HEAP_CAPACITY, bm_ptr.get()); - using mem_range = std::pair; - constexpr mem_range FIRST_RANGE {0, 10_KB + DEFAULT_ALIGNMENT_IN_BYTES}; - constexpr mem_range SECOND_RANGE {DEFAULT_ALIGNMENT_IN_BYTES, DEFAULT_ALIGNMENT_IN_BYTES}; - constexpr mem_range THIRD_RANGE {DEFAULT_ALIGNMENT_IN_BYTES, 2 * DEFAULT_ALIGNMENT_IN_BYTES}; - constexpr mem_range FOURTH_RANGE {DEFAULT_ALIGNMENT_IN_BYTES, 5 * DEFAULT_ALIGNMENT_IN_BYTES}; - constexpr mem_range FIFTH_RANGE {1_KB + DEFAULT_ALIGNMENT_IN_BYTES, 2_KB + 5 * DEFAULT_ALIGNMENT_IN_BYTES}; - constexpr mem_range SIXTH_RANGE {0, HEAP_CAPACITY}; - - std::vector ranges {FIRST_RANGE, SECOND_RANGE, THIRD_RANGE, FOURTH_RANGE, FIFTH_RANGE, SIXTH_RANGE}; + using MemRangeTest = std::pair; + constexpr MemRangeTest FIRST_RANGE {0, 10_KB + DEFAULT_ALIGNMENT_IN_BYTES}; + constexpr MemRangeTest SECOND_RANGE {DEFAULT_ALIGNMENT_IN_BYTES, DEFAULT_ALIGNMENT_IN_BYTES}; + constexpr MemRangeTest THIRD_RANGE {DEFAULT_ALIGNMENT_IN_BYTES, 2 * DEFAULT_ALIGNMENT_IN_BYTES}; + constexpr MemRangeTest FOURTH_RANGE {DEFAULT_ALIGNMENT_IN_BYTES, 5 * DEFAULT_ALIGNMENT_IN_BYTES}; + constexpr MemRangeTest FIFTH_RANGE {1_KB + DEFAULT_ALIGNMENT_IN_BYTES, 2_KB + 5 * DEFAULT_ALIGNMENT_IN_BYTES}; + constexpr MemRangeTest SIXTH_RANGE {0, HEAP_CAPACITY}; + + std::vector ranges {FIRST_RANGE, SECOND_RANGE, THIRD_RANGE, FOURTH_RANGE, FIFTH_RANGE, SIXTH_RANGE}; for (const auto &range : ranges) { bm.IterateOverChunks([&bm](void *mem) { bm.Set(mem); }); diff --git a/runtime/tests/bitmap_page_alignment_test.cpp b/runtime/tests/bitmap_page_alignment_test.cpp index d61d2536175739c31823bb5a14b1bd9fd48fd3a3..1e75146d709edda44edfc0cd81e9575dc98c0a96 100644 --- a/runtime/tests/bitmap_page_alignment_test.cpp +++ b/runtime/tests/bitmap_page_alignment_test.cpp @@ -99,7 +99,7 @@ TEST_F(BitmapTest, TSANMultithreadingTest) auto set_thread = std::thread([&bm, 
&heap_begin, &iterations] { for (size_t i = 0; i < iterations * iterations; i++) { bool value = std::rand() % 2 == 1; - size_t offset = fn_rounddown(std::rand() % heap_capacity, 4_KB); + size_t offset = FnRounddown(std::rand() % heap_capacity, 4_KB); if (value) { bm.AtomicTestAndSet(ToVoidPtr(heap_begin + offset)); diff --git a/runtime/tests/bitmap_test_base.h b/runtime/tests/bitmap_test_base.h index 1c45b3ed861f5fcfda5763683af743e0fd7a90d1..0c968c1702f792a24a9e2142c72015877344039f 100644 --- a/runtime/tests/bitmap_test_base.h +++ b/runtime/tests/bitmap_test_base.h @@ -31,28 +31,31 @@ using BitmapWordType = panda::mem::Bitmap::BitmapWordType; class BitmapVerify { public: using BitmapType = MemBitmap; - BitmapVerify(BitmapType *bitmap, void *begin, void *end) : bitmap_(bitmap), begin_(begin), end_(end) {} + BitmapVerify(BitmapType *bitmap_arg, void *begin_arg, void *end_arg) + : bitmap_(bitmap_arg), begin(begin_arg), end(end_arg) + { + } void operator()(void *obj) { - EXPECT_TRUE(obj >= begin_); - EXPECT_TRUE(obj <= end_); + EXPECT_TRUE(obj >= begin); + EXPECT_TRUE(obj <= end); EXPECT_EQ(bitmap_->Test(obj), ((BitmapType::ToPointerType(obj) & ADDRESS_MASK_TO_SET) != 0)); } BitmapType *const bitmap_; - void *begin_; - void *end_; + void *begin; + void *end; static constexpr BitmapWordType ADDRESS_MASK_TO_SET = 0xF; }; -size_t fn_rounddown(size_t val, size_t alignment) +size_t FnRounddown(size_t val, size_t alignment) { size_t mask = ~((static_cast(1) * alignment) - 1); return val & mask; } -template +template static void RunTest(TestFn &&fn) { auto heap_begin = BitmapTest::HEAP_STARTING_ADDRESS; @@ -66,12 +69,12 @@ static void RunTest(TestFn &&fn) constexpr int TEST_REPEAT = 1; for (int i = 0; i < TEST_REPEAT; ++i) { - auto bm_ptr = std::make_unique((heap_capacity >> Bitmap::LOG_BITSPERWORD) / kAlignment); - MemBitmap bm(ToVoidPtr(heap_begin), heap_capacity, bm_ptr.get()); + auto bm_ptr = std::make_unique((heap_capacity >> Bitmap::LOG_BITSPERWORD) / K_ALIGNMENT); + 
MemBitmap bm(ToVoidPtr(heap_begin), heap_capacity, bm_ptr.get()); constexpr int NUM_BITS_TO_MODIFY = 1000; for (int j = 0; j < NUM_BITS_TO_MODIFY; ++j) { - size_t offset = fn_rounddown(std::rand() % heap_capacity, kAlignment); + size_t offset = FnRounddown(std::rand() % heap_capacity, K_ALIGNMENT); bool set = std::rand() % 2 == 1; if (set) { @@ -83,12 +86,12 @@ static void RunTest(TestFn &&fn) constexpr int NUM_TEST_RANGES = 50; for (int j = 0; j < NUM_TEST_RANGES; ++j) { - const size_t offset = fn_rounddown(std::rand() % heap_capacity, kAlignment); + const size_t offset = FnRounddown(std::rand() % heap_capacity, K_ALIGNMENT); const size_t remain = heap_capacity - offset; - const size_t end = offset + fn_rounddown(std::rand() % (remain + 1), kAlignment); + const size_t end = offset + FnRounddown(std::rand() % (remain + 1), K_ALIGNMENT); size_t manual = 0; - for (ObjectPointerType k = offset; k < end; k += kAlignment) { + for (ObjectPointerType k = offset; k < end; k += K_ALIGNMENT) { if (bm.Test(ToVoidPtr(heap_begin + k))) { manual++; } @@ -99,23 +102,23 @@ static void RunTest(TestFn &&fn) } } -template +template static void RunTestCount() { - auto count_test_fn = [](MemBitmap *bitmap, ObjectPointerType begin, ObjectPointerType end, + auto count_test_fn = [](MemBitmap *bitmap, ObjectPointerType begin, ObjectPointerType end, size_t manual_count) { size_t count = 0; auto count_fn = [&count]([[maybe_unused]] void *obj) { count++; }; bitmap->IterateOverMarkedChunkInRange(ToVoidPtr(begin), ToVoidPtr(end), count_fn); EXPECT_EQ(count, manual_count); }; - RunTest(count_test_fn); + RunTest(count_test_fn); } -template +template void RunTestOrder() { - auto order_test_fn = [](MemBitmap *bitmap, ObjectPointerType begin, ObjectPointerType end, + auto order_test_fn = [](MemBitmap *bitmap, ObjectPointerType begin, ObjectPointerType end, size_t manual_count) { void *last_ptr = nullptr; auto order_check = [&last_ptr](void *obj) { @@ -136,7 +139,7 @@ void RunTestOrder() 
EXPECT_NE(nullptr, last_ptr); } }; - RunTest(order_test_fn); + RunTest(order_test_fn); } TEST_F(BitmapTest, AtomicClearSetTest) diff --git a/runtime/tests/bump_allocator_test.cpp b/runtime/tests/bump_allocator_test.cpp index 154261a0d6dc0429d006c927e109795886a4ad93..ed12902f7c06df2bdbe10def96b8b037a1bad554 100644 --- a/runtime/tests/bump_allocator_test.cpp +++ b/runtime/tests/bump_allocator_test.cpp @@ -21,9 +21,9 @@ namespace panda::mem { -template +template using NonObjectBumpAllocator = - BumpPointerAllocator; + BumpPointerAllocator; class BumpAllocatorTest : public testing::Test { public: @@ -45,7 +45,7 @@ public: for (auto i : allocated_mem_mmap_) { panda::os::mem::UnmapRaw(std::get<0>(i), std::get<1>(i)); } - for (auto i : allocated_arenas) { + for (auto i : allocated_arenas_) { delete i; } PoolManager::Finalize(); @@ -61,12 +61,12 @@ protected: std::pair new_pair {mem, size}; allocated_mem_mmap_.push_back(new_pair); auto arena = new Arena(size, mem); - allocated_arenas.push_back(arena); + allocated_arenas_.push_back(arena); return arena; } std::vector> allocated_mem_mmap_; - std::vector allocated_arenas; + std::vector allocated_arenas_; unsigned seed_; }; @@ -114,18 +114,18 @@ TEST_F(BumpAllocatorTest, AlignedAlloc) TEST_F(BumpAllocatorTest, CreateTLABAndAlloc) { - using ALLOC_TYPE = uint64_t; - static_assert(sizeof(ALLOC_TYPE) % DEFAULT_ALIGNMENT_IN_BYTES == 0); + using AllocType = uint64_t; + static_assert(sizeof(AllocType) % DEFAULT_ALIGNMENT_IN_BYTES == 0); constexpr size_t TLAB_SIZE = SIZE_1M; constexpr size_t COMMON_BUFFER_SIZE = SIZE_1M; - constexpr size_t ALLOC_SIZE = sizeof(ALLOC_TYPE); + constexpr size_t ALLOC_SIZE = sizeof(AllocType); constexpr size_t TLAB_ALLOC_COUNT_SIZE = TLAB_SIZE / ALLOC_SIZE; constexpr size_t COMMON_ALLOC_COUNT_SIZE = COMMON_BUFFER_SIZE / ALLOC_SIZE; size_t mask = DEFAULT_ALIGNMENT_IN_BYTES - 1; - std::array tlab_elements; - std::array common_elements; + std::array tlab_elements; + std::array common_elements; auto pool = 
PoolManager::GetMmapMemPool()->AllocPool(TLAB_SIZE + COMMON_BUFFER_SIZE, SpaceType::SPACE_TYPE_INTERNAL, AllocatorType::BUMP_ALLOCATOR); mem::MemStatsType mem_stats; @@ -134,9 +134,9 @@ TEST_F(BumpAllocatorTest, CreateTLABAndAlloc) // Allocations in common buffer srand(seed_); for (size_t i = 0; i < COMMON_ALLOC_COUNT_SIZE; ++i) { - common_elements[i] = static_cast(allocator.Alloc(sizeof(ALLOC_TYPE))); + common_elements[i] = static_cast(allocator.Alloc(sizeof(AllocType))); ASSERT_TRUE(common_elements[i] != nullptr) << ", seed:" << seed_; - *common_elements[i] = rand() % std::numeric_limits::max(); + *common_elements[i] = rand() % std::numeric_limits::max(); } TLAB *tlab = allocator.CreateNewTLAB(TLAB_SIZE); @@ -145,14 +145,14 @@ TEST_F(BumpAllocatorTest, CreateTLABAndAlloc) // Allocations in TLAB srand(seed_); for (size_t i = 0; i < TLAB_ALLOC_COUNT_SIZE; ++i) { - tlab_elements[i] = static_cast(tlab->Alloc(sizeof(ALLOC_TYPE))); + tlab_elements[i] = static_cast(tlab->Alloc(sizeof(AllocType))); ASSERT_TRUE(tlab_elements[i] != nullptr) << ", seed:" << seed_; - *tlab_elements[i] = rand() % std::numeric_limits::max(); + *tlab_elements[i] = rand() % std::numeric_limits::max(); } // Check that we don't have memory in the buffer: - ASSERT_TRUE(allocator.Alloc(sizeof(ALLOC_TYPE)) == nullptr); - ASSERT_TRUE(tlab->Alloc(sizeof(ALLOC_TYPE)) == nullptr); + ASSERT_TRUE(allocator.Alloc(sizeof(AllocType)) == nullptr); + ASSERT_TRUE(tlab->Alloc(sizeof(AllocType)) == nullptr); // Allocations checking in common buffer srand(seed_); @@ -160,7 +160,7 @@ TEST_F(BumpAllocatorTest, CreateTLABAndAlloc) ASSERT_NE(common_elements[i], nullptr) << "value of i: " << i << ", seed:" << seed_; ASSERT_EQ(reinterpret_cast(common_elements[i]) & mask, static_cast(0)) << "value of i: " << i << ", seed:" << seed_; - ASSERT_EQ(*common_elements[i], rand() % std::numeric_limits::max()) + ASSERT_EQ(*common_elements[i], rand() % std::numeric_limits::max()) << "value of i: " << i << ", seed:" << seed_; } @@ 
-170,7 +170,7 @@ TEST_F(BumpAllocatorTest, CreateTLABAndAlloc) ASSERT_NE(tlab_elements[i], nullptr) << "value of i: " << i << ", seed:" << seed_; ASSERT_EQ(reinterpret_cast(tlab_elements[i]) & mask, static_cast(0)) << "value of i: " << i << ", seed:" << seed_; - ASSERT_EQ(*tlab_elements[i], rand() % std::numeric_limits::max()) + ASSERT_EQ(*tlab_elements[i], rand() % std::numeric_limits::max()) << "value of i: " << i << ", seed:" << seed_; } } @@ -182,22 +182,22 @@ TEST_F(BumpAllocatorTest, CreateTLABAndAlloc) // Allocations in TLAB srand(seed_); for (size_t i = 0; i < TLAB_ALLOC_COUNT_SIZE; ++i) { - tlab_elements[i] = static_cast(tlab->Alloc(sizeof(ALLOC_TYPE))); + tlab_elements[i] = static_cast(tlab->Alloc(sizeof(AllocType))); ASSERT_TRUE(tlab_elements[i] != nullptr) << ", seed:" << seed_; - *tlab_elements[i] = rand() % std::numeric_limits::max(); + *tlab_elements[i] = rand() % std::numeric_limits::max(); } // Allocations in common buffer srand(seed_); for (size_t i = 0; i < COMMON_ALLOC_COUNT_SIZE; ++i) { - common_elements[i] = static_cast(allocator.Alloc(sizeof(ALLOC_TYPE))); + common_elements[i] = static_cast(allocator.Alloc(sizeof(AllocType))); ASSERT_TRUE(common_elements[i] != nullptr) << ", seed:" << seed_; - *common_elements[i] = rand() % std::numeric_limits::max(); + *common_elements[i] = rand() % std::numeric_limits::max(); } // Check that we don't have memory in the buffer: - ASSERT_TRUE(allocator.Alloc(sizeof(ALLOC_TYPE)) == nullptr); - ASSERT_TRUE(tlab->Alloc(sizeof(ALLOC_TYPE)) == nullptr); + ASSERT_TRUE(allocator.Alloc(sizeof(AllocType)) == nullptr); + ASSERT_TRUE(tlab->Alloc(sizeof(AllocType)) == nullptr); // Allocations checking in TLAB srand(seed_); @@ -205,7 +205,7 @@ TEST_F(BumpAllocatorTest, CreateTLABAndAlloc) ASSERT_NE(tlab_elements[i], nullptr) << "value of i: " << i << ", seed:" << seed_; ASSERT_EQ(reinterpret_cast(tlab_elements[i]) & mask, static_cast(0)) << "value of i: " << i << ", seed:" << seed_; - ASSERT_EQ(*tlab_elements[i], rand() % 
std::numeric_limits::max()) + ASSERT_EQ(*tlab_elements[i], rand() % std::numeric_limits::max()) << "value of i: " << i << ", seed:" << seed_; } @@ -215,7 +215,7 @@ TEST_F(BumpAllocatorTest, CreateTLABAndAlloc) ASSERT_NE(common_elements[i], nullptr) << "value of i: " << i << ", seed:" << seed_; ASSERT_EQ(reinterpret_cast(common_elements[i]) & mask, static_cast(0)) << "value of i: " << i << ", seed:" << seed_; - ASSERT_EQ(*common_elements[i], rand() % std::numeric_limits::max()) + ASSERT_EQ(*common_elements[i], rand() % std::numeric_limits::max()) << "value of i: " << i << ", seed:" << seed_; } } diff --git a/runtime/tests/c2i_bridge_test.cpp b/runtime/tests/c2i_bridge_test.cpp index daa9cfdf537690f3f2253def21121c900d775b9b..24ae941acff2bb92deeef3d4fc7daac6e6e6c510 100644 --- a/runtime/tests/c2i_bridge_test.cpp +++ b/runtime/tests/c2i_bridge_test.cpp @@ -66,8 +66,8 @@ public: #endif Runtime::Create(options); - thread_ = MTManagedThread::GetCurrent(); - thread_->ManagedCodeBegin(); + thread = MTManagedThread::GetCurrent(); + thread->ManagedCodeBegin(); SetUpHelperFunctions("PandaAssembly"); } @@ -76,7 +76,7 @@ public: if constexpr (RUNTIME_ARCH == Arch::AARCH32) { return; } - thread_->ManagedCodeEnd(); + thread->ManagedCodeEnd(); Runtime::Destroy(); } @@ -97,17 +97,17 @@ public: class_linker->AddPandaFile(std::move(pf)); auto descriptor = std::make_unique(); - std::optional lang = panda_file::LanguageFromString(language); - if (!lang) { + std::optional lang_local = panda_file::LanguageFromString(language); + if (!lang_local) { UNREACHABLE(); } - auto *extension = class_linker->GetExtension(lang.value_or(panda_file::SourceLang::PANDA_ASSEMBLY)); + auto *extension = class_linker->GetExtension(lang_local.value_or(panda_file::SourceLang::PANDA_ASSEMBLY)); Class *klass = extension->GetClass(ClassHelper::GetDescriptor(utf::CStringAsMutf8("TestUtils"), descriptor.get())); - Method *cmpDyn = klass->GetDirectMethod(utf::CStringAsMutf8("cmpDyn")); - ASSERT_NE(cmpDyn, nullptr); 
- cmpDyn->SetCompiledEntryPoint(reinterpret_cast(CmpDynImpl)); + Method *cmp_dyn = klass->GetDirectMethod(utf::CStringAsMutf8("cmpDyn")); + ASSERT_NE(cmp_dyn, nullptr); + cmp_dyn->SetCompiledEntryPoint(reinterpret_cast(CmpDynImpl)); Method *ldundefined = klass->GetDirectMethod(utf::CStringAsMutf8("ldundefined")); ASSERT_NE(ldundefined, nullptr); @@ -118,7 +118,7 @@ public: { Runtime *runtime = Runtime::GetCurrent(); ClassLinker *class_linker = runtime->GetClassLinker(); - LanguageContext ctx = runtime->GetLanguageContext(lang_); + LanguageContext ctx = runtime->GetLanguageContext(lang); std::ostringstream out; out << ".language " << ctx << '\n'; @@ -166,7 +166,7 @@ public: { Runtime *runtime = Runtime::GetCurrent(); ClassLinker *class_linker = runtime->GetClassLinker(); - LanguageContext ctx = runtime->GetLanguageContext(lang_); + LanguageContext ctx = runtime->GetLanguageContext(lang); std::ostringstream out; std::ostringstream signature; @@ -264,8 +264,8 @@ public: return main; } - MTManagedThread *thread_ {nullptr}; - panda_file::SourceLang lang_ {panda_file::SourceLang::PANDA_ASSEMBLY}; + MTManagedThread *thread {nullptr}; + panda_file::SourceLang lang {panda_file::SourceLang::PANDA_ASSEMBLY}; }; TEST_F(CompiledCodeToInterpreterBridgeTest, InvokeVoidNoArg) diff --git a/runtime/tests/card_table_test.cpp b/runtime/tests/card_table_test.cpp index 93c4a55f1957eff99d0bb5c1c5e7549c52eeec8f..fe062494e882d1f4296cf822be453bd0fc729899 100644 --- a/runtime/tests/card_table_test.cpp +++ b/runtime/tests/card_table_test.cpp @@ -32,12 +32,12 @@ namespace panda::mem::test { class CardTableTest : public testing::Test { protected: // static constexpr size_t kHeapSize = 0xffffffff; - static constexpr size_t kAllocCount = 1000; + static constexpr size_t K_ALLOC_COUNT = 1000; // static constexpr size_t maxCardIndex = kHeapSize / ::panda::mem::CardTable::GetCardSize(); - std::mt19937 gen; - std::uniform_int_distribution addrDis; - std::uniform_int_distribution cardIndexDis; - 
mem::MemStatsType *mem_stats; + std::mt19937 gen_; + std::uniform_int_distribution addr_dis_; + std::uniform_int_distribution card_index_dis_; + mem::MemStatsType *mem_stats_; CardTableTest() { @@ -56,9 +56,9 @@ protected: thread_->ManagedCodeBegin(); internal_allocator_ = thread_->GetVM()->GetHeapManager()->GetInternalAllocator(); - addrDis = std::uniform_int_distribution(0, GetPoolSize() - 1); + addr_dis_ = std::uniform_int_distribution(0, GetPoolSize() - 1); ASSERT(GetPoolSize() % CardTable::GetCardSize() == 0); - cardIndexDis = std::uniform_int_distribution(0, GetPoolSize() / CardTable::GetCardSize() - 1); + card_index_dis_ = std::uniform_int_distribution(0, GetPoolSize() / CardTable::GetCardSize() - 1); card_table_ = std::make_unique(internal_allocator_, GetMinAddress(), GetPoolSize()); card_table_->Initialize(); } @@ -72,7 +72,7 @@ protected: void SetUp() override { - gen.seed(seed_); + gen_.seed(seed_); } void TearDown() override @@ -96,12 +96,12 @@ protected: uintptr_t GetRandomAddress() { - return PoolManager::GetMmapMemPool()->GetMinObjectAddress() + addrDis(gen) % GetPoolSize(); + return PoolManager::GetMmapMemPool()->GetMinObjectAddress() + addr_dis_(gen_) % GetPoolSize(); } size_t GetRandomCardIndex() { - return cardIndexDis(gen) % GetPoolSize(); + return card_index_dis_(gen_) % GetPoolSize(); } // generate address at the begining of the card @@ -118,23 +118,23 @@ protected: TEST_F(CardTableTest, MarkTest) { - size_t markedCnt = 0; + size_t marked_cnt = 0; - for (size_t i = 0; i < kAllocCount; i++) { + for (size_t i = 0; i < K_ALLOC_COUNT; i++) { uintptr_t addr; addr = GetRandomAddress(); if (!card_table_->IsMarked(addr)) { - ++markedCnt; + ++marked_cnt; card_table_->MarkCard(addr); } } for (auto card : *card_table_) { if (card->IsMarked()) { - markedCnt--; + marked_cnt--; } } - ASSERT_EQ(markedCnt, 0); + ASSERT_EQ(marked_cnt, 0); } TEST_F(CardTableTest, MarkAndClearAllTest) @@ -158,13 +158,13 @@ TEST_F(CardTableTest, MarkAndClearAllTest) 
TEST_F(CardTableTest, ClearTest) { - std::set addrSet; + std::set addr_set; // Mark some cards not more than once - while (addrSet.size() <= kAllocCount) { + while (addr_set.size() <= K_ALLOC_COUNT) { uintptr_t addr; addr = GetRandomCardAddress(); - if (!addrSet.insert(addr).second) { + if (!addr_set.insert(addr).second) { continue; } card_table_->MarkCard(addr); @@ -179,7 +179,7 @@ TEST_F(CardTableTest, ClearTest) } } - ASSERT_EQ(addrSet.size(), cleared_cnt); + ASSERT_EQ(addr_set.size(), cleared_cnt); // check that there are no marked for (auto card : *card_table_) { ASSERT_EQ(card->IsMarked(), false); @@ -188,13 +188,13 @@ TEST_F(CardTableTest, ClearTest) TEST_F(CardTableTest, ClearAllTest) { - std::set addrSet; + std::set addr_set; // Mark some cards not more than once - while (addrSet.size() < kAllocCount) { + while (addr_set.size() < K_ALLOC_COUNT) { uintptr_t addr; addr = GetRandomCardAddress(); - if (!addrSet.insert(addr).second) { + if (!addr_set.insert(addr).second) { continue; } card_table_->MarkCard(addr); @@ -223,21 +223,21 @@ TEST_F(CardTableTest, corner_cases) card_table_->MarkCard(last); ASSERT_EQ(card_table_->IsMarked(last), true); // Mark last byte of second card - uintptr_t secondLast = GetMinAddress() + 2 * card_table_->GetCardSize() - 1; - ASSERT_EQ(card_table_->IsMarked(secondLast), false); - card_table_->MarkCard(secondLast); + uintptr_t second_last = GetMinAddress() + 2 * card_table_->GetCardSize() - 1; + ASSERT_EQ(card_table_->IsMarked(second_last), false); + card_table_->MarkCard(second_last); ASSERT_EQ(((*card_table_->begin()) + 1)->IsMarked(), true); } TEST_F(CardTableTest, VisitMarked) { - size_t markedCnt = 0; + size_t marked_cnt = 0; - while (markedCnt < kAllocCount) { + while (marked_cnt < K_ALLOC_COUNT) { uintptr_t addr; addr = GetRandomAddress(); if (!card_table_->IsMarked(addr)) { - ++markedCnt; + ++marked_cnt; card_table_->MarkCard(addr); } } diff --git a/runtime/tests/compiler_queue_test.cpp 
b/runtime/tests/compiler_queue_test.cpp index 4a828e7e765b82f68ffeb3fa4e9dd9080708f9a8..da6f92c9fc9606a18c43d5acafcd2210af85c07b 100644 --- a/runtime/tests/compiler_queue_test.cpp +++ b/runtime/tests/compiler_queue_test.cpp @@ -93,15 +93,15 @@ static void GetAndCheckMethodIfExists(CompilerQueueInterface *queue, Method *tar static void WaitForExpire(uint millis) { - constexpr uint delta = 10; - uint64_t startTime = time::GetCurrentTimeInMillis(); + constexpr uint DELTA = 10; + uint64_t start_time = time::GetCurrentTimeInMillis(); std::this_thread::sleep_for(std::chrono::milliseconds(millis)); // sleep_for() works nondeterministically // use an additional check for more confidence // Note, the queue implementation uses GetCurrentTimeInMillis // to update aged counter - while (time::GetCurrentTimeInMillis() < startTime + millis) { - std::this_thread::sleep_for(std::chrono::milliseconds(delta)); + while (time::GetCurrentTimeInMillis() < start_time + millis) { + std::this_thread::sleep_for(std::chrono::milliseconds(DELTA)); } } @@ -182,9 +182,9 @@ TEST_F(CompilerQueueTest, Expire) ASSERT_NE(g_method, nullptr); RuntimeOptions options; - constexpr int CompilerTaskLifeSpan1 = 500; + constexpr int COMPILER_TASK_LIFE_SPAN1 = 500; CompilerPriorityCounterQueue queue(thread_->GetVM()->GetHeapManager()->GetInternalAllocator(), - options.GetCompilerQueueMaxLength(), CompilerTaskLifeSpan1); + options.GetCompilerQueueMaxLength(), COMPILER_TASK_LIFE_SPAN1); queue.AddTask(CompilerTask {main_method, 0}); queue.AddTask(CompilerTask {f_method, 0}); queue.AddTask(CompilerTask {g_method, 0}); @@ -195,9 +195,9 @@ TEST_F(CompilerQueueTest, Expire) auto method = queue.GetTask().GetMethod(); ASSERT_EQ(method, nullptr); - constexpr int CompilerTaskLifeSpan2 = 0; + constexpr int COMPILER_TASK_LIFE_SPAN2 = 0; CompilerPriorityCounterQueue queue2(thread_->GetVM()->GetHeapManager()->GetInternalAllocator(), - options.GetCompilerQueueMaxLength(), CompilerTaskLifeSpan2); + 
options.GetCompilerQueueMaxLength(), COMPILER_TASK_LIFE_SPAN2); queue2.AddTask(CompilerTask {main_method, 0}); queue2.AddTask(CompilerTask {f_method, 0}); queue2.AddTask(CompilerTask {g_method, 0}); @@ -262,9 +262,9 @@ TEST_F(CompilerQueueTest, MaxLimit) g_method->SetHotnessCounter(3); RuntimeOptions options; - constexpr int CompilerQueueMaxLength1 = 100; + constexpr int COMPILER_QUEUE_MAX_LENGTH1 = 100; CompilerPriorityCounterQueue queue(thread_->GetVM()->GetHeapManager()->GetInternalAllocator(), - CompilerQueueMaxLength1, options.GetCompilerTaskLifeSpan()); + COMPILER_QUEUE_MAX_LENGTH1, options.GetCompilerTaskLifeSpan()); for (int i = 0; i < 40; i++) { queue.AddTask(CompilerTask {main_method, 0}); @@ -282,9 +282,9 @@ TEST_F(CompilerQueueTest, MaxLimit) ASSERT_EQ(method, nullptr); // check an option - constexpr int CompilerQueueMaxLength2 = 1; + constexpr int COMPILER_QUEUE_MAX_LENGTH2 = 1; CompilerPriorityCounterQueue queue2(thread_->GetVM()->GetHeapManager()->GetInternalAllocator(), - CompilerQueueMaxLength2, options.GetCompilerTaskLifeSpan()); + COMPILER_QUEUE_MAX_LENGTH2, options.GetCompilerTaskLifeSpan()); queue2.AddTask(CompilerTask {main_method, 0}); queue2.AddTask(CompilerTask {f_method, 0}); @@ -378,10 +378,10 @@ TEST_F(CompilerQueueTest, AgedExpire) g_method->SetHotnessCounter(1000); RuntimeOptions options; - constexpr int CompilerEpochDuration1 = 500; + constexpr int COMPILER_EPOCH_DURATION1 = 500; CompilerPriorityAgedCounterQueue queue(thread_->GetVM()->GetHeapManager()->GetInternalAllocator(), options.GetCompilerQueueMaxLength(), options.GetCompilerDeathCounterValue(), - CompilerEpochDuration1); + COMPILER_EPOCH_DURATION1); queue.AddTask(CompilerTask {main_method, 0}); queue.AddTask(CompilerTask {f_method, 0}); queue.AddTask(CompilerTask {g_method, 0}); @@ -392,10 +392,10 @@ TEST_F(CompilerQueueTest, AgedExpire) auto method = queue.GetTask().GetMethod(); ASSERT_EQ(method, nullptr); - constexpr int CompilerEpochDuration2 = 1; + constexpr int 
COMPILER_EPOCH_DURATION2 = 1; CompilerPriorityAgedCounterQueue queue2(thread_->GetVM()->GetHeapManager()->GetInternalAllocator(), options.GetCompilerQueueMaxLength(), options.GetCompilerDeathCounterValue(), - CompilerEpochDuration2); + COMPILER_EPOCH_DURATION2); queue2.AddTask(CompilerTask {main_method, 0}); queue2.AddTask(CompilerTask {f_method, 0}); @@ -482,9 +482,9 @@ TEST_F(CompilerQueueTest, AgedMaxLimit) ASSERT_EQ(method, nullptr); // check an option - constexpr int CompilerQueueMaxLength = 1; + constexpr int COMPILER_QUEUE_MAX_LENGTH = 1; CompilerPriorityAgedCounterQueue queue2(thread_->GetVM()->GetHeapManager()->GetInternalAllocator(), - CompilerQueueMaxLength, options.GetCompilerDeathCounterValue(), + COMPILER_QUEUE_MAX_LENGTH, options.GetCompilerDeathCounterValue(), options.GetCompilerEpochDuration()); queue2.AddTask(CompilerTask {main_method, 0}); @@ -514,9 +514,9 @@ TEST_F(CompilerQueueTest, AgedDeathCounter) g_method->SetHotnessCounter(30000); RuntimeOptions options; - constexpr int CompilerDeathCounterValue = 50; + constexpr int COMPILER_DEATH_COUNTER_VALUE = 50; CompilerPriorityAgedCounterQueue queue(thread_->GetVM()->GetHeapManager()->GetInternalAllocator(), - options.GetCompilerQueueMaxLength(), CompilerDeathCounterValue, + options.GetCompilerQueueMaxLength(), COMPILER_DEATH_COUNTER_VALUE, options.GetCompilerEpochDuration()); queue.AddTask(CompilerTask {main_method, 0}); diff --git a/runtime/tests/debugger_test.cpp b/runtime/tests/debugger_test.cpp index 53e283c3fc9a76a548ab35ddb3af08a1cd660822..b6dbc95ac83ed2f16e98f505192e7226d6b7271d 100644 --- a/runtime/tests/debugger_test.cpp +++ b/runtime/tests/debugger_test.cpp @@ -61,11 +61,11 @@ static uint64_t FromPtr(ObjectHeader *ptr) return static_cast(reinterpret_cast(ptr)); } -template +template static Frame *CreateFrame(size_t nregs, Method *method, Frame *prev) { uint32_t ext_sz = EmptyExtFrameDataSize; - void *mem = aligned_alloc(8, Frame::GetAllocSize(Frame::GetActualSize(nregs), ext_sz)); + void 
*mem = aligned_alloc(8, Frame::GetAllocSize(Frame::GetActualSize(nregs), ext_sz)); return new (Frame::FromExt(mem, ext_sz)) Frame(mem, method, prev, nregs); } diff --git a/runtime/tests/exception_test.cpp b/runtime/tests/exception_test.cpp index 56acc74b3ce39d8e42858491184bcc90b626b0af..a9fee9cabdec05f495dad2bf149cfeac84246e23 100644 --- a/runtime/tests/exception_test.cpp +++ b/runtime/tests/exception_test.cpp @@ -25,7 +25,7 @@ namespace panda::test { -inline std::string separator() +inline std::string Separator() { #ifdef _WIN32 return "\\"; @@ -52,7 +52,7 @@ protected: auto exec_path = panda::os::file::File::GetExecutablePath(); std::string panda_std_lib = - exec_path.Value() + separator() + ".." + separator() + "pandastdlib" + separator() + "pandastdlib.bin"; + exec_path.Value() + Separator() + ".." + Separator() + "pandastdlib" + Separator() + "pandastdlib.bin"; options_.SetBootPandaFiles({panda_std_lib}); } diff --git a/runtime/tests/frame_allocator_test.cpp b/runtime/tests/frame_allocator_test.cpp index 1f530f64d4aa7d328858432014ecd6a7a5a9bb35..a53b08588b31e8e94075a4d7a14aaaf9e4eb71e6 100644 --- a/runtime/tests/frame_allocator_test.cpp +++ b/runtime/tests/frame_allocator_test.cpp @@ -95,19 +95,19 @@ public: ASSERT_NE(alloc3.Alloc(THIRD_FRAME_SIZE), nullptr); } - template + template void AlignmentTest(bool use_malloc) const { - FrameAllocator alloc(use_malloc); + FrameAllocator alloc(use_malloc); constexpr size_t MAX_SIZE = 256; std::array array {nullptr}; for (size_t i = 1; i <= MAX_SIZE; i++) { - array[i] = alloc.Alloc(i * GetAlignmentInBytes(alignment)); + array[i] = alloc.Alloc(i * GetAlignmentInBytes(ALIGNMENT)); if (array[i] == nullptr) { break; } ASSERT_NE(array[i], nullptr); - ASSERT_EQ(ToUintPtr(array[i]), AlignUp(ToUintPtr(array[i]), GetAlignmentInBytes(alignment))); + ASSERT_EQ(ToUintPtr(array[i]), AlignUp(ToUintPtr(array[i]), GetAlignmentInBytes(ALIGNMENT))); *(static_cast(array[i])) = i; } for (size_t i = MAX_SIZE; i != 0; i--) { diff --git 
a/runtime/tests/frame_test.cpp b/runtime/tests/frame_test.cpp index 70e6dbdc5b5eed94c2a18c2ff2021bf4afd598ed..2bb65c5ed25a8783810913097af8531df7ebef46 100644 --- a/runtime/tests/frame_test.cpp +++ b/runtime/tests/frame_test.cpp @@ -45,11 +45,11 @@ protected: panda::MTManagedThread *thread_; }; -template +template Frame *CreateFrame(size_t nregs, Method *method, Frame *prev) { uint32_t ext_sz = EmptyExtFrameDataSize; - void *mem = aligned_alloc(8, panda::Frame::GetAllocSize(Frame::GetActualSize(nregs), ext_sz)); + void *mem = aligned_alloc(8, panda::Frame::GetAllocSize(Frame::GetActualSize(nregs), ext_sz)); return new (Frame::FromExt(mem, ext_sz)) panda::Frame(mem, method, prev, nregs); } diff --git a/runtime/tests/freelist_allocator_test.cpp b/runtime/tests/freelist_allocator_test.cpp index d623ccc953336f4cc8e79d0e3605aff2b0ea3a89..e7978d47b3de41ac631c7d4b13a92314e4930091 100644 --- a/runtime/tests/freelist_allocator_test.cpp +++ b/runtime/tests/freelist_allocator_test.cpp @@ -186,7 +186,7 @@ TEST_F(FreeListAllocatorTest, AllocatedByFreeListAllocatorTest) TEST_F(FreeListAllocatorTest, FailedLinksTest) { - static constexpr size_t min_alloc_size = FREELIST_ALLOCATOR_MIN_SIZE; + static constexpr size_t MIN_ALLOC_SIZE = FREELIST_ALLOCATOR_MIN_SIZE; mem::MemStatsType *mem_stats = new mem::MemStatsType(); NonObjectFreeListAllocator allocator(mem_stats); AddMemoryPoolToAllocator(allocator); @@ -194,52 +194,52 @@ TEST_F(FreeListAllocatorTest, FailedLinksTest) std::array, 3> memory_elements; for (size_t i = 0; i < 3; i++) { - void *mem = allocator.Alloc(min_alloc_size); + void *mem = allocator.Alloc(MIN_ALLOC_SIZE); ASSERT_TRUE(mem != nullptr); - size_t index = SetBytesFromByteArray(mem, min_alloc_size); + size_t index = SetBytesFromByteArray(mem, MIN_ALLOC_SIZE); std::pair new_pair(mem, index); memory_elements.at(i) = new_pair; } pair = memory_elements[1]; - ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(pair), min_alloc_size, std::get<1>(pair))); + 
ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(pair), MIN_ALLOC_SIZE, std::get<1>(pair))); allocator.Free(std::get<0>(pair)); pair = memory_elements[0]; - ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(pair), min_alloc_size, std::get<1>(pair))); + ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(pair), MIN_ALLOC_SIZE, std::get<1>(pair))); allocator.Free(std::get<0>(pair)); { - void *mem = allocator.Alloc(min_alloc_size * 2); + void *mem = allocator.Alloc(MIN_ALLOC_SIZE * 2); ASSERT_TRUE(mem != nullptr); - size_t index = SetBytesFromByteArray(mem, min_alloc_size * 2); + size_t index = SetBytesFromByteArray(mem, MIN_ALLOC_SIZE * 2); std::pair new_pair(mem, index); memory_elements.at(0) = new_pair; } { - void *mem = allocator.Alloc(min_alloc_size); + void *mem = allocator.Alloc(MIN_ALLOC_SIZE); ASSERT_TRUE(mem != nullptr); - size_t index = SetBytesFromByteArray(mem, min_alloc_size); + size_t index = SetBytesFromByteArray(mem, MIN_ALLOC_SIZE); std::pair new_pair(mem, index); memory_elements.at(1) = new_pair; } { pair = memory_elements[0]; - ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(pair), min_alloc_size * 2, std::get<1>(pair))); + ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(pair), MIN_ALLOC_SIZE * 2, std::get<1>(pair))); allocator.Free(std::get<0>(pair)); } { pair = memory_elements[1]; - ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(pair), min_alloc_size, std::get<1>(pair))); + ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(pair), MIN_ALLOC_SIZE, std::get<1>(pair))); allocator.Free(std::get<0>(pair)); } { pair = memory_elements[2]; - ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(pair), min_alloc_size, std::get<1>(pair))); + ASSERT_TRUE(CompareBytesWithByteArray(std::get<0>(pair), MIN_ALLOC_SIZE, std::get<1>(pair))); allocator.Free(std::get<0>(pair)); } delete mem_stats; @@ -247,18 +247,18 @@ TEST_F(FreeListAllocatorTest, FailedLinksTest) TEST_F(FreeListAllocatorTest, MaxAllocationSizeTest) { - static constexpr size_t alloc_size = 
MAX_ALLOC_SIZE; - static constexpr size_t alloc_count = 2; + static constexpr size_t ALLOC_SIZE = MAX_ALLOC_SIZE; + static constexpr size_t ALLOC_COUNT = 2; mem::MemStatsType *mem_stats = new mem::MemStatsType(); NonObjectFreeListAllocator allocator(mem_stats); AddMemoryPoolToAllocator(allocator); - std::array memory_elements; - for (size_t i = 0; i < alloc_count; i++) { - void *mem = allocator.Alloc(alloc_size); + std::array memory_elements; + for (size_t i = 0; i < ALLOC_COUNT; i++) { + void *mem = allocator.Alloc(ALLOC_SIZE); ASSERT_TRUE(mem != nullptr); memory_elements.at(i) = mem; } - for (size_t i = 0; i < alloc_count; i++) { + for (size_t i = 0; i < ALLOC_COUNT; i++) { allocator.Free(memory_elements.at(i)); } delete mem_stats; @@ -346,8 +346,8 @@ TEST_F(FreeListAllocatorTest, MTAllocFreeTest) 2 * (AlignUp(MAX_ELEMENTS_COUNT * MAX_MT_ALLOC_SIZE, DEFAULT_POOL_SIZE_FOR_ALLOC)) + THREADS_COUNT * DEFAULT_POOL_SIZE_FOR_ALLOC); for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) { - MT_AllocFreeTest(MIN_ELEMENTS_COUNT, - MAX_ELEMENTS_COUNT); + MtAllocFreeTest(MIN_ELEMENTS_COUNT, + MAX_ELEMENTS_COUNT); ClearPoolManager(true); } } @@ -370,7 +370,7 @@ TEST_F(FreeListAllocatorTest, MTAllocIterateTest) 2 * (AlignUp(MAX_ELEMENTS_COUNT * MAX_MT_ALLOC_SIZE, DEFAULT_POOL_SIZE_FOR_ALLOC)) + THREADS_COUNT * DEFAULT_POOL_SIZE_FOR_ALLOC); for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) { - MT_AllocIterateTest( + MtAllocIterateTest( MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT, CrossingMapSingleton::GetCrossingMapGranularity()); ClearPoolManager(true); } @@ -394,8 +394,8 @@ TEST_F(FreeListAllocatorTest, MTAllocCollectTest) 2 * (AlignUp(MAX_ELEMENTS_COUNT * MAX_MT_ALLOC_SIZE, DEFAULT_POOL_SIZE_FOR_ALLOC)) + THREADS_COUNT * DEFAULT_POOL_SIZE_FOR_ALLOC); for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) { - MT_AllocCollectTest(MIN_ELEMENTS_COUNT, - MAX_ELEMENTS_COUNT); + MtAllocCollectTest(MIN_ELEMENTS_COUNT, + MAX_ELEMENTS_COUNT); ClearPoolManager(true); } } diff --git 
a/runtime/tests/g1gc_fullgc_test.cpp b/runtime/tests/g1gc_fullgc_test.cpp index 40d8e107dc50af79ace967c25a979250ad7c5da5..195db31e36bcc9012bf3dadc5d6d513e00f6ea41 100644 --- a/runtime/tests/g1gc_fullgc_test.cpp +++ b/runtime/tests/g1gc_fullgc_test.cpp @@ -63,26 +63,26 @@ public: template class GCHook : public GCListener { public: - GCHook(F hook) : hook_(hook) {}; + GCHook(F hook_arg) : hook(hook_arg) {}; void GCStarted([[maybe_unused]] size_t heap_size) override {} void GCFinished([[maybe_unused]] const GCTask &task, [[maybe_unused]] size_t heap_size_before_gc, [[maybe_unused]] size_t heap_size) override { - hook_(); + hook(); } - F hook_; + F hook; }; - void SetupRuntime(const std::string &gc_type) + void SetupRuntime(const std::string &gc_type_param) { RuntimeOptions options; options.SetShouldLoadBootPandaFiles(false); options.SetShouldInitializeIntrinsics(false); options.SetUseTlabForAllocations(false); - options.SetGcType(gc_type); + options.SetGcType(gc_type_param); options.SetGcTriggerType("debug"); options.SetGcDebugTriggerStart(std::numeric_limits::max()); options.SetCompilerEnableJit(false); @@ -91,24 +91,24 @@ public: [[maybe_unused]] bool success = Runtime::Create(options); ASSERT(success); - thread_ = panda::MTManagedThread::GetCurrent(); - gc_type_ = Runtime::GetGCType(options, plugins::RuntimeTypeToLang(options.GetRuntimeType())); - [[maybe_unused]] auto gc = thread_->GetVM()->GetGC(); - ASSERT(gc->GetType() == panda::mem::GCTypeFromString(gc_type)); - ASSERT(gc->IsGenerational()); - thread_->ManagedCodeBegin(); + thread = panda::MTManagedThread::GetCurrent(); + gc_type = Runtime::GetGCType(options, plugins::RuntimeTypeToLang(options.GetRuntimeType())); + [[maybe_unused]] auto gc_local = thread->GetVM()->GetGC(); + ASSERT(gc_local->GetType() == panda::mem::GCTypeFromString(gc_type_param)); + ASSERT(gc_local->IsGenerational()); + thread->ManagedCodeBegin(); } void ResetRuntime() { DeleteHandles(); - internal_allocator_->Delete(gccnt_); - 
thread_->ManagedCodeEnd(); + internal_allocator->Delete(gccnt); + thread->ManagedCodeEnd(); bool success = Runtime::Destroy(); ASSERT_TRUE(success) << "Cannot destroy Runtime"; } - template + template ObjVec MakeAllocations(size_t min_size, size_t max_size, size_t count, size_t *allocated, size_t *requested, F space_checker, bool check_oom_in_tenured = false); @@ -126,23 +126,23 @@ public: void TearDown() override {} - panda::MTManagedThread *thread_; - GCType gc_type_; - - LanguageContext ctx_ {nullptr}; - ObjectAllocatorBase *object_allocator_; - mem::InternalAllocatorPtr internal_allocator_; - PandaVM *vm_; - GC *gc_; - std::vector handles_; - MemStatsType *ms_; - GCStats *gc_ms_; - coretypes::Array *root_ = nullptr; - size_t root_size_ = 0; - GCCounter *gccnt_; + panda::MTManagedThread *thread; + GCType gc_type; + + LanguageContext ctx {nullptr}; + ObjectAllocatorBase *object_allocator; + mem::InternalAllocatorPtr internal_allocator; + PandaVM *vm; + GC *gc; + std::vector handles; + MemStatsType *ms; + GCStats *gc_ms; + coretypes::Array *root = nullptr; + size_t root_size = 0; + GCCounter *gccnt; }; -template +template G1GCFullGCTest::ObjVec G1GCFullGCTest::MakeAllocations(size_t min_size, size_t max_size, size_t count, size_t *allocated, size_t *requested, [[maybe_unused]] F space_checker, bool check_oom_in_tenured) @@ -163,20 +163,20 @@ G1GCFullGCTest::ObjVec G1GCFullGCTest::MakeAllocations(size_t min_size, size_t m } } ObjVec result; - result.reserve(count * multi); + result.reserve(count * MULTI); for (size_t j = 0; j < count; ++j) { size_t size = obj_templates[j].length() + sizeof(coretypes::String); if (check_oom_in_tenured) { // Leaving 5MB in tenured seems OK auto free = - reinterpret_cast(object_allocator_->GetHeapSpace())->GetCurrentFreeTenuredSize(); + reinterpret_cast(object_allocator->GetHeapSpace())->GetCurrentFreeTenuredSize(); if (size + 5000000 > free) { return result; } } - for (size_t i = 0; i < multi; ++i) { + for (size_t i = 0; i < MULTI; 
++i) { coretypes::String *string_obj = coretypes::String::CreateFromMUtf8( - reinterpret_cast(&obj_templates[j][0]), obj_templates[j].length(), ctx_, vm_); + reinterpret_cast(&obj_templates[j][0]), obj_templates[j].length(), ctx, vm); ASSERT(string_obj != nullptr); ASSERT(space_checker(ToUintPtr(string_obj))); *allocated += GetAlignedObjectSize(size); @@ -191,11 +191,11 @@ void G1GCFullGCTest::InitRoot() { ClassLinker *class_linker = Runtime::GetCurrent()->GetClassLinker(); Class *klass = class_linker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY) - ->GetClass(ctx_.GetStringArrayClassDescriptor()); + ->GetClass(ctx.GetStringArrayClassDescriptor()); ASSERT_NE(klass, nullptr); - root_ = coretypes::Array::Create(klass, ROOT_MAX_SIZE); - root_size_ = 0; - MakeObjectsPermAlive({root_}); + root = coretypes::Array::Create(klass, ROOT_MAX_SIZE); + root_size = 0; + MakeObjectsPermAlive({root}); } void G1GCFullGCTest::MakeObjectsAlive(ObjVec objects, int every) @@ -206,9 +206,9 @@ void G1GCFullGCTest::MakeObjectsAlive(ObjVec objects, int every) if (cnt != 0) { continue; } - root_->Set(root_size_, obj); - root_size_++; - ASSERT(root_size_ < ROOT_MAX_SIZE); + root->Set(root_size, obj); + root_size++; + ASSERT(root_size < ROOT_MAX_SIZE); cnt = every; } } @@ -221,8 +221,8 @@ void G1GCFullGCTest::MakeObjectsGarbage(size_t start_idx, size_t after_end_idx, if (cnt != 0) { continue; } - root_->Set(i, 0); - root_size_++; + root->Set(i, 0); + root_size++; cnt = every; } } @@ -237,15 +237,15 @@ void G1GCFullGCTest::MakeObjectsPermAlive(ObjVec objects, int every) if (cnt != 0) { continue; } - result.push_back(internal_allocator_->New>(thread_, obj)); + result.push_back(internal_allocator->New>(thread, obj)); cnt = every; } - handles_.push_back(result); + handles.push_back(result); } void G1GCFullGCTest::DumpHandles() { - for (auto &hv : handles_) { + for (auto &hv : handles) { for (auto *handle : hv) { std::cout << "vector " << (void *)&hv << " handle " << (void *)handle << " obj 
" << handle->GetPtr() << std::endl; @@ -255,22 +255,22 @@ void G1GCFullGCTest::DumpHandles() void G1GCFullGCTest::DumpAliveObjects() { - std::cout << "Alive root array : " << handles_[0][0]->GetPtr() << std::endl; - for (size_t i = 0; i < root_size_; ++i) { - if (root_->Get(i) != nullptr) { - std::cout << "Alive idx " << i << " : " << root_->Get(i) << std::endl; + std::cout << "Alive root array : " << handles[0][0]->GetPtr() << std::endl; + for (size_t i = 0; i < root_size; ++i) { + if (root->Get(i) != nullptr) { + std::cout << "Alive idx " << i << " : " << root->Get(i) << std::endl; } } } void G1GCFullGCTest::DeleteHandles() { - for (auto &hv : handles_) { + for (auto &hv : handles) { for (auto *handle : hv) { - internal_allocator_->Delete(handle); + internal_allocator->Delete(handle); } } - handles_.clear(); + handles.clear(); } template @@ -278,15 +278,15 @@ void G1GCFullGCTest::PrepareTest() { if constexpr (std::is_same::value) { DeleteHandles(); - ctx_ = Runtime::GetCurrent()->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY); - object_allocator_ = thread_->GetVM()->GetHeapManager()->GetObjectAllocator().AsObjectAllocator(); - vm_ = Runtime::GetCurrent()->GetPandaVM(); - internal_allocator_ = Runtime::GetCurrent()->GetClassLinker()->GetAllocator(); - gc_ = vm_->GetGC(); - ms_ = vm_->GetMemStats(); - gc_ms_ = vm_->GetGCStats(); - gccnt_ = internal_allocator_->New(); - gc_->AddListener(gccnt_); + ctx = Runtime::GetCurrent()->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY); + object_allocator = thread->GetVM()->GetHeapManager()->GetObjectAllocator().AsObjectAllocator(); + vm = Runtime::GetCurrent()->GetPandaVM(); + internal_allocator = Runtime::GetCurrent()->GetClassLinker()->GetAllocator(); + gc = vm->GetGC(); + ms = vm->GetMemStats(); + gc_ms = vm->GetGCStats(); + gccnt = internal_allocator->New(); + gc->AddListener(gccnt); InitRoot(); } else { UNREACHABLE(); // NYI @@ -295,9 +295,9 @@ void G1GCFullGCTest::PrepareTest() bool 
G1GCFullGCTest::IsInYoung(uintptr_t addr) { - switch (gc_type_) { + switch (gc_type) { case GCType::GEN_GC: { - return object_allocator_->IsAddressInYoungSpace(addr); + return object_allocator->IsAddressInYoungSpace(addr); } case GCType::G1_GC: { auto mem_pool = PoolManager::GetMmapMemPool(); @@ -317,25 +317,25 @@ TEST_F(G1GCFullGCTest, TestIntensiveAlloc) std::string gctype = static_cast(GCStringFromType(GCType::G1_GC)); SetupRuntime(gctype); { - HandleScope scope(thread_); + HandleScope scope(thread); PrepareTest(); [[maybe_unused]] size_t bytes, raw_objects_size; [[maybe_unused]] size_t young_size = reinterpret_cast( - reinterpret_cast(object_allocator_)->GetHeapSpace()) + reinterpret_cast(object_allocator)->GetHeapSpace()) ->GetCurrentMaxYoungSize(); [[maybe_unused]] size_t heap_size = mem::MemConfig::GetHeapSizeLimit(); - [[maybe_unused]] auto g1_alloc = reinterpret_cast *>(object_allocator_); + [[maybe_unused]] auto g1_alloc = reinterpret_cast *>(object_allocator); [[maybe_unused]] size_t max_y_size = g1_alloc->GetYoungAllocMaxSize(); [[maybe_unused]] auto y_space_check = [&](uintptr_t addr) -> bool { return IsInYoung(addr); }; [[maybe_unused]] auto h_space_check = [&](uintptr_t addr) -> bool { return !IsInYoung(addr); }; [[maybe_unused]] auto t_free = - reinterpret_cast(object_allocator_->GetHeapSpace())->GetCurrentFreeTenuredSize(); + reinterpret_cast(object_allocator->GetHeapSpace())->GetCurrentFreeTenuredSize(); const size_t y_obj_size = max_y_size / 10; - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - size_t initial_heap = ms_->GetFootprintHeap(); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + size_t initial_heap = ms->GetFootprintHeap(); { // Ordinary young shall not be broken when intermixed with explicits @@ -346,41 +346,41 @@ TEST_F(G1GCFullGCTest, TestIntensiveAlloc) &raw_objects_size, y_space_check); allocated += bytes; if (i++ % 100 == 0) { - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); } 
} - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(initial_heap, ms_->GetFootprintHeap()); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(initial_heap, ms->GetFootprintHeap()); } { // Intensive allocations surviving 1 young - auto old_root_size = root_size_; + auto old_root_size = root_size; size_t allocated = 0; bool gc_happened = false; GCHook gchook([&old_root_size, this, &gc_happened]() { - MakeObjectsGarbage(old_root_size, this->root_size_); - old_root_size = this->root_size_; + MakeObjectsGarbage(old_root_size, this->root_size); + old_root_size = this->root_size; gc_happened = 1; }); - gc_->AddListener(&gchook); + gc->AddListener(&gchook); while (allocated < 4 * heap_size) { ObjVec ov1 = MakeAllocations(y_obj_size, y_obj_size, 1, &bytes, &raw_objects_size, y_space_check); MakeObjectsAlive(ov1, 1); - t_free = reinterpret_cast(object_allocator_->GetHeapSpace()) + t_free = reinterpret_cast(object_allocator->GetHeapSpace()) ->GetCurrentFreeTenuredSize(); allocated += bytes; } - MakeObjectsGarbage(old_root_size, root_size_); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(initial_heap, ms_->GetFootprintHeap()); - gc_->RemoveListener(&gchook); + MakeObjectsGarbage(old_root_size, root_size); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(initial_heap, ms->GetFootprintHeap()); + gc->RemoveListener(&gchook); } - MakeObjectsGarbage(0, root_size_); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(initial_heap, ms_->GetFootprintHeap()); + MakeObjectsGarbage(0, root_size); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(initial_heap, ms->GetFootprintHeap()); } ResetRuntime(); } @@ -390,57 +390,57 @@ TEST_F(G1GCFullGCTest, TestExplicitFullNearLimit) std::string gctype = static_cast(GCStringFromType(GCType::G1_GC)); SetupRuntime(gctype); { - HandleScope scope(thread_); + HandleScope scope(thread); PrepareTest(); [[maybe_unused]] size_t bytes, raw_objects_size; [[maybe_unused]] size_t young_size 
= reinterpret_cast( - reinterpret_cast(object_allocator_)->GetHeapSpace()) + reinterpret_cast(object_allocator)->GetHeapSpace()) ->GetCurrentMaxYoungSize(); [[maybe_unused]] size_t heap_size = mem::MemConfig::GetHeapSizeLimit(); - [[maybe_unused]] auto g1_alloc = reinterpret_cast *>(object_allocator_); + [[maybe_unused]] auto g1_alloc = reinterpret_cast *>(object_allocator); [[maybe_unused]] size_t max_y_size = g1_alloc->GetYoungAllocMaxSize(); [[maybe_unused]] auto y_space_check = [&](uintptr_t addr) -> bool { return IsInYoung(addr); }; [[maybe_unused]] auto h_space_check = [&](uintptr_t addr) -> bool { return !IsInYoung(addr); }; [[maybe_unused]] auto t_free = - reinterpret_cast(object_allocator_->GetHeapSpace())->GetCurrentFreeTenuredSize(); + reinterpret_cast(object_allocator->GetHeapSpace())->GetCurrentFreeTenuredSize(); const size_t y_obj_size = max_y_size / 10; - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - size_t initial_heap = ms_->GetFootprintHeap(); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + size_t initial_heap = ms->GetFootprintHeap(); { // Allocating until we are close to OOM, then do release this mem, // do explicit full and allocate the same size once again - auto old_root_size = root_size_; + auto old_root_size = root_size; int i = 0; while (t_free > 2.2 * young_size) { ObjVec ov1 = MakeAllocations(y_obj_size, y_obj_size, 1, &bytes, &raw_objects_size, y_space_check); MakeObjectsAlive(ov1, 1); - t_free = reinterpret_cast(object_allocator_->GetHeapSpace()) + t_free = reinterpret_cast(object_allocator->GetHeapSpace()) ->GetCurrentFreeTenuredSize(); i++; } - gc_->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE)); - MakeObjectsGarbage(old_root_size, root_size_); + gc->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE)); + MakeObjectsGarbage(old_root_size, root_size); - old_root_size = root_size_; + old_root_size = root_size; while (--i > 0) { ObjVec ov1 = MakeAllocations(y_obj_size, y_obj_size, 1, &bytes, &raw_objects_size, 
y_space_check); MakeObjectsAlive(ov1, 1); - t_free = reinterpret_cast(object_allocator_->GetHeapSpace()) + t_free = reinterpret_cast(object_allocator->GetHeapSpace()) ->GetCurrentFreeTenuredSize(); } - MakeObjectsGarbage(old_root_size, root_size_); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + MakeObjectsGarbage(old_root_size, root_size); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); } - MakeObjectsGarbage(0, root_size_); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(initial_heap, ms_->GetFootprintHeap()); + MakeObjectsGarbage(0, root_size); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(initial_heap, ms->GetFootprintHeap()); } ResetRuntime(); } @@ -450,56 +450,56 @@ TEST_F(G1GCFullGCTest, TestOOMFullNearLimit) std::string gctype = static_cast(GCStringFromType(GCType::G1_GC)); SetupRuntime(gctype); { - HandleScope scope(thread_); + HandleScope scope(thread); PrepareTest(); [[maybe_unused]] size_t bytes, raw_objects_size; [[maybe_unused]] size_t young_size = reinterpret_cast( - reinterpret_cast(object_allocator_)->GetHeapSpace()) + reinterpret_cast(object_allocator)->GetHeapSpace()) ->GetCurrentMaxYoungSize(); [[maybe_unused]] size_t heap_size = mem::MemConfig::GetHeapSizeLimit(); - [[maybe_unused]] auto g1_alloc = reinterpret_cast *>(object_allocator_); + [[maybe_unused]] auto g1_alloc = reinterpret_cast *>(object_allocator); [[maybe_unused]] size_t max_y_size = g1_alloc->GetYoungAllocMaxSize(); [[maybe_unused]] auto y_space_check = [&](uintptr_t addr) -> bool { return IsInYoung(addr); }; [[maybe_unused]] auto h_space_check = [&](uintptr_t addr) -> bool { return !IsInYoung(addr); }; [[maybe_unused]] auto t_free = - reinterpret_cast(object_allocator_->GetHeapSpace())->GetCurrentFreeTenuredSize(); + reinterpret_cast(object_allocator->GetHeapSpace())->GetCurrentFreeTenuredSize(); const size_t y_obj_size = max_y_size / 10; - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - size_t initial_heap = ms_->GetFootprintHeap(); + 
gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + size_t initial_heap = ms->GetFootprintHeap(); { // Allocating until we are close to OOM, then do release this mem, // then allocate the same size once again checking if we can handle it w/o OOM - auto old_root_size = root_size_; + auto old_root_size = root_size; int i = 0; while (t_free > 2.2 * young_size) { ObjVec ov1 = MakeAllocations(y_obj_size, y_obj_size, 1, &bytes, &raw_objects_size, y_space_check); MakeObjectsAlive(ov1, 1); - t_free = reinterpret_cast(object_allocator_->GetHeapSpace()) + t_free = reinterpret_cast(object_allocator->GetHeapSpace()) ->GetCurrentFreeTenuredSize(); i++; } - MakeObjectsGarbage(old_root_size, root_size_); + MakeObjectsGarbage(old_root_size, root_size); - old_root_size = root_size_; + old_root_size = root_size; while (--i > 0) { ObjVec ov1 = MakeAllocations(y_obj_size, y_obj_size, 1, &bytes, &raw_objects_size, y_space_check); MakeObjectsAlive(ov1, 1); - t_free = reinterpret_cast(object_allocator_->GetHeapSpace()) + t_free = reinterpret_cast(object_allocator->GetHeapSpace()) ->GetCurrentFreeTenuredSize(); } - MakeObjectsGarbage(old_root_size, root_size_); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + MakeObjectsGarbage(old_root_size, root_size); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); } - MakeObjectsGarbage(0, root_size_); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(initial_heap, ms_->GetFootprintHeap()); + MakeObjectsGarbage(0, root_size); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(initial_heap, ms->GetFootprintHeap()); } ResetRuntime(); } diff --git a/runtime/tests/g1gc_test.cpp b/runtime/tests/g1gc_test.cpp index 8fdf97720dfb3c6d7aafd1a0ececda943295dfa2..5acca7102304bfe1a787f3eb2fa29e37cc88e7c2 100644 --- a/runtime/tests/g1gc_test.cpp +++ b/runtime/tests/g1gc_test.cpp @@ -197,13 +197,13 @@ TEST_F(G1GCTest, NonMovable2YoungRef) ScopedManagedCodeThread s(thread); [[maybe_unused]] HandleScope scope(thread); - static constexpr size_t 
array_length = 100; + static constexpr size_t ARRAY_LENGTH = 100; coretypes::Array *non_movable_obj = nullptr; uintptr_t prev_young_addr = 0; Class *klass = class_linker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY) ->GetClass(ctx.GetStringArrayClassDescriptor()); ASSERT_NE(klass, nullptr); - non_movable_obj = coretypes::Array::Create(klass, array_length, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT); + non_movable_obj = coretypes::Array::Create(klass, ARRAY_LENGTH, SpaceType::SPACE_TYPE_NON_MOVABLE_OBJECT); coretypes::String *young_obj = coretypes::String::CreateEmptyString(ctx, runtime->GetPandaVM()); non_movable_obj->Set(0, young_obj); prev_young_addr = ToUintPtr(young_obj); @@ -320,8 +320,8 @@ TEST_F(G1GCTest, CheckRemsetToHumongousAfterReclaimHumongousObject) [[maybe_unused]] HandleScope scope_for_young_obj(thread); // 1MB array - static constexpr size_t humongous_array_length = 262144LU; - static constexpr size_t young_array_length = ((DEFAULT_REGION_SIZE - Region::HeadSize()) / 4U) - 16U; + static constexpr size_t HUMONGOUS_ARRAY_LENGTH = 262144LU; + static constexpr size_t YOUNG_ARRAY_LENGTH = ((DEFAULT_REGION_SIZE - Region::HeadSize()) / 4U) - 16U; coretypes::Array *humongous_obj; coretypes::Array *young_arr; @@ -334,7 +334,7 @@ TEST_F(G1GCTest, CheckRemsetToHumongousAfterReclaimHumongousObject) ->GetClass(ctx.GetStringArrayClassDescriptor()); ASSERT_NE(klass, nullptr); - young_arr = coretypes::Array::Create(klass, young_array_length); + young_arr = coretypes::Array::Create(klass, YOUNG_ARRAY_LENGTH); ASSERT_NE(young_arr, nullptr); auto *region = ObjectToRegion(young_arr); ASSERT_NE(region, nullptr); @@ -344,7 +344,7 @@ TEST_F(G1GCTest, CheckRemsetToHumongousAfterReclaimHumongousObject) { [[maybe_unused]] HandleScope scope_for_humongous_obj(thread); - humongous_obj = coretypes::Array::Create(klass, humongous_array_length); + humongous_obj = coretypes::Array::Create(klass, HUMONGOUS_ARRAY_LENGTH); ASSERT_NE(humongous_obj, nullptr); // add humongous 
object to our remset diff --git a/runtime/tests/gc_log_test.cpp b/runtime/tests/gc_log_test.cpp index b808d9a5485d1aadb75e174fa7b2335a44248e02..bb39c4347444aa07ffd21c145af7070b5d0f9f4f 100644 --- a/runtime/tests/gc_log_test.cpp +++ b/runtime/tests/gc_log_test.cpp @@ -91,12 +91,14 @@ public: for (size_t i = 1; i < iterations; i++) { testing::internal::CaptureStderr(); task.Run(*gc); - expected_log = '[' + std::to_string(GetGCCounter(gc)) + ']'; - log = testing::internal::GetCapturedStderr(); - ASSERT_NE(log.find(expected_log), std::string::npos) << "Expected:\n" << expected_log << "\nLog:\n" << log; + expected_log_ = '[' + std::to_string(GetGCCounter(gc)) + ']'; + log_ = testing::internal::GetCapturedStderr(); + ASSERT_NE(log_.find(expected_log_), std::string::npos) << "Expected:\n" + << expected_log_ << "\nLog:\n" + << log_; ASSERT(GetGCCounter(gc) == i); - task.reason_ = static_cast(i % number_of_GC_causes == 0 ? i % number_of_GC_causes + 1 - : i % number_of_GC_causes); + task.reason_ = static_cast(i % number_of_gc_causes_ == 0 ? i % number_of_gc_causes_ + 1 + : i % number_of_gc_causes_); } } @@ -113,23 +115,23 @@ public: testing::internal::CaptureStderr(); task.reason_ = GCTaskCause::YOUNG_GC_CAUSE; task.Run(*gc); - expected_log = is_stw ? "[FULL (Young)]" : "[YOUNG (Young)]"; - log = testing::internal::GetCapturedStderr(); - ASSERT_NE(log.find(expected_log), std::string::npos) << "Expected:\n" << expected_log << "\nLog:\n" << log; + expected_log_ = is_stw ? "[FULL (Young)]" : "[YOUNG (Young)]"; + log_ = testing::internal::GetCapturedStderr(); + ASSERT_NE(log_.find(expected_log_), std::string::npos) << "Expected:\n" << expected_log_ << "\nLog:\n" << log_; testing::internal::CaptureStderr(); task.reason_ = GCTaskCause::HEAP_USAGE_THRESHOLD_CAUSE; task.Run(*gc); - expected_log = is_stw ? 
"[FULL (Threshold)]" : "[TENURED (Threshold)]"; - log = testing::internal::GetCapturedStderr(); - ASSERT_NE(log.find(expected_log), std::string::npos) << "Expected:\n" << expected_log << "\nLog:\n" << log; + expected_log_ = is_stw ? "[FULL (Threshold)]" : "[TENURED (Threshold)]"; + log_ = testing::internal::GetCapturedStderr(); + ASSERT_NE(log_.find(expected_log_), std::string::npos) << "Expected:\n" << expected_log_ << "\nLog:\n" << log_; testing::internal::CaptureStderr(); task.reason_ = GCTaskCause::OOM_CAUSE; task.Run(*gc); - expected_log = "[FULL (OOM)]"; - log = testing::internal::GetCapturedStderr(); - ASSERT_NE(log.find(expected_log), std::string::npos) << "Expected:\n" << expected_log << "\nLog:\n" << log; + expected_log_ = "[FULL (OOM)]"; + log_ = testing::internal::GetCapturedStderr(); + ASSERT_NE(log_.find(expected_log_), std::string::npos) << "Expected:\n" << expected_log_ << "\nLog:\n" << log_; } // GCCollectionType order is important @@ -139,11 +141,11 @@ public: static_assert(GCCollectionType::TENURED < GCCollectionType::FULL); protected: - std::string expected_log; - std::string log; + std::string expected_log_; + std::string log_; private: - const size_t number_of_GC_causes = 8; + const size_t number_of_gc_causes_ = 8; }; TEST_F(GCTestLog, StwFullLogTest) @@ -182,9 +184,9 @@ TEST_F(GCTestLog, GenGCYoungCauseFullCollectionLogTest) GCTask task(GCTaskCause::YOUNG_GC_CAUSE); testing::internal::CaptureStderr(); task.Run(*gc); - expected_log = "[FULL (Young)]"; - log = testing::internal::GetCapturedStderr(); - ASSERT_NE(log.find(expected_log), std::string::npos) << "Expected:\n" << expected_log << "\nLog:\n" << log; + expected_log_ = "[FULL (Young)]"; + log_ = testing::internal::GetCapturedStderr(); + ASSERT_NE(log_.find(expected_log_), std::string::npos) << "Expected:\n" << expected_log_ << "\nLog:\n" << log_; } TEST_F(GCTestLog, G1GCIdleLaunchLogTest) @@ -225,9 +227,9 @@ TEST_F(GCTestLog, G1GCMixedCollectionLogTest) GCTask 
task(GCTaskCause::YOUNG_GC_CAUSE); task.Run(*gc); } - expected_log = "[YOUNG (Young)]"; - log = testing::internal::GetCapturedStderr(); - ASSERT_NE(log.find(expected_log), std::string::npos) << "Expected:\n" << expected_log << "\nLog:\n" << log; + expected_log_ = "[YOUNG (Young)]"; + log_ = testing::internal::GetCapturedStderr(); + ASSERT_NE(log_.find(expected_log_), std::string::npos) << "Expected:\n" << expected_log_ << "\nLog:\n" << log_; testing::internal::CaptureStderr(); VMHandle current; @@ -250,9 +252,9 @@ TEST_F(GCTestLog, G1GCMixedCollectionLogTest) GCTask task2(GCTaskCause::YOUNG_GC_CAUSE); task2.Run(*gc); } - expected_log = "[MIXED (Young)]"; - log = testing::internal::GetCapturedStderr(); - ASSERT_NE(log.find(expected_log), std::string::npos) << "Expected:\n" << expected_log << "\nLog:\n" << log; + expected_log_ = "[MIXED (Young)]"; + log_ = testing::internal::GetCapturedStderr(); + ASSERT_NE(log_.find(expected_log_), std::string::npos) << "Expected:\n" << expected_log_ << "\nLog:\n" << log_; } TEST_F(GCTestLog, StwGCCounterLogTest) diff --git a/runtime/tests/humongous_obj_allocator_test.cpp b/runtime/tests/humongous_obj_allocator_test.cpp index 5574a0dba1d92d249464074c55ce78d62db725dd..3c9386c50f5e2b48e4d4aa67a38cc62b491ec549 100644 --- a/runtime/tests/humongous_obj_allocator_test.cpp +++ b/runtime/tests/humongous_obj_allocator_test.cpp @@ -226,7 +226,7 @@ TEST_F(HumongousObjAllocatorTest, MTAllocFreeTest) #endif static constexpr size_t MT_TEST_RUN_COUNT = 5; for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) { - MT_AllocFreeTest(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT); + MtAllocFreeTest(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT); ClearPoolManager(); } } @@ -243,7 +243,7 @@ TEST_F(HumongousObjAllocatorTest, MTAllocIterateTest) #endif static constexpr size_t MT_TEST_RUN_COUNT = 5; for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) { - MT_AllocIterateTest( + MtAllocIterateTest( MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT, 
CrossingMapSingleton::GetCrossingMapGranularity()); ClearPoolManager(); } @@ -261,7 +261,7 @@ TEST_F(HumongousObjAllocatorTest, MTAllocCollectTest) #endif static constexpr size_t MT_TEST_RUN_COUNT = 5; for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) { - MT_AllocCollectTest(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT); + MtAllocCollectTest(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT); ClearPoolManager(); } } diff --git a/runtime/tests/i2c_bridge_test.cpp b/runtime/tests/i2c_bridge_test.cpp index 5081dad3f3d5cd45f7ecb0894b779767d591e35a..423f81fa89b0c3dc0e294ab18a4bbda775fc9936 100644 --- a/runtime/tests/i2c_bridge_test.cpp +++ b/runtime/tests/i2c_bridge_test.cpp @@ -35,7 +35,7 @@ using Opcode = panda::BytecodeInstruction::Opcode; namespace panda::test { -static std::string g_call_result; +static std::string G_CALL_RESULT; class InterpreterToCompiledCodeBridgeTest : public testing::Test { public: @@ -49,7 +49,7 @@ public: thread_ = MTManagedThread::GetCurrent(); thread_->ManagedCodeBegin(); - g_call_result = ""; + G_CALL_RESULT = ""; } ~InterpreterToCompiledCodeBridgeTest() @@ -94,7 +94,11 @@ template std::string ArgsToString(const Arg &arg) { std::ostringstream out; - out << arg; + if constexpr (std::is_integral_v) { + out << std::hex << "0x" << arg; + } else { + out << arg; + } return out.str(); } @@ -102,7 +106,7 @@ template std::string ArgsToString(const Arg &a0, Args... args) { std::ostringstream out; - out << a0 << ", " << ArgsToString(args...); + out << ArgsToString(a0) << ", " << ArgsToString(args...); return out.str(); } @@ -116,13 +120,13 @@ std::string PrintFunc(const char *ret, const char *name, Args... 
args) static void VoidNoArg(Method *method) { - g_call_result = PrintFunc("void", __FUNCTION__, method); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method); } -template +template static Frame *CreateFrame(size_t nregs, Method *method, Frame *prev) { - return panda::CreateFrameWithSize(Frame::GetActualSize(nregs), nregs, method, prev); + return panda::CreateFrameWithSize(Frame::GetActualSize(nregs), nregs, method, prev); } TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeVoidNoArg) @@ -133,25 +137,25 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeVoidNoArg) Frame *frame = CreateFrame(0, nullptr, nullptr); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidNoArg", &callee)); uint8_t insn2[] = {static_cast(Opcode::CALL_ACC_SHORT_V4_IMM4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn2, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidNoArg", &callee)); - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(nullptr, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidNoArg", &callee)); FreeFrame(frame); } -static void InstanceVoidNoArg(Method *method, ObjectHeader *this_) +static void InstanceVoidNoArg(Method *method, ObjectHeader *this_header) { - g_call_result = PrintFunc("void", __FUNCTION__, method, this_); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, this_header); } TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeInstanceVoidNoArg) @@ -168,26 +172,26 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeInstanceVoidNoArg) 
frame_handler.GetVReg(0).SetReference(obj2); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "InstanceVoidNoArg", &callee, obj2)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "InstanceVoidNoArg", &callee, obj2)); uint8_t insn2[] = {static_cast(Opcode::CALL_ACC_SHORT_V4_IMM4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn2, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "InstanceVoidNoArg", &callee, obj1)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "InstanceVoidNoArg", &callee, obj1)); - g_call_result = ""; + G_CALL_RESULT = ""; int64_t args[] = {static_cast(ToUintPtr(obj2))}; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "InstanceVoidNoArg", &callee, obj2)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "InstanceVoidNoArg", &callee, obj2)); FreeFrame(frame); } static uint8_t ByteNoArg(Method *method) { - g_call_result = PrintFunc("uint8_t", __FUNCTION__, method); + G_CALL_RESULT = PrintFunc("uint8_t", __FUNCTION__, method); return uint8_t(5); } @@ -199,31 +203,31 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeByteNoArg) Frame *frame = CreateFrame(0, nullptr, nullptr); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - EXPECT_EQ(g_call_result, PrintFunc("uint8_t", "ByteNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("uint8_t", "ByteNoArg", &callee)); EXPECT_EQ(frame->GetAcc().Get(), uint8_t(5)); EXPECT_EQ(frame->GetAcc().GetTag(), 0); uint8_t insn_acc[] = {static_cast(Opcode::CALL_ACC_SHORT_V4_IMM4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; 
InterpreterToCompiledCodeBridge(insn_acc, frame, &callee, thread_); - EXPECT_EQ(g_call_result, PrintFunc("uint8_t", "ByteNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("uint8_t", "ByteNoArg", &callee)); EXPECT_EQ(frame->GetAcc().Get(), uint8_t(5)); EXPECT_EQ(frame->GetAcc().GetTag(), 0); - g_call_result = ""; + G_CALL_RESULT = ""; DecodedTaggedValue res = InvokeCompiledCodeWithArgArray(nullptr, frame, &callee, thread_); EXPECT_EQ(int32_t(res.value), uint8_t(5)); EXPECT_EQ(res.tag, 0UL); - EXPECT_EQ(g_call_result, PrintFunc("uint8_t", "ByteNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("uint8_t", "ByteNoArg", &callee)); FreeFrame(frame); } static int8_t SignedByteNoArg(Method *method) { - g_call_result = PrintFunc("int8_t", __FUNCTION__, method); + G_CALL_RESULT = PrintFunc("int8_t", __FUNCTION__, method); return int8_t(-5); } @@ -235,31 +239,31 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeSignedByteNoArg) Frame *frame = CreateFrame(0, nullptr, nullptr); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - EXPECT_EQ(g_call_result, PrintFunc("int8_t", "SignedByteNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("int8_t", "SignedByteNoArg", &callee)); EXPECT_EQ(frame->GetAcc().Get(), int8_t(-5)); EXPECT_EQ(frame->GetAcc().GetTag(), 0); uint8_t insn_acc[] = {static_cast(Opcode::CALL_ACC_SHORT_V4_IMM4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn_acc, frame, &callee, thread_); - EXPECT_EQ(g_call_result, PrintFunc("int8_t", "SignedByteNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("int8_t", "SignedByteNoArg", &callee)); EXPECT_EQ(frame->GetAcc().Get(), int8_t(-5)); EXPECT_EQ(frame->GetAcc().GetTag(), 0); - g_call_result = ""; + G_CALL_RESULT = ""; DecodedTaggedValue res = InvokeCompiledCodeWithArgArray(nullptr, frame, &callee, 
thread_); EXPECT_EQ(int32_t(res.value), int8_t(-5)); EXPECT_EQ(res.tag, 0UL); - EXPECT_EQ(g_call_result, PrintFunc("int8_t", "SignedByteNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("int8_t", "SignedByteNoArg", &callee)); FreeFrame(frame); } static bool BoolNoArg(Method *method) { - g_call_result = PrintFunc("bool", __FUNCTION__, method); + G_CALL_RESULT = PrintFunc("bool", __FUNCTION__, method); return true; } @@ -271,24 +275,24 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeBoolNoArg) Frame *frame = CreateFrame(0, nullptr, nullptr); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - EXPECT_EQ(g_call_result, PrintFunc("bool", "BoolNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("bool", "BoolNoArg", &callee)); EXPECT_EQ(frame->GetAcc().Get(), true); EXPECT_EQ(frame->GetAcc().GetTag(), 0); - g_call_result = ""; + G_CALL_RESULT = ""; DecodedTaggedValue res = InvokeCompiledCodeWithArgArray(nullptr, frame, &callee, thread_); EXPECT_EQ(int32_t(res.value), true); EXPECT_EQ(res.tag, 0UL); - EXPECT_EQ(g_call_result, PrintFunc("bool", "BoolNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("bool", "BoolNoArg", &callee)); FreeFrame(frame); } static uint16_t ShortNoArg(Method *method) { - g_call_result = PrintFunc("uint16_t", __FUNCTION__, method); + G_CALL_RESULT = PrintFunc("uint16_t", __FUNCTION__, method); return uint16_t(5); } @@ -300,24 +304,24 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeShortNoArg) Frame *frame = CreateFrame(0, nullptr, nullptr); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - EXPECT_EQ(g_call_result, PrintFunc("uint16_t", "ShortNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("uint16_t", "ShortNoArg", &callee)); 
EXPECT_EQ(frame->GetAcc().Get(), uint16_t(5)); EXPECT_EQ(frame->GetAcc().GetTag(), 0); - g_call_result = ""; + G_CALL_RESULT = ""; DecodedTaggedValue res = InvokeCompiledCodeWithArgArray(nullptr, frame, &callee, thread_); EXPECT_EQ(int32_t(res.value), uint16_t(5)); EXPECT_EQ(res.tag, 0UL); - EXPECT_EQ(g_call_result, PrintFunc("uint16_t", "ShortNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("uint16_t", "ShortNoArg", &callee)); FreeFrame(frame); } static int16_t SignedShortNoArg(Method *method) { - g_call_result = PrintFunc("int16_t", __FUNCTION__, method); + G_CALL_RESULT = PrintFunc("int16_t", __FUNCTION__, method); return int16_t(-5); } @@ -329,24 +333,24 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeSignedShortNoArg) Frame *frame = CreateFrame(0, nullptr, nullptr); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - EXPECT_EQ(g_call_result, PrintFunc("int16_t", "SignedShortNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("int16_t", "SignedShortNoArg", &callee)); EXPECT_EQ(frame->GetAcc().Get(), int16_t(-5)); EXPECT_EQ(frame->GetAcc().GetTag(), 0); - g_call_result = ""; + G_CALL_RESULT = ""; DecodedTaggedValue res = InvokeCompiledCodeWithArgArray(nullptr, frame, &callee, thread_); EXPECT_EQ(int32_t(res.value), int16_t(-5)); EXPECT_EQ(res.tag, 0UL); - EXPECT_EQ(g_call_result, PrintFunc("int16_t", "SignedShortNoArg", &callee)); + EXPECT_EQ(G_CALL_RESULT, PrintFunc("int16_t", "SignedShortNoArg", &callee)); FreeFrame(frame); } static int32_t IntNoArg(Method *method) { - g_call_result = PrintFunc("int32_t", __FUNCTION__, method); + G_CALL_RESULT = PrintFunc("int32_t", __FUNCTION__, method); return 5; } @@ -358,24 +362,24 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeIntNoArg) Frame *frame = CreateFrame(0, nullptr, nullptr); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; 
- g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("int32_t", "IntNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("int32_t", "IntNoArg", &callee)); ASSERT_EQ(frame->GetAcc().Get(), 5); EXPECT_EQ(frame->GetAcc().GetTag(), 0); - g_call_result = ""; + G_CALL_RESULT = ""; DecodedTaggedValue res = InvokeCompiledCodeWithArgArray(nullptr, frame, &callee, thread_); ASSERT_EQ(res.value, 5); EXPECT_EQ(res.tag, 0UL); - ASSERT_EQ(g_call_result, PrintFunc("int32_t", "IntNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("int32_t", "IntNoArg", &callee)); FreeFrame(frame); } static int64_t LongNoArg(Method *method) { - g_call_result = PrintFunc("int64_t", __FUNCTION__, method); + G_CALL_RESULT = PrintFunc("int64_t", __FUNCTION__, method); return 8; } @@ -387,24 +391,24 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeLongNoArg) Frame *frame = CreateFrame(0, nullptr, nullptr); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("int64_t", "LongNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("int64_t", "LongNoArg", &callee)); ASSERT_EQ(frame->GetAcc().Get(), 8); EXPECT_EQ(frame->GetAcc().GetTag(), 0); - g_call_result = ""; + G_CALL_RESULT = ""; DecodedTaggedValue res = InvokeCompiledCodeWithArgArray(nullptr, frame, &callee, thread_); ASSERT_EQ(res.value, 8); EXPECT_EQ(res.tag, 0UL); - ASSERT_EQ(g_call_result, PrintFunc("int64_t", "LongNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("int64_t", "LongNoArg", &callee)); FreeFrame(frame); } static double DoubleNoArg(Method *method) { - g_call_result = PrintFunc("double", __FUNCTION__, method); + G_CALL_RESULT = PrintFunc("double", __FUNCTION__, method); return 3.0; } @@ -416,24 +420,24 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, 
InvokeDoubleNoArg) Frame *frame = CreateFrame(0, nullptr, nullptr); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("double", "DoubleNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("double", "DoubleNoArg", &callee)); ASSERT_EQ(frame->GetAcc().GetDouble(), 3.0); EXPECT_EQ(frame->GetAcc().GetTag(), 0); - g_call_result = ""; + G_CALL_RESULT = ""; DecodedTaggedValue res = InvokeCompiledCodeWithArgArray(nullptr, frame, &callee, thread_); ASSERT_EQ(bit_cast(res.value), 3.0); EXPECT_EQ(res.tag, 0UL); - ASSERT_EQ(g_call_result, PrintFunc("double", "DoubleNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("double", "DoubleNoArg", &callee)); FreeFrame(frame); } static ObjectHeader *ObjNoArg(Method *method) { - g_call_result = PrintFunc("Object", __FUNCTION__, method); + G_CALL_RESULT = PrintFunc("Object", __FUNCTION__, method); return nullptr; } @@ -445,24 +449,24 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeObjNoArg) Frame *frame = CreateFrame(0, nullptr, nullptr); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("Object", "ObjNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("Object", "ObjNoArg", &callee)); ASSERT_EQ(frame->GetAcc().GetReference(), nullptr); EXPECT_EQ(frame->GetAcc().GetTag(), 1); - g_call_result = ""; + G_CALL_RESULT = ""; DecodedTaggedValue res = InvokeCompiledCodeWithArgArray(nullptr, frame, &callee, thread_); ASSERT_EQ(reinterpret_cast(res.value), nullptr); EXPECT_EQ(res.tag, 1UL); - ASSERT_EQ(g_call_result, PrintFunc("Object", "ObjNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("Object", "ObjNoArg", &callee)); FreeFrame(frame); } static DecodedTaggedValue 
VRegNoArg(Method *method) { - g_call_result = PrintFunc("vreg", __FUNCTION__, method); + G_CALL_RESULT = PrintFunc("vreg", __FUNCTION__, method); return DecodedTaggedValue(5, 7); } @@ -474,15 +478,15 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeVRegNoArg) Frame *frame = CreateFrame(0, nullptr, nullptr); uint8_t insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("vreg", "VRegNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("vreg", "VRegNoArg", &callee)); ASSERT_EQ(frame->GetAcc().GetValue(), 5); ASSERT_EQ(frame->GetAcc().GetTag(), 7); - g_call_result = ""; + G_CALL_RESULT = ""; DecodedTaggedValue res = InvokeCompiledCodeWithArgArray(nullptr, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("vreg", "VRegNoArg", &callee)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("vreg", "VRegNoArg", &callee)); ASSERT_EQ(res.value, 5); ASSERT_EQ(res.tag, 7); @@ -491,7 +495,7 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeVRegNoArg) static void VoidInt(Method *method, int32_t a0) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0); } TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeInt) @@ -503,33 +507,33 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeInt) frame->GetVReg(1).Set(5); uint8_t call_short_insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x01, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_short_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidInt", &callee, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidInt", &callee, 5)); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x01, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, 
&callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidInt", &callee, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidInt", &callee, 5)); - g_call_result = ""; + G_CALL_RESULT = ""; int64_t arg = 5; InvokeCompiledCodeWithArgArray(&arg, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidInt", &callee, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidInt", &callee, 5)); frame->GetVReg(1).Set(0); frame->GetAcc().SetValue(5); uint8_t call_acc_insn[] = {static_cast(Opcode::CALL_ACC_SHORT_V4_IMM4_ID16), 0x00, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_acc_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidInt", &callee, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidInt", &callee, 5)); FreeFrame(frame); } -static void InstanceVoidInt(Method *method, ObjectHeader *this_, int32_t a0) +static void InstanceVoidInt(Method *method, ObjectHeader *this_header, int32_t a0) { - g_call_result = PrintFunc("void", __FUNCTION__, method, this_, a0); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, this_header, a0); } TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeInstanceInt) @@ -544,33 +548,33 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeInstanceInt) frame_handler.GetVReg(1).Set(5); uint8_t call_short_insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x10, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_short_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "InstanceVoidInt", &callee, obj, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "InstanceVoidInt", &callee, obj, 5)); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "InstanceVoidInt", &callee, 
obj, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "InstanceVoidInt", &callee, obj, 5)); - g_call_result = ""; + G_CALL_RESULT = ""; int64_t args[] = {static_cast(ToUintPtr(obj)), 5}; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "InstanceVoidInt", &callee, obj, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "InstanceVoidInt", &callee, obj, 5)); frame_handler.GetVReg(1).Set(0); frame_handler.GetAcc().SetValue(5); uint8_t call_acc_insn[] = {static_cast(Opcode::CALL_ACC_SHORT_V4_IMM4_ID16), 0x10, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_acc_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "InstanceVoidInt", &callee, obj, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "InstanceVoidInt", &callee, obj, 5)); FreeFrame(frame); } static void VoidVReg(Method *method, int64_t value) { - g_call_result = PrintFunc("void", __FUNCTION__, method, value); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, value); } TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeVReg) @@ -583,32 +587,32 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeVReg) frame_handler.GetVReg(1).Set(5); uint8_t call_short_insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x01, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_short_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidVReg", &callee, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidVReg", &callee, 5)); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x01, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidVReg", &callee, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidVReg", &callee, 5)); - g_call_result = ""; + G_CALL_RESULT = ""; int64_t arg[] = 
{5, 8}; InvokeCompiledCodeWithArgArray(arg, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidVReg", &callee, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidVReg", &callee, 5)); frame_handler.GetVReg(1).Set(0); frame_handler.GetAcc().SetValue(5); uint8_t call_acc_short[] = {static_cast(Opcode::CALL_ACC_SHORT_V4_IMM4_ID16), 0x01, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_acc_short, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidVReg", &callee, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidVReg", &callee, 5)); FreeFrame(frame); } static void VoidIntVReg(Method *method, int32_t a0, int64_t value) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0, value); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, value); } TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeIntVReg) @@ -622,26 +626,26 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeIntVReg) frame_handler.GetVReg(1).Set(5); uint8_t call_short_insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x10, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_short_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidIntVReg", &callee, 2, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidIntVReg", &callee, 2, 5)); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidIntVReg", &callee, 2, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidIntVReg", &callee, 2, 5)); - g_call_result = ""; + G_CALL_RESULT = ""; int64_t arg[] = {2, 5, 8}; InvokeCompiledCodeWithArgArray(arg, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidIntVReg", &callee, 2, 5)); + 
ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidIntVReg", &callee, 2, 5)); frame_handler.GetAcc().SetValue(5); frame_handler.GetVReg(1).Set(0); uint8_t call_acc_short_insn[] = {static_cast(Opcode::CALL_ACC_SHORT_V4_IMM4_ID16), 0x10, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_acc_short_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidIntVReg", &callee, 2, 5)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidIntVReg", &callee, 2, 5)); FreeFrame(frame); } @@ -649,7 +653,7 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeIntVReg) // arm max number of register parameters static void Void3Int(Method *method, int32_t a0, int32_t a1, int32_t a2) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0, a1, a2); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, a1, a2); } TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke3Int) @@ -665,32 +669,32 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke3Int) frame_handler.GetVReg(2).Set(3); uint8_t call_insn[] = {static_cast(Opcode::CALL_V4_V4_V4_V4_ID16), 0x10, 0x02, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void3Int", &callee, 1, 2, 3)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void3Int", &callee, 1, 2, 3)); // callee(acc, v1, v2) uint8_t call_acc_insn[] = {static_cast(Opcode::CALL_ACC_V4_V4_V4_IMM4_ID16), 0x21, 0x00, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_acc_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void3Int", &callee, 0, 2, 3)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void3Int", &callee, 0, 2, 3)); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); 
- ASSERT_EQ(g_call_result, PrintFunc("void", "Void3Int", &callee, 1, 2, 3)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void3Int", &callee, 1, 2, 3)); int64_t args[] = {1, 2, 3}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void3Int", &callee, 1, 2, 3)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void3Int", &callee, 1, 2, 3)); FreeFrame(frame); } static void Void2IntLongInt(Method *method, int32_t a0, int32_t a1, int64_t a2, int32_t a3) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3); } TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke2IntLongInt) @@ -706,33 +710,33 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke2IntLongInt) frame_handler.GetVReg(3).Set(4); uint8_t call_insn[] = {static_cast(Opcode::CALL_V4_V4_V4_V4_ID16), 0x10, 0x32, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void2IntLongInt", &callee, 1, 2, 3, 4)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void2IntLongInt", &callee, 1, 2, 3, 4)); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void2IntLongInt", &callee, 1, 2, 3, 4)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void2IntLongInt", &callee, 1, 2, 3, 4)); int64_t args[] = {1, 2, 3, 4}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void2IntLongInt", &callee, 1, 2, 3, 4)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void2IntLongInt", &callee, 1, 2, 3, 4)); frame_handler.GetVReg(2).Set(0); 
frame_handler.GetAcc().SetValue(3); uint8_t call_acc_insn[] = {static_cast(Opcode::CALL_ACC_V4_V4_V4_IMM4_ID16), 0x10, 0x23, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_acc_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void2IntLongInt", &callee, 1, 2, 3, 4)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void2IntLongInt", &callee, 1, 2, 3, 4)); FreeFrame(frame); } static void VoidLong(Method *method, int64_t a0) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0); } TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeLong) @@ -745,26 +749,26 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeLong) frame_handler.GetVReg(0).Set(9); uint8_t call_insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidLong", &callee, 9)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidLong", &callee, 9)); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidLong", &callee, 9)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidLong", &callee, 9)); int64_t args[] = {9}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidLong", &callee, 9)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidLong", &callee, 9)); FreeFrame(frame); } static void VoidDouble(Method *method, double a0) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0); } 
TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeDouble) @@ -777,26 +781,26 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeDouble) frame_handler.GetVReg(0).Set(4.0); uint8_t call_insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidDouble", &callee, 4.0)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidDouble", &callee, 4.0)); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidDouble", &callee, 4.0)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidDouble", &callee, 4.0)); int64_t args[] = {bit_cast(4.0)}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "VoidDouble", &callee, 4.0)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "VoidDouble", &callee, 4.0)); FreeFrame(frame); } static void Void4Int(Method *method, int32_t a0, int32_t a1, int32_t a2, int32_t a3) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3); } TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke4Int) @@ -812,33 +816,33 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke4Int) frame_handler.GetVReg(3).Set(4); uint8_t call_insn[] = {static_cast(Opcode::CALL_V4_V4_V4_V4_ID16), 0x10, 0x32, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void4Int", &callee, 1, 2, 3, 4)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void4Int", &callee, 1, 2, 3, 4)); uint8_t call_range_insn[] = 
{static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void4Int", &callee, 1, 2, 3, 4)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void4Int", &callee, 1, 2, 3, 4)); int64_t args[] = {1, 2, 3, 4}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void4Int", &callee, 1, 2, 3, 4)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void4Int", &callee, 1, 2, 3, 4)); frame_handler.GetVReg(3).Set(0); frame_handler.GetAcc().SetValue(4); uint8_t call_acc_insn[] = {static_cast(Opcode::CALL_ACC_V4_V4_V4_IMM4_ID16), 0x10, 0x32, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_acc_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void4Int", &callee, 1, 2, 3, 4)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void4Int", &callee, 1, 2, 3, 4)); FreeFrame(frame); } static void Void2Long(Method *method, int64_t a0, int64_t a1) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0, a1); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, a1); } TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke2Long) @@ -852,26 +856,26 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke2Long) frame_handler.GetVReg(1).Set(9); uint8_t call_insn[] = {static_cast(Opcode::CALL_SHORT_V4_V4_ID16), 0x10, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void2Long", &callee, 3, 9)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void2Long", &callee, 3, 9)); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; 
InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void2Long", &callee, 3, 9)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void2Long", &callee, 3, 9)); int64_t args[] = {3, 9}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void2Long", &callee, 3, 9)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void2Long", &callee, 3, 9)); FreeFrame(frame); } static void Void4IntDouble(Method *method, int32_t a0, int32_t a1, int32_t a2, int32_t a3, double a4) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4); } TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke4IntDouble) @@ -888,14 +892,14 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke4IntDouble) frame_handler.GetVReg(4).Set(5.0); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void4IntDouble", &callee, 1, 2, 3, 4, 5.0)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void4IntDouble", &callee, 1, 2, 3, 4, 5.0)); int64_t args[] = {1, 2, 3, 4, bit_cast(5.0)}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void4IntDouble", &callee, 1, 2, 3, 4, 5.0)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void4IntDouble", &callee, 1, 2, 3, 4, 5.0)); FreeFrame(frame); } @@ -903,7 +907,7 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke4IntDouble) // aarch64 max number of register parameters static void Void7Int(Method *method, int32_t a0, int32_t a1, int32_t a2, int32_t a3, int32_t a4, int32_t a5, int32_t a6) { - g_call_result = 
PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4, a5, a6); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4, a5, a6); } TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke7Int) @@ -923,14 +927,14 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke7Int) frame_handler.GetVReg(6).Set(7); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void7Int", &callee, 1, 2, 3, 4, 5, 6, 7)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void7Int", &callee, 1, 2, 3, 4, 5, 6, 7)); int64_t args[] = {1, 2, 3, 4, 5, 6, 7}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void7Int", &callee, 1, 2, 3, 4, 5, 6, 7)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void7Int", &callee, 1, 2, 3, 4, 5, 6, 7)); FreeFrame(frame); } @@ -939,7 +943,7 @@ static void Void7Int8Double(Method *method, int32_t a0, int32_t a1, int32_t a2, int32_t a6, double d0, double d1, double d2, double d3, double d4, double d5, double d6, double d7) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4, a5, a6, d0, d1, d2, d3, d4, d5, d6, d7); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4, a5, a6, d0, d1, d2, d3, d4, d5, d6, d7); } TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke7Int8Double) @@ -968,9 +972,9 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke7Int8Double) frame_handler.GetVReg(14).Set(15.0); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void7Int8Double", &callee, 1, 2, 3, 4, 5, 6, 7, 8.0, 9.0, 10.0, 
11.0, + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void7Int8Double", &callee, 1, 2, 3, 4, 5, 6, 7, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0)); int64_t args[] = {1, @@ -988,9 +992,9 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke7Int8Double) bit_cast(13.0), bit_cast(14.0), bit_cast(15.0)}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void7Int8Double", &callee, 1, 2, 3, 4, 5, 6, 7, 8.0, 9.0, 10.0, 11.0, + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void7Int8Double", &callee, 1, 2, 3, 4, 5, 6, 7, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0)); FreeFrame(frame); @@ -999,7 +1003,7 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke7Int8Double) static void Void8Int(Method *method, int32_t a0, int32_t a1, int32_t a2, int32_t a3, int32_t a4, int32_t a5, int32_t a6, int32_t a7) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4, a5, a6, a7); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4, a5, a6, a7); } TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke8Int) @@ -1020,14 +1024,14 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke8Int) frame_handler.GetVReg(7).Set(8); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void8Int", &callee, 1, 2, 3, 4, 5, 6, 7, 8)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void8Int", &callee, 1, 2, 3, 4, 5, 6, 7, 8)); int64_t args[] = {1, 2, 3, 4, 5, 6, 7, 8}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void8Int", &callee, 1, 2, 3, 4, 5, 6, 7, 8)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void8Int", &callee, 1, 2, 3, 4, 5, 6, 7, 8)); FreeFrame(frame); 
} @@ -1035,7 +1039,7 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke8Int) static void Void6IntVReg(Method *method, int32_t a0, int32_t a1, int32_t a2, int32_t a3, int32_t a4, int32_t a5, int64_t value) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4, a5, value); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4, a5, value); } TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke6IntVReg) @@ -1055,14 +1059,14 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke6IntVReg) frame_handler.GetVReg(6).Set(7); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void6IntVReg", &callee, 1, 2, 3, 4, 5, 6, 7)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void6IntVReg", &callee, 1, 2, 3, 4, 5, 6, 7)); int64_t args[] = {1, 2, 3, 4, 5, 6, 7, 8}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void6IntVReg", &callee, 1, 2, 3, 4, 5, 6, 7)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void6IntVReg", &callee, 1, 2, 3, 4, 5, 6, 7)); FreeFrame(frame); } @@ -1070,7 +1074,7 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke6IntVReg) static void Void7IntVReg(Method *method, int32_t a0, int32_t a1, int32_t a2, int32_t a3, int32_t a4, int32_t a5, int32_t a6, int64_t value) { - g_call_result = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4, a5, a6, value); + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4, a5, a6, value); } TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke7IntVReg) @@ -1091,14 +1095,14 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke7IntVReg) frame_handler.GetVReg(7).Set(8); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; 
- g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void7IntVReg", &callee, 1, 2, 3, 4, 5, 6, 7, 8)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void7IntVReg", &callee, 1, 2, 3, 4, 5, 6, 7, 8)); int64_t args[] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void7IntVReg", &callee, 1, 2, 3, 4, 5, 6, 7, 8)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void7IntVReg", &callee, 1, 2, 3, 4, 5, 6, 7, 8)); FreeFrame(frame); } @@ -1107,7 +1111,7 @@ static void Void8Int9Double(Method *method, int32_t a0, int32_t a1, int32_t a2, int32_t a6, int32_t a7, double d0, double d1, double d2, double d3, double d4, double d5, double d6, double d7, double d8) { - g_call_result = + G_CALL_RESULT = PrintFunc("void", __FUNCTION__, method, a0, a1, a2, a3, a4, a5, a6, a7, d0, d1, d2, d3, d4, d5, d6, d7, d8); } @@ -1139,9 +1143,9 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke8Int9Double) frame_handler.GetVReg(16).Set(17.0); uint8_t call_range_insn[] = {static_cast(Opcode::CALL_RANGE_V8_ID16), 0x00, 0, 0, 0, 0}; - g_call_result = ""; + G_CALL_RESULT = ""; InterpreterToCompiledCodeBridge(call_range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void8Int9Double", &callee, 1, 2, 3, 4, 5, 6, 7, 8, 9.0, 10.0, 11.0, + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void8Int9Double", &callee, 1, 2, 3, 4, 5, 6, 7, 8, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0)); int64_t args[] = {1, @@ -1161,18 +1165,33 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, Invoke8Int9Double) bit_cast(15.0), bit_cast(16.0), bit_cast(17.0)}; - g_call_result = ""; + G_CALL_RESULT = ""; InvokeCompiledCodeWithArgArray(args, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("void", "Void8Int9Double", &callee, 1, 2, 3, 4, 5, 
6, 7, 8, 9.0, 10.0, 11.0, + ASSERT_EQ(G_CALL_RESULT, PrintFunc("void", "Void8Int9Double", &callee, 1, 2, 3, 4, 5, 6, 7, 8, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0)); FreeFrame(frame); } #if !defined(PANDA_TARGET_ARM32) && !defined(PANDA_TARGET_X86) -static TaggedValue NoArgDyn(Method *method, uint32_t num_args) + +extern "C" { +using I2CBridgeTestDynFn = TaggedValue(Method *, uint32_t, TaggedType *); +// NOLINTNEXTLINE(readability-identifier-naming) +extern I2CBridgeTestDynFn *I2CBridgeTestDynCallee; +// NOLINTNEXTLINE(readability-identifier-naming) +extern const void *I2CBridgeTestDynWrapper; +} + +static void SetupDynCallee(Method *callee, I2CBridgeTestDynFn *fn) { - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args); + I2CBridgeTestDynCallee = fn; + callee->SetCompiledEntryPoint(reinterpret_cast(&I2CBridgeTestDynWrapper)); +} + +static TaggedValue NoArgDyn(Method *method, uint32_t num_args, [[maybe_unused]] TaggedType *args) +{ + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args); return TaggedValue(77.0); } @@ -1183,33 +1202,34 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeNoArgDyn) TaggedValue value_out(77.0); Method callee(nullptr, nullptr, panda_file::File::EntityId(), panda_file::File::EntityId(), ACC_STATIC, 0, nullptr); - callee.SetCompiledEntryPoint(reinterpret_cast(NoArgDyn)); + SetupDynCallee(&callee, &NoArgDyn); uint8_t insn[] = {static_cast(Opcode::CALLI_DYN_SHORT_IMM4_V4_V4_V4), 0x00, 0x00}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "NoArgDyn", &callee, 0)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "NoArgDyn", &callee, 0)); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint8_t range_insn[] = {static_cast(Opcode::CALLI_DYN_RANGE_IMM16_V16), 0x00, 0x00, 0x00, 0x00}; - g_call_result = ""; + G_CALL_RESULT = ""; 
frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "NoArgDyn", &callee, 0)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "NoArgDyn", &callee, 0)); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); - g_call_result = ""; + G_CALL_RESULT = ""; uint64_t res = InvokeCompiledCodeWithArgArrayDyn(nullptr, 0, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "NoArgDyn", &callee, 0)); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "NoArgDyn", &callee, 0)); ASSERT_EQ(res, value_out.GetRawData()); FreeFrame(frame); } -static TaggedValue OneArgDyn(Method *method, uint32_t num_args, int64_t arg0) +static TaggedValue OneArgDyn(Method *method, uint32_t num_args, TaggedType *args) { - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args, arg0); + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args, args[0]); return TaggedValue(24.0); } @@ -1222,43 +1242,40 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeOneArgDyn) frame_handler.GetVReg(0).SetValue(value_in0.GetRawData()); Method callee(nullptr, nullptr, panda_file::File::EntityId(), panda_file::File::EntityId(), ACC_STATIC, 0, nullptr); - callee.SetCompiledEntryPoint(reinterpret_cast(OneArgDyn)); + SetupDynCallee(&callee, &OneArgDyn); uint8_t insn[] = {static_cast(Opcode::CALLI_DYN_SHORT_IMM4_V4_V4_V4), 0x01, 0x00}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "OneArgDyn", &callee, 1, value_in0.GetRawData())); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "OneArgDyn", &callee, 1, value_in0.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint8_t range_insn[] = {static_cast(Opcode::CALLI_DYN_RANGE_IMM16_V16), 0x01, 0x00, 
0x00, 0x00}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "OneArgDyn", &callee, 1, value_in0.GetRawData())); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "OneArgDyn", &callee, 1, value_in0.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint64_t args[] = {value_in0.GetRawData()}; - g_call_result = ""; + G_CALL_RESULT = ""; uint64_t res = InvokeCompiledCodeWithArgArrayDyn(args, 1, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "OneArgDyn", &callee, 1, value_in0.GetRawData())); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "OneArgDyn", &callee, 1, value_in0.GetRawData())); ASSERT_EQ(res, value_out.GetRawData()); FreeFrame(frame); } -static TaggedValue OneVarArgDyn(Method *method, uint32_t num_args, ...) +static TaggedValue OneVarArgDyn(Method *method, uint32_t num_args, TaggedType *args) { if (num_args != 1) { - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args); + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args); return TaggedValue(1.0); } - va_list args; - va_start(args, num_args); - int64_t arg0 = va_arg(args, int64_t); - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args, arg0); - va_end(args); + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args, args[0]); return TaggedValue(-1.0); } @@ -1266,32 +1283,33 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeOneVarArgDyn) { Frame *frame = CreateFrame(1, nullptr, nullptr); auto frame_handler = DynamicFrameHandler(frame); - TaggedValue value_in0(42.0); + TaggedValue value_in0(0xdead); TaggedValue value_out(-1.0); frame_handler.GetVReg(0).SetValue(value_in0.GetRawData()); Method callee(nullptr, nullptr, panda_file::File::EntityId(), panda_file::File::EntityId(), ACC_STATIC, 0, nullptr); - 
callee.SetCompiledEntryPoint(reinterpret_cast(OneVarArgDyn)); + SetupDynCallee(&callee, &OneVarArgDyn); uint8_t insn[] = {static_cast(Opcode::CALLI_DYN_SHORT_IMM4_V4_V4_V4), 0x01, 0x00}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "OneVarArgDyn", &callee, 1, value_in0.GetRawData())); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "OneVarArgDyn", &callee, 1, value_in0.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint64_t args[] = {value_in0.GetRawData()}; - g_call_result = ""; + G_CALL_RESULT = ""; uint64_t res = InvokeCompiledCodeWithArgArrayDyn(args, 1, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "OneVarArgDyn", &callee, 1, value_in0.GetRawData())); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "OneVarArgDyn", &callee, 1, value_in0.GetRawData())); ASSERT_EQ(res, value_out.GetRawData()); FreeFrame(frame); } -static TaggedValue TwoArgDyn(Method *method, uint32_t num_args, int64_t arg0, int64_t arg1) +static TaggedValue TwoArgDyn(Method *method, uint32_t num_args, TaggedType *args) { - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args, arg0, arg1); + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args, args[0], args[1]); return TaggedValue(64.0); } @@ -1306,44 +1324,40 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeTwoArgDyn) frame_handler.GetVReg(1).SetValue(value_in1.GetRawData()); Method callee(nullptr, nullptr, panda_file::File::EntityId(), panda_file::File::EntityId(), ACC_STATIC, 0, nullptr); - callee.SetCompiledEntryPoint(reinterpret_cast(TwoArgDyn)); + SetupDynCallee(&callee, &TwoArgDyn); uint8_t insn[] = {static_cast(Opcode::CALLI_DYN_SHORT_IMM4_V4_V4_V4), 0x02, 0x01}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); 
InterpreterToCompiledCodeBridgeDyn(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "TwoArgDyn", &callee, 2, value_in0.GetRawData(), value_in1.GetRawData())); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "TwoArgDyn", &callee, 2, value_in0.GetRawData(), value_in1.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint8_t range_insn[] = {static_cast(Opcode::CALLI_DYN_RANGE_IMM16_V16), 0x02, 0x00, 0x00, 0x00}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "TwoArgDyn", &callee, 2, value_in0.GetRawData(), value_in1.GetRawData())); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "TwoArgDyn", &callee, 2, value_in0.GetRawData(), value_in1.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint64_t args[] = {value_in0.GetRawData(), value_in1.GetRawData()}; - g_call_result = ""; + G_CALL_RESULT = ""; uint64_t res = InvokeCompiledCodeWithArgArrayDyn(args, 2, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "TwoArgDyn", &callee, 2, value_in0.GetRawData(), value_in1.GetRawData())); + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "TwoArgDyn", &callee, 2, value_in0.GetRawData(), value_in1.GetRawData())); ASSERT_EQ(res, value_out.GetRawData()); FreeFrame(frame); } -static TaggedValue TwoVarArgDyn(Method *method, uint32_t num_args, ...) 
+static TaggedValue TwoVarArgDyn(Method *method, uint32_t num_args, TaggedType *args) { if (num_args != 2) { - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args); + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args); return TaggedValue(2.0); } - va_list args; - va_start(args, num_args); - int64_t arg0 = va_arg(args, int64_t); - int64_t arg1 = va_arg(args, int64_t); - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args, arg0, arg1); - va_end(args); + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args, args[0], args[1]); return TaggedValue(-2.0); } @@ -1358,29 +1372,30 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeTwoVarArgDyn) frame_handler.GetVReg(1).SetValue(value_in1.GetRawData()); Method callee(nullptr, nullptr, panda_file::File::EntityId(), panda_file::File::EntityId(), ACC_STATIC, 0, nullptr); - callee.SetCompiledEntryPoint(reinterpret_cast(TwoVarArgDyn)); + SetupDynCallee(&callee, &TwoVarArgDyn); uint8_t insn[] = {static_cast(Opcode::CALLI_DYN_SHORT_IMM4_V4_V4_V4), 0x02, 0x01}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "TwoVarArgDyn", &callee, 2, value_in0.GetRawData(), value_in1.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint64_t args[] = {value_in0.GetRawData(), value_in1.GetRawData()}; - g_call_result = ""; + G_CALL_RESULT = ""; uint64_t res = InvokeCompiledCodeWithArgArrayDyn(args, 2, frame, &callee, thread_); - ASSERT_EQ(g_call_result, + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "TwoVarArgDyn", &callee, 2, value_in0.GetRawData(), value_in1.GetRawData())); ASSERT_EQ(res, value_out.GetRawData()); FreeFrame(frame); } -static TaggedValue ThreeArgDyn(Method *method, uint32_t num_args, int64_t arg0, int64_t arg1, 
int64_t arg2) +static TaggedValue ThreeArgDyn(Method *method, uint32_t num_args, TaggedType *args) { - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args, arg0, arg1, arg2); + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args, args[0], args[1], args[2]); return TaggedValue(1961.0); } @@ -1397,48 +1412,43 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeThreeArgDyn) frame_handler.GetVReg(2).SetValue(value_in2.GetRawData()); Method callee(nullptr, nullptr, panda_file::File::EntityId(), panda_file::File::EntityId(), ACC_STATIC, 0, nullptr); - callee.SetCompiledEntryPoint(reinterpret_cast(ThreeArgDyn)); + SetupDynCallee(&callee, &ThreeArgDyn); uint8_t insn[] = {static_cast(Opcode::CALLI_DYN_IMM4_V4_V4_V4_V4_V4), 0x03, 0x21, 0x00}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "ThreeArgDyn", &callee, 3, value_in0.GetRawData(), value_in1.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "ThreeArgDyn", &callee, 3, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint8_t range_insn[] = {static_cast(Opcode::CALLI_DYN_RANGE_IMM16_V16), 0x03, 0x00, 0x00, 0x00}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "ThreeArgDyn", &callee, 3, value_in0.GetRawData(), value_in1.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "ThreeArgDyn", &callee, 3, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint64_t args[] = {value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData()}; - 
g_call_result = ""; + G_CALL_RESULT = ""; uint64_t res = InvokeCompiledCodeWithArgArrayDyn(args, 3, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "ThreeArgDyn", &callee, 3, value_in0.GetRawData(), value_in1.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "ThreeArgDyn", &callee, 3, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData())); ASSERT_EQ(res, value_out.GetRawData()); FreeFrame(frame); } -static TaggedValue ThreeVarArgDyn(Method *method, uint32_t num_args, ...) +static TaggedValue ThreeVarArgDyn(Method *method, uint32_t num_args, TaggedType *args) { if (num_args != 3) { - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args); + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args); return TaggedValue(3.0); } - va_list args; - va_start(args, num_args); - int64_t arg0 = va_arg(args, int64_t); - int64_t arg1 = va_arg(args, int64_t); - int64_t arg2 = va_arg(args, int64_t); - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args, arg0, arg1, arg2); - va_end(args); + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args, args[0], args[1], args[2]); return TaggedValue(-3.0); } @@ -1455,29 +1465,30 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeThreeVarArgDyn) frame_handler.GetVReg(2).SetValue(value_in2.GetRawData()); Method callee(nullptr, nullptr, panda_file::File::EntityId(), panda_file::File::EntityId(), ACC_STATIC, 0, nullptr); - callee.SetCompiledEntryPoint(reinterpret_cast(ThreeVarArgDyn)); + SetupDynCallee(&callee, &ThreeVarArgDyn); uint8_t insn[] = {static_cast(Opcode::CALLI_DYN_IMM4_V4_V4_V4_V4_V4), 0x03, 0x21, 0x00}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "ThreeVarArgDyn", &callee, 3, value_in0.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, 
PrintFunc("any", "ThreeVarArgDyn", &callee, 3, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint64_t args[] = {value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData()}; - g_call_result = ""; + G_CALL_RESULT = ""; uint64_t res = InvokeCompiledCodeWithArgArrayDyn(args, 3, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "ThreeVarArgDyn", &callee, 3, value_in0.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "ThreeVarArgDyn", &callee, 3, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData())); ASSERT_EQ(res, value_out.GetRawData()); FreeFrame(frame); } -static TaggedValue FourArgDyn(Method *method, uint32_t num_args, int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3) +static TaggedValue FourArgDyn(Method *method, uint32_t num_args, TaggedType *args) { - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args, arg0, arg1, arg2, arg3); + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args, args[0], args[1], args[2], args[3]); return TaggedValue(3.14); } @@ -1496,49 +1507,43 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeFourArgDyn) frame_handler.GetVReg(3).SetValue(value_in3.GetRawData()); Method callee(nullptr, nullptr, panda_file::File::EntityId(), panda_file::File::EntityId(), ACC_STATIC, 0, nullptr); - callee.SetCompiledEntryPoint(reinterpret_cast(FourArgDyn)); + SetupDynCallee(&callee, &FourArgDyn); uint8_t insn[] = {static_cast(Opcode::CALLI_DYN_IMM4_V4_V4_V4_V4_V4), 0x04, 0x21, 0x03}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "FourArgDyn", &callee, 4, value_in0.GetRawData(), value_in1.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "FourArgDyn", 
&callee, 4, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData(), value_in3.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint8_t range_insn[] = {static_cast(Opcode::CALLI_DYN_RANGE_IMM16_V16), 0x04, 0x00, 0x00, 0x00}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "FourArgDyn", &callee, 4, value_in0.GetRawData(), value_in1.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "FourArgDyn", &callee, 4, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData(), value_in3.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint64_t args[] = {value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData(), value_in3.GetRawData()}; - g_call_result = ""; + G_CALL_RESULT = ""; uint64_t res = InvokeCompiledCodeWithArgArrayDyn(args, 4, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "FourArgDyn", &callee, 4, value_in0.GetRawData(), value_in1.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "FourArgDyn", &callee, 4, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData(), value_in3.GetRawData())); ASSERT_EQ(res, value_out.GetRawData()); FreeFrame(frame); } -static TaggedValue FourVarArgDyn(Method *method, uint32_t num_args, ...) 
+static TaggedValue FourVarArgDyn(Method *method, uint32_t num_args, TaggedType *args) { if (num_args != 4) { - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args); + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args); return TaggedValue(4.0); } - va_list args; - va_start(args, num_args); - int64_t arg0 = va_arg(args, int64_t); - int64_t arg1 = va_arg(args, int64_t); - int64_t arg2 = va_arg(args, int64_t); - int64_t arg3 = va_arg(args, int64_t); - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args, arg0, arg1, arg2, arg3); - va_end(args); + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args, args[0], args[1], args[2], args[3]); return TaggedValue(-4.0); } @@ -1557,30 +1562,30 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeFourVarArgDyn) frame_handler.GetVReg(3).SetValue(value_in3.GetRawData()); Method callee(nullptr, nullptr, panda_file::File::EntityId(), panda_file::File::EntityId(), ACC_STATIC, 0, nullptr); - callee.SetCompiledEntryPoint(reinterpret_cast(FourVarArgDyn)); + SetupDynCallee(&callee, &FourVarArgDyn); uint8_t insn[] = {static_cast(Opcode::CALLI_DYN_IMM4_V4_V4_V4_V4_V4), 0x04, 0x21, 0x03}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "FourVarArgDyn", &callee, 4, value_in0.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "FourVarArgDyn", &callee, 4, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData(), value_in3.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint64_t args[] = {value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData(), value_in3.GetRawData()}; - g_call_result = ""; + G_CALL_RESULT = ""; uint64_t res = InvokeCompiledCodeWithArgArrayDyn(args, 4, frame, &callee, thread_); - 
ASSERT_EQ(g_call_result, PrintFunc("any", "FourVarArgDyn", &callee, 4, value_in0.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "FourVarArgDyn", &callee, 4, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData(), value_in3.GetRawData())); ASSERT_EQ(res, value_out.GetRawData()); FreeFrame(frame); } -static TaggedValue FiveArgDyn(Method *method, uint32_t num_args, int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3, - int64_t arg4) +static TaggedValue FiveArgDyn(Method *method, uint32_t num_args, TaggedType *args) { - g_call_result = PrintFunc("any", __FUNCTION__, method, num_args, arg0, arg1, arg2, arg3, arg4); + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + G_CALL_RESULT = PrintFunc("any", __FUNCTION__, method, num_args, args[0], args[1], args[2], args[3], args[4]); return TaggedValue(1515.0); } @@ -1601,21 +1606,21 @@ TEST_F(InterpreterToCompiledCodeBridgeTest, InvokeFiveArgDyn) frame_handler.GetVReg(4).SetValue(value_in4.GetRawData()); Method callee(nullptr, nullptr, panda_file::File::EntityId(), panda_file::File::EntityId(), ACC_STATIC, 0, nullptr); - callee.SetCompiledEntryPoint(reinterpret_cast(FiveArgDyn)); + SetupDynCallee(&callee, &FiveArgDyn); uint8_t range_insn[] = {static_cast(Opcode::CALLI_DYN_RANGE_IMM16_V16), 0x05, 0x00, 0x00, 0x00}; - g_call_result = ""; + G_CALL_RESULT = ""; frame_handler.GetAcc().SetValue(0); InterpreterToCompiledCodeBridgeDyn(range_insn, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "FiveArgDyn", &callee, 5, value_in0.GetRawData(), value_in1.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "FiveArgDyn", &callee, 5, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData(), value_in3.GetRawData(), value_in4.GetRawData())); ASSERT_EQ(frame_handler.GetAcc().GetValue(), value_out.GetRawData()); uint64_t args[] = {value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData(), value_in3.GetRawData(), value_in4.GetRawData()}; - 
g_call_result = ""; + G_CALL_RESULT = ""; uint64_t res = InvokeCompiledCodeWithArgArrayDyn(args, 5, frame, &callee, thread_); - ASSERT_EQ(g_call_result, PrintFunc("any", "FiveArgDyn", &callee, 5, value_in0.GetRawData(), value_in1.GetRawData(), + ASSERT_EQ(G_CALL_RESULT, PrintFunc("any", "FiveArgDyn", &callee, 5, value_in0.GetRawData(), value_in1.GetRawData(), value_in2.GetRawData(), value_in3.GetRawData(), value_in4.GetRawData())); ASSERT_EQ(res, value_out.GetRawData()); diff --git a/runtime/tests/internal_allocator_test.cpp b/runtime/tests/internal_allocator_test.cpp index 35c2f4944fbcbf6b77503f1d07e16a936e974735..db83b89c2d52a0bb6950d6f41df4c1e60d3b81d7 100644 --- a/runtime/tests/internal_allocator_test.cpp +++ b/runtime/tests/internal_allocator_test.cpp @@ -78,20 +78,20 @@ TEST_F(InternalAllocatorTest, AvoidInfiniteLoopTest) } struct A { - static size_t count; + static size_t count_; A() { - value = ++count; + value = ++count_; } ~A() { - --count; + --count_; } uint8_t value; }; -size_t A::count = 0; +size_t A::count_ = 0; TEST_F(InternalAllocatorTest, NewDeleteArray) { @@ -100,12 +100,12 @@ TEST_F(InternalAllocatorTest, NewDeleteArray) auto arr = allocator_->New(COUNT); ASSERT_NE(arr, nullptr); ASSERT_EQ(ToUintPtr(arr) % DEFAULT_ALIGNMENT_IN_BYTES, 0); - ASSERT_EQ(A::count, COUNT); + ASSERT_EQ(A::count_, COUNT); for (uint8_t i = 1; i <= COUNT; ++i) { ASSERT_EQ(arr[i - 1].value, i); } allocator_->DeleteArray(arr); - ASSERT_EQ(A::count, 0); + ASSERT_EQ(A::count_, 0); } TEST_F(InternalAllocatorTest, ZeroSizeTest) diff --git a/runtime/tests/interpreter/test_runtime_interface.cpp b/runtime/tests/interpreter/test_runtime_interface.cpp index 776a5e3e7f602c5a39b2893fef969b0f9be06b52..0b072787e766ba7b2a0ac5e07bf1871e8f305e87 100644 --- a/runtime/tests/interpreter/test_runtime_interface.cpp +++ b/runtime/tests/interpreter/test_runtime_interface.cpp @@ -17,49 +17,49 @@ namespace panda::interpreter::test { -RuntimeInterface::NullPointerExceptionData 
RuntimeInterface::npe_data; +RuntimeInterface::NullPointerExceptionData RuntimeInterface::npe_data_; -RuntimeInterface::ArrayIndexOutOfBoundsExceptionData RuntimeInterface::array_oob_exception_data; +RuntimeInterface::ArrayIndexOutOfBoundsExceptionData RuntimeInterface::array_oob_exception_data_; -RuntimeInterface::NegativeArraySizeExceptionData RuntimeInterface::array_neg_size_exception_data; +RuntimeInterface::NegativeArraySizeExceptionData RuntimeInterface::array_neg_size_exception_data_; -RuntimeInterface::ArithmeticException RuntimeInterface::arithmetic_exception_data; +RuntimeInterface::ArithmeticException RuntimeInterface::arithmetic_exception_data_; -RuntimeInterface::ClassCastExceptionData RuntimeInterface::class_cast_exception_data; +RuntimeInterface::ClassCastExceptionData RuntimeInterface::class_cast_exception_data_; -RuntimeInterface::AbstractMethodError RuntimeInterface::abstract_method_error_data; +RuntimeInterface::AbstractMethodError RuntimeInterface::abstract_method_error_data_; -RuntimeInterface::ArrayStoreExceptionData RuntimeInterface::array_store_exception_data; +RuntimeInterface::ArrayStoreExceptionData RuntimeInterface::array_store_exception_data_; -coretypes::Array *RuntimeInterface::array_object; +coretypes::Array *RuntimeInterface::array_object_; -Class *RuntimeInterface::array_class; +Class *RuntimeInterface::array_class_; -uint32_t RuntimeInterface::array_length; +uint32_t RuntimeInterface::array_length_; -Class *RuntimeInterface::resolved_class; +Class *RuntimeInterface::resolved_class_; -ObjectHeader *RuntimeInterface::object; +ObjectHeader *RuntimeInterface::object_; -Class *RuntimeInterface::object_class; +Class *RuntimeInterface::object_class_; -uint32_t RuntimeInterface::catch_block_pc_offset; +uint32_t RuntimeInterface::catch_block_pc_offset_; -RuntimeInterface::InvokeMethodHandler RuntimeInterface::invoke_handler; +RuntimeInterface::InvokeMethodHandler RuntimeInterface::invoke_handler_; 
DummyGC::DummyGC(panda::mem::ObjectAllocatorBase *object_allocator, const panda::mem::GCSettings &settings) : GC(object_allocator, settings) { } -DummyGC RuntimeInterface::dummy_gc(nullptr, panda::mem::GCSettings()); +DummyGC RuntimeInterface::dummy_gc_(nullptr, panda::mem::GCSettings()); -Method *RuntimeInterface::resolved_method; +Method *RuntimeInterface::resolved_method_; -Field *RuntimeInterface::resolved_field; +Field *RuntimeInterface::resolved_field_; -const void *RuntimeInterface::entry_point; +const void *RuntimeInterface::entry_point_; -uint32_t RuntimeInterface::jit_threshold; +uint32_t RuntimeInterface::jit_threshold_; } // namespace panda::interpreter::test diff --git a/runtime/tests/interpreter/test_runtime_interface.h b/runtime/tests/interpreter/test_runtime_interface.h index 18ad6d17810a515392f32e0d1b4d95b8ee8d34e4..822429bef3fcf483d55d3e39e721361b8ee76ddb 100644 --- a/runtime/tests/interpreter/test_runtime_interface.h +++ b/runtime/tests/interpreter/test_runtime_interface.h @@ -151,38 +151,38 @@ public: BytecodeId id) { EXPECT_EQ(id, METHOD_ID); - return resolved_method; + return resolved_method_; } static Field *ResolveField([[maybe_unused]] ManagedThread *thread, [[maybe_unused]] const Method &caller, BytecodeId id) { EXPECT_EQ(id, FIELD_ID); - return resolved_field; + return resolved_field_; } - template + template static Class *ResolveClass([[maybe_unused]] ManagedThread *thread, [[maybe_unused]] const Method &caller, BytecodeId id) { EXPECT_EQ(id, TYPE_ID); - return resolved_class; + return resolved_class_; } static uint32_t FindCatchBlock([[maybe_unused]] const Method &method, [[maybe_unused]] ObjectHeader *exception, [[maybe_unused]] uint32_t pc) { - return catch_block_pc_offset; + return catch_block_pc_offset_; } static void SetCatchBlockPcOffset(uint32_t pc_offset) { - catch_block_pc_offset = pc_offset; + catch_block_pc_offset_ = pc_offset; } static uint32_t GetCompilerHotnessThreshold() { - return jit_threshold; + return jit_threshold_; 
} static bool IsCompilerEnableJit() @@ -192,12 +192,12 @@ public: static void SetCompilerHotnessThreshold(uint32_t threshold) { - jit_threshold = threshold; + jit_threshold_ = threshold; } static void JITCompileMethod(Method *method) { - method->SetCompiledEntryPoint(entry_point); + method->SetCompiledEntryPoint(entry_point_); } static void SetCurrentFrame([[maybe_unused]] ManagedThread *thread, Frame *frame) @@ -213,115 +213,115 @@ public: static void SetupResolvedMethod(Method *method) { ManagedThread::GetCurrent()->GetInterpreterCache()->Clear(); - resolved_method = method; + resolved_method_ = method; } static void SetupResolvedField(Field *field) { ManagedThread::GetCurrent()->GetInterpreterCache()->Clear(); - resolved_field = field; + resolved_field_ = field; } static void SetupResolvedClass(Class *klass) { ManagedThread::GetCurrent()->GetInterpreterCache()->Clear(); - resolved_class = klass; + resolved_class_ = klass; } static void SetupCatchBlockPcOffset(uint32_t pc_offset) { - catch_block_pc_offset = pc_offset; + catch_block_pc_offset_ = pc_offset; } static void SetupNativeEntryPoint(const void *p) { - entry_point = p; + entry_point_ = p; } static coretypes::Array *CreateArray(Class *klass, coretypes::array_size_t length) { - EXPECT_EQ(klass, array_class); - EXPECT_EQ(length, array_length); - return array_object; + EXPECT_EQ(klass, array_class_); + EXPECT_EQ(length, array_length_); + return array_object_; } static void SetupArrayClass(Class *klass) { - array_class = klass; + array_class_ = klass; } static void SetupArrayLength(coretypes::array_size_t length) { - array_length = length; + array_length_ = length; } static void SetupArrayObject(coretypes::Array *obj) { - array_object = obj; + array_object_ = obj; } static ObjectHeader *CreateObject(Class *klass) { - EXPECT_EQ(klass, object_class); - return object; + EXPECT_EQ(klass, object_class_); + return object_; } static void SetupObjectClass(Class *klass) { - object_class = klass; + object_class_ = klass; 
} static void SetupObject(ObjectHeader *obj) { - object = obj; + object_ = obj; } static Value InvokeMethod(ManagedThread *thread, Method *method, Value *args) { - return invoke_handler(thread, method, args); + return invoke_handler_(thread, method, args); } static void SetupInvokeMethodHandler(InvokeMethodHandler handler) { - invoke_handler = handler; + invoke_handler_ = handler; } // Throw exceptions static void ThrowNullPointerException() { - ASSERT_TRUE(npe_data.expected); + ASSERT_TRUE(npe_data_.expected); } static void ThrowArrayIndexOutOfBoundsException(coretypes::array_ssize_t idx, coretypes::array_size_t length) { - ASSERT_TRUE(array_oob_exception_data.expected); - ASSERT_EQ(array_oob_exception_data.idx, idx); - ASSERT_EQ(array_oob_exception_data.length, length); + ASSERT_TRUE(array_oob_exception_data_.expected); + ASSERT_EQ(array_oob_exception_data_.idx, idx); + ASSERT_EQ(array_oob_exception_data_.length, length); } static void ThrowNegativeArraySizeException(coretypes::array_ssize_t size) { - ASSERT_TRUE(array_neg_size_exception_data.expected); - ASSERT_EQ(array_neg_size_exception_data.size, size); + ASSERT_TRUE(array_neg_size_exception_data_.expected); + ASSERT_EQ(array_neg_size_exception_data_.size, size); } static void ThrowArithmeticException() { - ASSERT_TRUE(arithmetic_exception_data.expected); + ASSERT_TRUE(arithmetic_exception_data_.expected); } static void ThrowClassCastException(Class *dst_type, Class *src_type) { - ASSERT_TRUE(class_cast_exception_data.expected); - ASSERT_EQ(class_cast_exception_data.dst_type, dst_type); - ASSERT_EQ(class_cast_exception_data.src_type, src_type); + ASSERT_TRUE(class_cast_exception_data_.expected); + ASSERT_EQ(class_cast_exception_data_.dst_type, dst_type); + ASSERT_EQ(class_cast_exception_data_.src_type, src_type); } static void ThrowAbstractMethodError(Method *method) { - ASSERT_TRUE(abstract_method_error_data.expected); - ASSERT_EQ(abstract_method_error_data.method, method); + 
ASSERT_TRUE(abstract_method_error_data_.expected); + ASSERT_EQ(abstract_method_error_data_.method, method); } static void ThrowIncompatibleClassChangeErrorForMethodConflict([[maybe_unused]] Method *method) {} @@ -336,53 +336,53 @@ public: static void ThrowArrayStoreException(Class *array_klass, Class *elem_class) { - ASSERT_TRUE(array_store_exception_data.expected); - ASSERT_EQ(array_store_exception_data.array_class, array_klass); - ASSERT_EQ(array_store_exception_data.elem_class, elem_class); + ASSERT_TRUE(array_store_exception_data_.expected); + ASSERT_EQ(array_store_exception_data_.array_class, array_klass); + ASSERT_EQ(array_store_exception_data_.elem_class, elem_class); } static void SetArrayStoreException(ArrayStoreExceptionData data) { - array_store_exception_data = data; + array_store_exception_data_ = data; } static void SetNullPointerExceptionData(NullPointerExceptionData data) { - npe_data = data; + npe_data_ = data; } static void SetArrayIndexOutOfBoundsExceptionData(ArrayIndexOutOfBoundsExceptionData data) { - array_oob_exception_data = data; + array_oob_exception_data_ = data; } static void SetNegativeArraySizeExceptionData(NegativeArraySizeExceptionData data) { - array_neg_size_exception_data = data; + array_neg_size_exception_data_ = data; } static void SetArithmeticExceptionData(ArithmeticException data) { - arithmetic_exception_data = data; + arithmetic_exception_data_ = data; } static void SetClassCastExceptionData(ClassCastExceptionData data) { - class_cast_exception_data = data; + class_cast_exception_data_ = data; } static void SetAbstractMethodErrorData(AbstractMethodError data) { - abstract_method_error_data = data; + abstract_method_error_data_ = data; } - template + template static Frame *CreateFrame(size_t nregs, Method *method, Frame *prev) { uint32_t ext_sz = EmptyExtFrameDataSize; auto allocator = Thread::GetCurrent()->GetVM()->GetHeapManager()->GetInternalAllocator(); void *mem = - 
allocator->Allocate(panda::Frame::GetAllocSize(panda::Frame::GetActualSize(nregs), ext_sz), + allocator->Allocate(panda::Frame::GetAllocSize(panda::Frame::GetActualSize(nregs), ext_sz), GetLogAlignment(8), ManagedThread::GetCurrent()); return new (Frame::FromExt(mem, ext_sz)) panda::Frame(mem, method, prev, nregs); } @@ -408,7 +408,7 @@ public: static mem::GC *GetGC() { - return &panda::interpreter::test::RuntimeInterface::dummy_gc; + return &panda::interpreter::test::RuntimeInterface::dummy_gc_; } static const uint8_t *GetMethodName([[maybe_unused]] Method *caller, [[maybe_unused]] BytecodeId method_id) @@ -418,7 +418,7 @@ public: static Class *GetMethodClass([[maybe_unused]] Method *caller, [[maybe_unused]] BytecodeId method_id) { - return resolved_class; + return resolved_class_; } static uint32_t GetMethodArgumentsCount([[maybe_unused]] Method *caller, [[maybe_unused]] BytecodeId method_id) @@ -436,45 +436,45 @@ public: } private: - static ArrayIndexOutOfBoundsExceptionData array_oob_exception_data; + static ArrayIndexOutOfBoundsExceptionData array_oob_exception_data_; - static NegativeArraySizeExceptionData array_neg_size_exception_data; + static NegativeArraySizeExceptionData array_neg_size_exception_data_; - static NullPointerExceptionData npe_data; + static NullPointerExceptionData npe_data_; - static ArithmeticException arithmetic_exception_data; + static ArithmeticException arithmetic_exception_data_; - static ClassCastExceptionData class_cast_exception_data; + static ClassCastExceptionData class_cast_exception_data_; - static AbstractMethodError abstract_method_error_data; + static AbstractMethodError abstract_method_error_data_; - static ArrayStoreExceptionData array_store_exception_data; + static ArrayStoreExceptionData array_store_exception_data_; - static coretypes::Array *array_object; + static coretypes::Array *array_object_; - static Class *array_class; + static Class *array_class_; - static coretypes::array_size_t array_length; + static 
coretypes::array_size_t array_length_; - static ObjectHeader *object; + static ObjectHeader *object_; - static Class *object_class; + static Class *object_class_; - static Class *resolved_class; + static Class *resolved_class_; - static uint32_t catch_block_pc_offset; + static uint32_t catch_block_pc_offset_; - static Method *resolved_method; + static Method *resolved_method_; - static Field *resolved_field; + static Field *resolved_field_; - static InvokeMethodHandler invoke_handler; + static InvokeMethodHandler invoke_handler_; - static const void *entry_point; + static const void *entry_point_; - static uint32_t jit_threshold; + static uint32_t jit_threshold_; - static panda::interpreter::test::DummyGC dummy_gc; + static panda::interpreter::test::DummyGC dummy_gc_; }; } // namespace panda::interpreter::test diff --git a/runtime/tests/interpreter_test.cpp b/runtime/tests/interpreter_test.cpp index 4b1556c9fbf31c0daecee52e7dd8493b78a6e1a9..57465769392177ff323c6608a780360ef87b85f0 100644 --- a/runtime/tests/interpreter_test.cpp +++ b/runtime/tests/interpreter_test.cpp @@ -849,41 +849,41 @@ TEST_F(InterpreterTest, TestCast) // clang-format off -template +template struct ArrayComponentTypeHelper { - using type = std::conditional_t>>>>>>>>>>>; + using Type = std::conditional_t>>>>>>>>>>>; }; // clang-format on -template -using ArrayComponentTypeHelperT = typename ArrayComponentTypeHelper::type; +template +using ArrayComponentTypeHelperT = typename ArrayComponentTypeHelper::Type; -template +template struct ArrayStoredTypeHelperT { - using type = typename ArrayComponentTypeHelper::type; + using Type = typename ArrayComponentTypeHelper::Type; }; template <> struct ArrayStoredTypeHelperT { - using type = ObjectPointerType; + using Type = ObjectPointerType; }; -template -typename ArrayStoredTypeHelperT::type CastIfRef(ArrayComponentTypeHelperT value) +template +typename ArrayStoredTypeHelperT::Type CastIfRef(ArrayComponentTypeHelperT value) { - if constexpr (type_id == 
panda_file::Type::TypeId::REFERENCE) { + if constexpr (TYPE_ID == panda_file::Type::TypeId::REFERENCE) { return static_cast(reinterpret_cast(value)); } else { return value; @@ -933,14 +933,14 @@ PandaString GetArrayClassName(panda_file::Type::TypeId component_type_id) return descriptor; } -template +template static void TestArray() { std::ostringstream ss; - ss << "Test with component type id " << static_cast(component_type_id); + ss << "Test with component type id " << static_cast(COMPONENT_TYPE_ID); - using component_type = ArrayComponentTypeHelperT; - using stored_type = typename ArrayStoredTypeHelperT::type; + using ComponentType = ArrayComponentTypeHelperT; + using StoredType = typename ArrayStoredTypeHelperT::Type; BytecodeEmitter emitter; @@ -952,30 +952,30 @@ static void TestArray() ASSERT_NE(class_linker, nullptr) << ss.str(); auto ctx = Runtime::GetCurrent()->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY); - PandaString array_class_name = GetArrayClassName(component_type_id); + PandaString array_class_name = GetArrayClassName(COMPONENT_TYPE_ID); Class *array_class = class_linker->GetExtension(ctx)->GetClass(utf::CStringAsMutf8(array_class_name.c_str())); Class *elem_class = array_class->GetComponentType(); - const component_type STORE_VALUE = GetStoreValue(elem_class); - const component_type LOAD_VALUE = GetLoadValue(elem_class); + const ComponentType store_value = GetStoreValue(elem_class); + const ComponentType load_value = GetLoadValue(elem_class); emitter.Movi(0, ARRAY_LENGTH); emitter.Newarr(1, 0, RuntimeInterface::TYPE_ID.AsIndex()); - if constexpr (component_type_id == panda_file::Type::TypeId::REFERENCE) { + if constexpr (COMPONENT_TYPE_ID == panda_file::Type::TypeId::REFERENCE) { emitter.LdaObj(4); - } else if constexpr (component_type_id == panda_file::Type::TypeId::F32) { - emitter.Fldai(bit_cast(STORE_VALUE)); - } else if constexpr (component_type_id == panda_file::Type::TypeId::F64) { - emitter.FldaiWide(bit_cast(STORE_VALUE)); + 
} else if constexpr (COMPONENT_TYPE_ID == panda_file::Type::TypeId::F32) { + emitter.Fldai(bit_cast(store_value)); + } else if constexpr (COMPONENT_TYPE_ID == panda_file::Type::TypeId::F64) { + emitter.FldaiWide(bit_cast(store_value)); } else { - emitter.LdaiWide(static_cast(STORE_VALUE)); + emitter.LdaiWide(static_cast(store_value)); } emitter.Movi(2, STORE_IDX); - if constexpr (component_type_id != panda_file::Type::TypeId::REFERENCE) { - switch (component_type_id) { + if constexpr (COMPONENT_TYPE_ID != panda_file::Type::TypeId::REFERENCE) { + switch (COMPONENT_TYPE_ID) { case panda_file::Type::TypeId::U1: case panda_file::Type::TypeId::U8: { emitter.Starr8(1, 2); @@ -1034,7 +1034,7 @@ static void TestArray() emitter.LdarrObj(1); } - if constexpr (component_type_id != panda_file::Type::TypeId::REFERENCE) { + if constexpr (COMPONENT_TYPE_ID != panda_file::Type::TypeId::REFERENCE) { emitter.StaWide(3); } else { emitter.StaObj(3); @@ -1055,12 +1055,12 @@ static void TestArray() f->SetMethod(method.get()); auto frame_handler = StaticFrameHandler(f.get()); - if constexpr (component_type_id == panda_file::Type::TypeId::REFERENCE) { - frame_handler.GetVReg(4).SetReference(STORE_VALUE); + if constexpr (COMPONENT_TYPE_ID == panda_file::Type::TypeId::REFERENCE) { + frame_handler.GetVReg(4).SetReference(store_value); } - coretypes::Array *array = AllocArray(array_class, sizeof(stored_type), ARRAY_LENGTH); - array->Set(LOAD_IDX, LOAD_VALUE); + coretypes::Array *array = AllocArray(array_class, sizeof(StoredType), ARRAY_LENGTH); + array->Set(LOAD_IDX, load_value); RuntimeInterface::SetupResolvedClass(array_class); RuntimeInterface::SetupArrayClass(array_class); @@ -1078,13 +1078,13 @@ static void TestArray() auto *result = static_cast(frame_handler.GetVReg(1).GetReference()); EXPECT_EQ(result, array) << ss.str(); - EXPECT_EQ(frame_handler.GetVReg(3).GetAs(), LOAD_VALUE) << ss.str(); + EXPECT_EQ(frame_handler.GetVReg(3).GetAs(), load_value) << ss.str(); - std::vector 
data(ARRAY_LENGTH); - data[LOAD_IDX] = CastIfRef(LOAD_VALUE); - data[STORE_IDX] = CastIfRef(STORE_VALUE); + std::vector data(ARRAY_LENGTH); + data[LOAD_IDX] = CastIfRef(load_value); + data[STORE_IDX] = CastIfRef(store_value); - EXPECT_THAT(data, ::testing::ElementsAreArray(reinterpret_cast(array->GetData()), ARRAY_LENGTH)) + EXPECT_THAT(data, ::testing::ElementsAreArray(reinterpret_cast(array->GetData()), ARRAY_LENGTH)) << ss.str(); } @@ -1974,7 +1974,7 @@ static void MakeShorty(size_t num_args, std::vector *buf) buf->push_back(val); } -template +template static std::pair, std::unique_ptr> CreateResolvedMethod( Class *klass, size_t vreg_num, const std::vector args, std::vector *bytecode, std::vector *shorty_buf) @@ -1983,7 +1983,7 @@ static std::pair, std::unique_ptr Label label = emitter.CreateLabel(); size_t start_idx = 0; - if constexpr (is_dynamic) { + if constexpr (IS_DYNAMIC) { ++start_idx; // skip function object } for (size_t i = start_idx; i < args.size(); i++) { diff --git a/runtime/tests/interpreter_test_switch.cpp b/runtime/tests/interpreter_test_switch.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9df8b9d516555939d9a60a0405978a77fbe39437 --- /dev/null +++ b/runtime/tests/interpreter_test_switch.cpp @@ -0,0 +1,152 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "assembler/assembly-parser.h" +#include "bytecode_instruction.h" +#include "include/thread_scopes.h" +#include "libpandafile/bytecode_instruction-inl.h" +#include "runtime/include/managed_thread.h" +#include "runtime/include/runtime.h" +#include "runtime/include/runtime_notification.h" +#include "runtime/interpreter/runtime_interface.h" + +namespace panda::interpreter::test { + +class InterpreterTestSwitch : public testing::Test { +public: + InterpreterTestSwitch() + { + RuntimeOptions options; + options.SetShouldLoadBootPandaFiles(false); + options.SetShouldInitializeIntrinsics(false); + options.SetRunGcInPlace(true); + options.SetVerifyCallStack(false); + Runtime::Create(options); + } + + ~InterpreterTestSwitch() override + { + Runtime::Destroy(); + } + + NO_COPY_SEMANTIC(InterpreterTestSwitch); + NO_MOVE_SEMANTIC(InterpreterTestSwitch); +}; + +constexpr int32_t RET = 10; + +static int32_t EntryPoint(Method * /* unused */) +{ + auto *thread = ManagedThread::GetCurrent(); + thread->SetCurrentDispatchTable( + thread->GetDebugDispatchTable()); + return RET; +} + +TEST_F(InterpreterTestSwitch, SwitchToDebug) +{ + pandasm::Parser p; + + auto source = R"( + .function i32 f() { + ldai 10 + return + } + + .function i32 g() { + call f + return + } + + .function i32 main() { + call g + return + } + )"; + + auto res = p.Parse(source); + auto pf = pandasm::AsmEmitter::Emit(res.Value()); + ASSERT_NE(pf, nullptr); + + ClassLinker *class_linker = Runtime::GetCurrent()->GetClassLinker(); + class_linker->AddPandaFile(std::move(pf)); + auto *extension = class_linker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY); + + class Listener : public RuntimeListener { + public: + struct Event { + ManagedThread *thread; + Method *method; + uint32_t bc_offset; + }; + + void BytecodePcChanged(ManagedThread *thread, Method *method, uint32_t bc_offset) override + { + events_.push_back({thread, method, bc_offset}); + } + + auto &GetEvents() const + { 
+ return events_; + } + + private: + std::vector events_ {}; + }; + + Listener listener {}; + + auto *notification_manager = Runtime::GetCurrent()->GetNotificationManager(); + notification_manager->AddListener(&listener, RuntimeNotificationManager::BYTECODE_PC_CHANGED); + + std::vector args; + Value v; + Method *main_method; + + auto *thread = ManagedThread::GetCurrent(); + + { + ScopedManagedCodeThread smc(thread); + PandaString descriptor; + + Class *klass = extension->GetClass(ClassHelper::GetDescriptor(utf::CStringAsMutf8("_GLOBAL"), &descriptor)); + ASSERT_NE(klass, nullptr); + + main_method = klass->GetDirectMethod(utf::CStringAsMutf8("main")); + ASSERT_NE(main_method, nullptr); + + Method *f_method = klass->GetDirectMethod(utf::CStringAsMutf8("f")); + ASSERT_NE(f_method, nullptr); + + f_method->SetCompiledEntryPoint(reinterpret_cast(EntryPoint)); + + v = main_method->Invoke(thread, args.data()); + } + + notification_manager->RemoveListener(&listener, RuntimeNotificationManager::BYTECODE_PC_CHANGED); + + ASSERT_EQ(v.GetAs(), RET); + ASSERT_EQ(listener.GetEvents().size(), 1U); + + auto &event = listener.GetEvents()[0]; + EXPECT_EQ(event.thread, thread); + EXPECT_EQ(event.method, main_method); + EXPECT_EQ(event.bc_offset, BytecodeInstruction::Size(BytecodeInstruction::Format::V4_V4_ID16)); +} + +} // namespace panda::interpreter::test \ No newline at end of file diff --git a/runtime/tests/intrinsics_blacklist_test.cpp b/runtime/tests/intrinsics_blacklist_test.cpp index dc19736b3a72d147326efdb1d28682f09e1e48f2..1d80cb06a0a3c995a0263fd9a0de0bab2a03aaf0 100644 --- a/runtime/tests/intrinsics_blacklist_test.cpp +++ b/runtime/tests/intrinsics_blacklist_test.cpp @@ -20,7 +20,7 @@ namespace panda::test { -inline std::string separator() +inline std::string Separator() { #ifdef _WIN32 return "\\"; @@ -66,7 +66,7 @@ TEST_F(IntrinsicsBlacklistTest, DisableIntrinsic) options.SetIntrinsicsBlacklist({"Math::absI32"}); auto exec_path = 
panda::os::file::File::GetExecutablePath(); std::string panda_std_lib = - exec_path.Value() + separator() + ".." + separator() + "pandastdlib" + separator() + "arkstdlib.abc"; + exec_path.Value() + Separator() + ".." + Separator() + "pandastdlib" + Separator() + "arkstdlib.abc"; options.SetBootPandaFiles({panda_std_lib}); CreateRuntime(options); pandasm::Parser parser; diff --git a/runtime/tests/mark_word_test.cpp b/runtime/tests/mark_word_test.cpp index 5d58a48aef47cb7064eb90e8186e71dc035d859a..0264f0d25bc04229644b97383254bf3a7cab501f 100644 --- a/runtime/tests/mark_word_test.cpp +++ b/runtime/tests/mark_word_test.cpp @@ -58,36 +58,36 @@ protected: #endif gen_ = std::mt19937(seed_); - threadIdRange_ = MarkWordDistribution(0, MAX_THREAD_ID); - lockCountRange_ = MarkWordDistribution(0, MAX_LOCK_COUNT); - monitorIdRange_ = MarkWordDistribution(0, MAX_MONITOR_ID); - hashRange_ = MarkWordDistribution(0, MAX_HASH); - forwardingAddressRange_ = MarkWordDistribution(0, MAX_FORWARDING_ADDRESS); + thread_id_range_ = MarkWordDistribution(0, MAX_THREAD_ID); + lock_count_range_ = MarkWordDistribution(0, MAX_LOCK_COUNT); + monitor_id_range_ = MarkWordDistribution(0, MAX_MONITOR_ID); + hash_range_ = MarkWordDistribution(0, MAX_HASH); + forwarding_address_range_ = MarkWordDistribution(0, MAX_FORWARDING_ADDRESS); } ManagedThread::ThreadId GetThreadId() { - return threadIdRange_(gen_); + return thread_id_range_(gen_); } uint32_t GetLockCount() { - return lockCountRange_(gen_); + return lock_count_range_(gen_); } Monitor::MonitorId GetMonitorId() { - return monitorIdRange_(gen_); + return monitor_id_range_(gen_); } uint32_t GetHash() { - return hashRange_(gen_); + return hash_range_(gen_); } MarkWord::markWordSize GetForwardingAddress() { - return forwardingAddressRange_(gen_) & MarkWord::MarkWordRepresentation::FORWARDING_ADDRESS_MASK_IN_PLACE; + return forwarding_address_range_(gen_) & MarkWord::MarkWordRepresentation::FORWARDING_ADDRESS_MASK_IN_PLACE; } uint32_t GetSeed() @@ 
-98,11 +98,11 @@ protected: private: uint32_t seed_; std::mt19937 gen_; - MarkWordDistribution threadIdRange_; - MarkWordDistribution lockCountRange_; - MarkWordDistribution monitorIdRange_; - MarkWordDistribution hashRange_; - MarkWordDistribution forwardingAddressRange_; + MarkWordDistribution thread_id_range_; + MarkWordDistribution lock_count_range_; + MarkWordDistribution monitor_id_range_; + MarkWordDistribution hash_range_; + MarkWordDistribution forwarding_address_range_; }; class MaxTestValuesGetter { @@ -142,67 +142,68 @@ protected: template class MarkWordWrapper { public: - MarkWordWrapper(bool isMarkedForGC = false, bool isReadBarrierSet = false) + MarkWordWrapper(bool is_marked_for_gc = false, bool is_read_barrier_set = false) { - if (isMarkedForGC) { + if (is_marked_for_gc) { mw_ = mw_.SetMarkedForGC(); } - if (isReadBarrierSet) { + if (is_read_barrier_set) { mw_ = mw_.SetReadBarrier(); } }; - void CheckUnlocked(bool isMarkedForGC = false, bool isReadBarrierSet = false) + void CheckUnlocked(bool is_marked_for_gc = false, bool is_read_barrier_set = false) { - ASSERT_EQ(mw_.GetState(), MarkWord::ObjectState::STATE_UNLOCKED) << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.IsMarkedForGC(), isMarkedForGC) << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.IsReadBarrierSet(), isReadBarrierSet) << " seed = " << paramGetter_.GetSeed(); + ASSERT_EQ(mw_.GetState(), MarkWord::ObjectState::STATE_UNLOCKED) << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.IsMarkedForGC(), is_marked_for_gc) << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.IsReadBarrierSet(), is_read_barrier_set) << " seed = " << param_getter_.GetSeed(); } - void CheckLightweightLock(const ManagedThread::ThreadId tId, const uint32_t lockCount, bool isMarkedForGC, - bool isReadBarrierSet = false) + void CheckLightweightLock(const ManagedThread::ThreadId t_id, const uint32_t lock_count, bool is_marked_for_gc, + bool is_read_barrier_set = false) { 
ASSERT_EQ(mw_.GetState(), MarkWord::ObjectState::STATE_LIGHT_LOCKED) - << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.GetThreadId(), tId) << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.GetLockCount(), lockCount) << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.IsMarkedForGC(), isMarkedForGC) << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.IsReadBarrierSet(), isReadBarrierSet) << " seed = " << paramGetter_.GetSeed(); + << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.GetThreadId(), t_id) << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.GetLockCount(), lock_count) << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.IsMarkedForGC(), is_marked_for_gc) << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.IsReadBarrierSet(), is_read_barrier_set) << " seed = " << param_getter_.GetSeed(); } - void CheckHeavyweightLock(const Monitor::MonitorId mId, bool isMarkedForGC, bool isReadBarrierSet = false) + void CheckHeavyweightLock(const Monitor::MonitorId m_id, bool is_marked_for_gc, + bool is_read_barrier_set = false) { ASSERT_EQ(mw_.GetState(), MarkWord::ObjectState::STATE_HEAVY_LOCKED) - << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.GetMonitorId(), mId) << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.IsMarkedForGC(), isMarkedForGC) << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.IsReadBarrierSet(), isReadBarrierSet) << " seed = " << paramGetter_.GetSeed(); + << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.GetMonitorId(), m_id) << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.IsMarkedForGC(), is_marked_for_gc) << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.IsReadBarrierSet(), is_read_barrier_set) << " seed = " << param_getter_.GetSeed(); } - void CheckHashed(uint32_t hash, bool isMarkedForGC, bool isReadBarrierSet = false) + void CheckHashed(uint32_t hash, bool is_marked_for_gc, bool is_read_barrier_set = false) { if (mw_.CONFIG_IS_HASH_IN_OBJ_HEADER) { 
- ASSERT_EQ(mw_.GetState(), MarkWord::ObjectState::STATE_HASHED) << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.GetHash(), hash) << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.IsMarkedForGC(), isMarkedForGC) << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.IsReadBarrierSet(), isReadBarrierSet) << " seed = " << paramGetter_.GetSeed(); + ASSERT_EQ(mw_.GetState(), MarkWord::ObjectState::STATE_HASHED) << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.GetHash(), hash) << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.IsMarkedForGC(), is_marked_for_gc) << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.IsReadBarrierSet(), is_read_barrier_set) << " seed = " << param_getter_.GetSeed(); } } - void CheckGC(MarkWord::markWordSize forwardingAddress) + void CheckGC(MarkWord::markWordSize forwarding_address) { - ASSERT_EQ(mw_.GetState(), MarkWord::ObjectState::STATE_GC) << " seed = " << paramGetter_.GetSeed(); - ASSERT_EQ(mw_.GetForwardingAddress(), forwardingAddress) << " seed = " << paramGetter_.GetSeed(); + ASSERT_EQ(mw_.GetState(), MarkWord::ObjectState::STATE_GC) << " seed = " << param_getter_.GetSeed(); + ASSERT_EQ(mw_.GetForwardingAddress(), forwarding_address) << " seed = " << param_getter_.GetSeed(); } - void DecodeLightLock(ManagedThread::ThreadId tId, uint32_t lCount) + void DecodeLightLock(ManagedThread::ThreadId t_id, uint32_t l_count) { - mw_ = mw_.DecodeFromLightLock(tId, lCount); + mw_ = mw_.DecodeFromLightLock(t_id, l_count); } - void DecodeHeavyLock(Monitor::MonitorId mId) + void DecodeHeavyLock(Monitor::MonitorId m_id) { - mw_ = mw_.DecodeFromMonitor(mId); + mw_ = mw_.DecodeFromMonitor(m_id); } void DecodeHash(uint32_t hash) @@ -210,38 +211,38 @@ protected: mw_ = mw_.DecodeFromHash(hash); } - void DecodeForwardingAddress(MarkWord::markWordSize fAddress) + void DecodeForwardingAddress(MarkWord::markWordSize f_address) { - mw_ = mw_.DecodeFromForwardingAddress(fAddress); + mw_ = 
mw_.DecodeFromForwardingAddress(f_address); } - void DecodeAndCheckLightLock(bool isMarkedForGC = false, bool isReadBarrierSet = false) + void DecodeAndCheckLightLock(bool is_marked_for_gc = false, bool is_read_barrier_set = false) { - auto tId = paramGetter_.GetThreadId(); - auto lCount = paramGetter_.GetLockCount(); - DecodeLightLock(tId, lCount); - CheckLightweightLock(tId, lCount, isMarkedForGC, isReadBarrierSet); + auto t_id = param_getter_.GetThreadId(); + auto l_count = param_getter_.GetLockCount(); + DecodeLightLock(t_id, l_count); + CheckLightweightLock(t_id, l_count, is_marked_for_gc, is_read_barrier_set); } - void DecodeAndCheckHeavyLock(bool isMarkedForGC = false, bool isReadBarrierSet = false) + void DecodeAndCheckHeavyLock(bool is_marked_for_gc = false, bool is_read_barrier_set = false) { - auto mId = paramGetter_.GetMonitorId(); - DecodeHeavyLock(mId); - CheckHeavyweightLock(mId, isMarkedForGC, isReadBarrierSet); + auto m_id = param_getter_.GetMonitorId(); + DecodeHeavyLock(m_id); + CheckHeavyweightLock(m_id, is_marked_for_gc, is_read_barrier_set); } - void DecodeAndCheckHashed(bool isMarkedForGC = false, bool isReadBarrierSet = false) + void DecodeAndCheckHashed(bool is_marked_for_gc = false, bool is_read_barrier_set = false) { - auto hash = paramGetter_.GetHash(); + auto hash = param_getter_.GetHash(); DecodeHash(hash); - CheckHashed(hash, isMarkedForGC, isReadBarrierSet); + CheckHashed(hash, is_marked_for_gc, is_read_barrier_set); } void DecodeAndCheckGC() { - auto fAddress = paramGetter_.GetForwardingAddress(); - DecodeForwardingAddress(fAddress); - CheckGC(fAddress); + auto f_address = param_getter_.GetForwardingAddress(); + DecodeForwardingAddress(f_address); + CheckGC(f_address); } void SetMarkedForGC() @@ -256,17 +257,17 @@ protected: private: MarkWord mw_; - Getter paramGetter_; + Getter param_getter_; }; template - void CheckMakeHashed(bool isMarkedForGC, bool isReadBarrierSet); + void CheckMakeHashed(bool is_marked_for_gc, bool 
is_read_barrier_set); template - void CheckMakeLightweightLock(bool isMarkedForGC, bool isReadBarrierSet); + void CheckMakeLightweightLock(bool is_marked_for_gc, bool is_read_barrier_set); template - void CheckMakeHeavyweightLock(bool isMarkedForGC, bool isReadBarrierSet); + void CheckMakeHeavyweightLock(bool is_marked_for_gc, bool is_read_barrier_set); template void CheckMakeGC(); @@ -279,22 +280,22 @@ protected: }; template -void MarkWordTest::CheckMakeHashed(bool isMarkedForGC, bool isReadBarrierSet) +void MarkWordTest::CheckMakeHashed(bool is_marked_for_gc, bool is_read_barrier_set) { // nothing, gc = markedForGC, rb = readBarrierSet, state = unlocked - MarkWordWrapper wrapper(isMarkedForGC, isReadBarrierSet); + MarkWordWrapper wrapper(is_marked_for_gc, is_read_barrier_set); // check new hash - wrapper.DecodeAndCheckHashed(isMarkedForGC, isReadBarrierSet); - wrapper.DecodeAndCheckHashed(isMarkedForGC, isReadBarrierSet); + wrapper.DecodeAndCheckHashed(is_marked_for_gc, is_read_barrier_set); + wrapper.DecodeAndCheckHashed(is_marked_for_gc, is_read_barrier_set); // check after lightweight lock - wrapper.DecodeAndCheckLightLock(isMarkedForGC, isReadBarrierSet); - wrapper.DecodeAndCheckHashed(isMarkedForGC, isReadBarrierSet); + wrapper.DecodeAndCheckLightLock(is_marked_for_gc, is_read_barrier_set); + wrapper.DecodeAndCheckHashed(is_marked_for_gc, is_read_barrier_set); // check after heavyweight lock - wrapper.DecodeAndCheckHeavyLock(isMarkedForGC, isReadBarrierSet); - wrapper.DecodeAndCheckHashed(isMarkedForGC, isReadBarrierSet); + wrapper.DecodeAndCheckHeavyLock(is_marked_for_gc, is_read_barrier_set); + wrapper.DecodeAndCheckHashed(is_marked_for_gc, is_read_barrier_set); } TEST_F(MarkWordTest, CreateHashedWithRandValues) @@ -314,22 +315,22 @@ TEST_F(MarkWordTest, CreateHashedWithMaxValues) } template -void MarkWordTest::CheckMakeLightweightLock(bool isMarkedForGC, bool isReadBarrierSet) +void MarkWordTest::CheckMakeLightweightLock(bool is_marked_for_gc, bool 
is_read_barrier_set) { // nothing, gc = markedForGC, rb = readBarrierSet, state = unlocked - MarkWordWrapper wrapper(isMarkedForGC, isReadBarrierSet); + MarkWordWrapper wrapper(is_marked_for_gc, is_read_barrier_set); // check new lightweight lock - wrapper.DecodeAndCheckLightLock(isMarkedForGC, isReadBarrierSet); - wrapper.DecodeAndCheckLightLock(isMarkedForGC, isReadBarrierSet); + wrapper.DecodeAndCheckLightLock(is_marked_for_gc, is_read_barrier_set); + wrapper.DecodeAndCheckLightLock(is_marked_for_gc, is_read_barrier_set); // check after hash - wrapper.DecodeAndCheckHashed(isMarkedForGC, isReadBarrierSet); - wrapper.DecodeAndCheckLightLock(isMarkedForGC, isReadBarrierSet); + wrapper.DecodeAndCheckHashed(is_marked_for_gc, is_read_barrier_set); + wrapper.DecodeAndCheckLightLock(is_marked_for_gc, is_read_barrier_set); // check after heavyweight lock - wrapper.DecodeAndCheckHeavyLock(isMarkedForGC, isReadBarrierSet); - wrapper.DecodeAndCheckLightLock(isMarkedForGC, isReadBarrierSet); + wrapper.DecodeAndCheckHeavyLock(is_marked_for_gc, is_read_barrier_set); + wrapper.DecodeAndCheckLightLock(is_marked_for_gc, is_read_barrier_set); } TEST_F(MarkWordTest, CreateLightweightLockWithRandValues) @@ -349,22 +350,22 @@ TEST_F(MarkWordTest, CreateLightweightLockWithMaxValues) } template -void MarkWordTest::CheckMakeHeavyweightLock(bool isMarkedForGC, bool isReadBarrierSet) +void MarkWordTest::CheckMakeHeavyweightLock(bool is_marked_for_gc, bool is_read_barrier_set) { // nothing, gc = markedForGC, rb = readBarrierSet, state = unlocked - MarkWordWrapper wrapper(isMarkedForGC, isReadBarrierSet); + MarkWordWrapper wrapper(is_marked_for_gc, is_read_barrier_set); // check new heavyweight lock - wrapper.DecodeAndCheckHeavyLock(isMarkedForGC, isReadBarrierSet); - wrapper.DecodeAndCheckHeavyLock(isMarkedForGC, isReadBarrierSet); + wrapper.DecodeAndCheckHeavyLock(is_marked_for_gc, is_read_barrier_set); + wrapper.DecodeAndCheckHeavyLock(is_marked_for_gc, is_read_barrier_set); // check 
after hash - wrapper.DecodeAndCheckHashed(isMarkedForGC, isReadBarrierSet); - wrapper.DecodeAndCheckHeavyLock(isMarkedForGC, isReadBarrierSet); + wrapper.DecodeAndCheckHashed(is_marked_for_gc, is_read_barrier_set); + wrapper.DecodeAndCheckHeavyLock(is_marked_for_gc, is_read_barrier_set); // check after lightweight lock - wrapper.DecodeAndCheckLightLock(isMarkedForGC, isReadBarrierSet); - wrapper.DecodeAndCheckHeavyLock(isMarkedForGC, isReadBarrierSet); + wrapper.DecodeAndCheckLightLock(is_marked_for_gc, is_read_barrier_set); + wrapper.DecodeAndCheckHeavyLock(is_marked_for_gc, is_read_barrier_set); } TEST_F(MarkWordTest, CreateHeavyweightLockWithRandValues) @@ -428,7 +429,7 @@ TEST_F(MarkWordTest, CreateGCWithMaxValues) template void MarkWordTest::CheckMarkingWithGC() { - Getter paramGetter; + Getter param_getter; // with unlocked { @@ -441,28 +442,28 @@ void MarkWordTest::CheckMarkingWithGC() // with lightweight locked { MarkWordWrapper wrapper; - auto tId = paramGetter.GetThreadId(); - auto lCount = paramGetter.GetLockCount(); - wrapper.DecodeLightLock(tId, lCount); + auto t_id = param_getter.GetThreadId(); + auto l_count = param_getter.GetLockCount(); + wrapper.DecodeLightLock(t_id, l_count); wrapper.SetMarkedForGC(); - wrapper.CheckLightweightLock(tId, lCount, true); + wrapper.CheckLightweightLock(t_id, l_count, true); } // with heavyweight locked { MarkWordWrapper wrapper; - auto mId = paramGetter.GetMonitorId(); - wrapper.DecodeHeavyLock(mId); + auto m_id = param_getter.GetMonitorId(); + wrapper.DecodeHeavyLock(m_id); wrapper.SetMarkedForGC(); - wrapper.CheckHeavyweightLock(mId, true); + wrapper.CheckHeavyweightLock(m_id, true); } // with hashed { MarkWordWrapper wrapper; - auto hash = paramGetter.GetHash(); + auto hash = param_getter.GetHash(); wrapper.DecodeHash(hash); wrapper.SetMarkedForGC(); @@ -483,7 +484,7 @@ TEST_F(MarkWordTest, MarkWithGCWithMaxValues) template void MarkWordTest::CheckReadBarrierSet() { - Getter paramGetter; + Getter param_getter; // 
with unlocked { @@ -496,28 +497,28 @@ void MarkWordTest::CheckReadBarrierSet() // with lightweight locked { MarkWordWrapper wrapper; - auto tId = paramGetter.GetThreadId(); - auto lCount = paramGetter.GetLockCount(); - wrapper.DecodeLightLock(tId, lCount); + auto t_id = param_getter.GetThreadId(); + auto l_count = param_getter.GetLockCount(); + wrapper.DecodeLightLock(t_id, l_count); wrapper.SetReadBarrier(); - wrapper.CheckLightweightLock(tId, lCount, false, true); + wrapper.CheckLightweightLock(t_id, l_count, false, true); } // with heavyweight locked { MarkWordWrapper wrapper; - auto mId = paramGetter.GetMonitorId(); - wrapper.DecodeHeavyLock(mId); + auto m_id = param_getter.GetMonitorId(); + wrapper.DecodeHeavyLock(m_id); wrapper.SetReadBarrier(); - wrapper.CheckHeavyweightLock(mId, false, true); + wrapper.CheckHeavyweightLock(m_id, false, true); } // with hashed { MarkWordWrapper wrapper; - auto hash = paramGetter.GetHash(); + auto hash = param_getter.GetHash(); wrapper.DecodeHash(hash); wrapper.SetReadBarrier(); diff --git a/runtime/tests/math_helpers_test.cpp b/runtime/tests/math_helpers_test.cpp index 6c88d813d7b1f156490aa50be3fe4c6ab13f3e8a..6fbbb164fd5f8bf22d20b2b3f07c93725c1328e6 100644 --- a/runtime/tests/math_helpers_test.cpp +++ b/runtime/tests/math_helpers_test.cpp @@ -26,7 +26,7 @@ void TestBitShl() ss << "Test bit_shl with sizeof(T) = "; ss << sizeof(T); - using unsigned_type = std::make_unsigned_t; + using UnsignedType = std::make_unsigned_t; { T v = 1; @@ -37,21 +37,21 @@ void TestBitShl() { T v = 1; - size_t shift = std::numeric_limits::digits - 1; + size_t shift = std::numeric_limits::digits - 1; T res = bit_shl()(v, shift); EXPECT_EQ(res, static_cast(v << shift)) << ss.str(); } { T v = 1; - size_t shift = std::numeric_limits::digits; + size_t shift = std::numeric_limits::digits; T res = bit_shl()(v, shift); EXPECT_EQ(res, v) << ss.str(); } { T v = 1; - size_t shift = std::numeric_limits::digits + 2; + size_t shift = std::numeric_limits::digits 
+ 2; T res = bit_shl()(v, shift); EXPECT_EQ(res, v << 2) << ss.str(); } @@ -64,7 +64,7 @@ void TestBitShr() ss << "Test bit_shr with sizeof(T) = "; ss << sizeof(T); - using unsigned_type = std::make_unsigned_t; + using UnsignedType = std::make_unsigned_t; { T v = 64; @@ -75,21 +75,21 @@ void TestBitShr() { T v = std::numeric_limits::min(); - T shift = std::numeric_limits::digits - 1; + T shift = std::numeric_limits::digits - 1; T res = bit_shr()(v, shift); EXPECT_EQ(res, 1) << ss.str(); } { T v = 1; - T shift = std::numeric_limits::digits; + T shift = std::numeric_limits::digits; T res = bit_shr()(v, shift); EXPECT_EQ(res, v) << ss.str(); } { T v = 20; - T shift = std::numeric_limits::digits + 2; + T shift = std::numeric_limits::digits + 2; T res = bit_shr()(v, shift); EXPECT_EQ(res, v >> 2) << ss.str(); } @@ -102,7 +102,7 @@ void TestBitAshr() ss << "Test bit_ashr with sizeof(T) = "; ss << sizeof(T); - using unsigned_type = std::make_unsigned_t; + using UnsignedType = std::make_unsigned_t; { T v = 64; @@ -113,21 +113,21 @@ void TestBitAshr() { T v = std::numeric_limits::min(); - T shift = std::numeric_limits::digits - 1; + T shift = std::numeric_limits::digits - 1; T res = bit_ashr()(v, shift); EXPECT_EQ(res, -1) << ss.str(); } { T v = 1; - T shift = std::numeric_limits::digits; + T shift = std::numeric_limits::digits; T res = bit_ashr()(v, shift); EXPECT_EQ(res, v) << ss.str(); } { T v = 20; - T shift = std::numeric_limits::digits + 2; + T shift = std::numeric_limits::digits + 2; T res = bit_ashr()(v, shift); EXPECT_EQ(res, v >> 2) << ss.str(); } diff --git a/runtime/tests/mem_stats_additional_info_test.cpp b/runtime/tests/mem_stats_additional_info_test.cpp index a4934e64676eb37bfe0221cbf3cb7f967398b1f5..38c8a7a91a75e118dbfdb208ca49a22256a81fbc 100644 --- a/runtime/tests/mem_stats_additional_info_test.cpp +++ b/runtime/tests/mem_stats_additional_info_test.cpp @@ -89,10 +89,10 @@ TEST_F(MemStatsAdditionalInfoTest, AdditionalStatistic) [[maybe_unused]] HandleScope 
scope(thread_); [[maybe_unused]] VMHandle handle(thread_, string_object); #ifndef NDEBUG - Class *stringClass = Runtime::GetCurrent()->GetClassLinker()->GetExtension(ctx)->GetClassRoot(ClassRoot::STRING); + Class *string_class = Runtime::GetCurrent()->GetClassLinker()->GetExtension(ctx)->GetClassRoot(ClassRoot::STRING); auto statistics = thread_->GetVM()->GetMemStats()->GetStatistics(vm->GetHeapManager()); // allocated - ASSERT_TRUE(statistics.find(stringClass->GetName()) != std::string::npos); + ASSERT_TRUE(statistics.find(string_class->GetName()) != std::string::npos); ASSERT_TRUE(statistics.find("footprint") != std::string::npos); ASSERT_TRUE(statistics.find("1") != std::string::npos); #endif diff --git a/runtime/tests/mem_stats_gc_test.cpp b/runtime/tests/mem_stats_gc_test.cpp index 38971b44ca872d81769de836a38842edbfbef9bb..40980c4555cfa1f75d19e96fcee40a8f50229a6a 100644 --- a/runtime/tests/mem_stats_gc_test.cpp +++ b/runtime/tests/mem_stats_gc_test.cpp @@ -38,28 +38,28 @@ public: options.SetRunGcInPlace(true); bool success = Runtime::Create(options); ASSERT_TRUE(success) << "Cannot create Runtime"; - thread_ = panda::MTManagedThread::GetCurrent(); - thread_->ManagedCodeBegin(); + thread = panda::MTManagedThread::GetCurrent(); + thread->ManagedCodeBegin(); } - template + template void MemStatsTest(uint64_t tries, size_t object_size); void TearDown() override { - thread_->ManagedCodeEnd(); + thread->ManagedCodeEnd(); bool success = Runtime::Destroy(); ASSERT_TRUE(success) << "Cannot destroy Runtime"; } - panda::MTManagedThread *thread_; + panda::MTManagedThread *thread; }; -template +template void MemStatsGCTest::MemStatsTest(uint64_t tries, size_t object_size) { ASSERT(object_size >= sizeof(coretypes::String)); - mem::MemStatsType *stats = thread_->GetVM()->GetMemStats(); + mem::MemStatsType *stats = thread->GetVM()->GetMemStats(); ASSERT_NE(stats, nullptr); auto class_linker = Runtime::GetCurrent()->GetClassLinker(); @@ -71,8 +71,8 @@ void 
MemStatsGCTest::MemStatsTest(uint64_t tries, size_t object_size) simple_string.append("x"); } LanguageContext ctx = Runtime::GetCurrent()->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY); - auto object_allocator = thread_->GetVM()->GetHeapManager()->GetObjectAllocator().AsObjectAllocator(); - thread_->GetVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE)); + auto object_allocator = thread->GetVM()->GetHeapManager()->GetObjectAllocator().AsObjectAllocator(); + thread->GetVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE)); size_t alloc_size = simple_string.size() + sizeof(coretypes::String); size_t aligment_size = 0; @@ -90,25 +90,25 @@ void MemStatsGCTest::MemStatsTest(uint64_t tries, size_t object_size) uint64_t freed_objects = stats->GetTotalObjectsFreed(); uint64_t freed_bytes = stats->GetFreed(SpaceType::SPACE_TYPE_OBJECT); uint64_t diff_total = 0; - std::array *, object_count> handlers; + std::array *, OBJECT_COUNT> handlers; for (size_t i = 0; i < tries; i++) { - [[maybe_unused]] HandleScope scope(thread_); - for (uint64_t j = 0; j < object_count; j++) { + [[maybe_unused]] HandleScope scope(thread); + for (uint64_t j = 0; j < OBJECT_COUNT; j++) { coretypes::String *string_obj = coretypes::String::CreateFromMUtf8(reinterpret_cast(&simple_string[0]), simple_string.length(), ctx, Runtime::GetCurrent()->GetPandaVM()); ASSERT_NE(string_obj, nullptr); - handlers[j] = allocator->New>(thread_, string_obj); + handlers[j] = allocator->New>(thread, string_obj); } - allocated_objects += object_count; - allocated_bytes += object_count * alloc_size; - diff_total += object_count * aligment_diff; + allocated_objects += OBJECT_COUNT; + allocated_bytes += OBJECT_COUNT * alloc_size; + diff_total += OBJECT_COUNT * aligment_diff; ASSERT_EQ(allocated_objects, stats->GetTotalObjectsAllocated()); ASSERT_LE(allocated_bytes, stats->GetAllocated(SpaceType::SPACE_TYPE_OBJECT)); ASSERT_GE(allocated_bytes + diff_total, 
stats->GetAllocated(SpaceType::SPACE_TYPE_OBJECT)); // run GC - thread_->GetVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE)); + thread->GetVM()->GetGC()->WaitForGCInManaged(GCTask(GCTaskCause::EXPLICIT_CAUSE)); ASSERT_EQ(allocated_objects, stats->GetTotalObjectsAllocated()); ASSERT_LE(allocated_bytes, stats->GetAllocated(SpaceType::SPACE_TYPE_OBJECT)); ASSERT_GE(allocated_bytes + diff_total, stats->GetAllocated(SpaceType::SPACE_TYPE_OBJECT)); @@ -116,11 +116,11 @@ void MemStatsGCTest::MemStatsTest(uint64_t tries, size_t object_size) ASSERT_LE(freed_bytes, stats->GetFreed(SpaceType::SPACE_TYPE_OBJECT)); ASSERT_GE(freed_bytes + diff_total, stats->GetFreed(SpaceType::SPACE_TYPE_OBJECT)); - for (uint64_t j = 0; j < object_count; j++) { + for (uint64_t j = 0; j < OBJECT_COUNT; j++) { allocator->Delete(handlers[j]); } - freed_objects += object_count; - freed_bytes += object_count * alloc_size; + freed_objects += OBJECT_COUNT; + freed_bytes += OBJECT_COUNT * alloc_size; } } diff --git a/runtime/tests/mem_stats_gen_gc_test.cpp b/runtime/tests/mem_stats_gen_gc_test.cpp index 4301374a3aac2647fe8e2620a93917466fec2da6..b98f7b44fcf7cca9381022376fe52b4c01afcb7b 100644 --- a/runtime/tests/mem_stats_gen_gc_test.cpp +++ b/runtime/tests/mem_stats_gen_gc_test.cpp @@ -71,45 +71,45 @@ public: }; struct RealStatsLocations { - uint32_t *young_freed_objects_count_; - uint64_t *young_freed_objects_size_; - uint32_t *young_moved_objects_count_; - uint64_t *young_moved_objects_size_; - uint32_t *tenured_freed_objects_count_; - uint64_t *tenured_freed_objects_size_; + uint32_t *young_freed_objects_count; + uint64_t *young_freed_objects_size; + uint32_t *young_moved_objects_count; + uint64_t *young_moved_objects_size; + uint32_t *tenured_freed_objects_count; + uint64_t *tenured_freed_objects_size; }; - void SetupRuntime(const std::string &gc_type) + void SetupRuntime(const std::string &gc_type_param) { RuntimeOptions options; options.SetShouldLoadBootPandaFiles(false); 
options.SetShouldInitializeIntrinsics(false); options.SetUseTlabForAllocations(false); - options.SetGcType(gc_type); + options.SetGcType(gc_type_param); options.SetGcTriggerType("debug-never"); options.SetRunGcInPlace(true); options.SetCompilerEnableJit(false); [[maybe_unused]] bool success = Runtime::Create(options); ASSERT(success); - thread_ = panda::MTManagedThread::GetCurrent(); - gc_type_ = Runtime::GetGCType(options, plugins::RuntimeTypeToLang(options.GetRuntimeType())); - [[maybe_unused]] auto gc = thread_->GetVM()->GetGC(); - ASSERT(gc->GetType() == panda::mem::GCTypeFromString(gc_type)); - ASSERT(gc->IsGenerational()); - thread_->ManagedCodeBegin(); + thread = panda::MTManagedThread::GetCurrent(); + gc_type = Runtime::GetGCType(options, plugins::RuntimeTypeToLang(options.GetRuntimeType())); + [[maybe_unused]] auto gc_local = thread->GetVM()->GetGC(); + ASSERT(gc_local->GetType() == panda::mem::GCTypeFromString(gc_type_param)); + ASSERT(gc_local->IsGenerational()); + thread->ManagedCodeBegin(); } void ResetRuntime() { DeleteHandles(); - internal_allocator_->Delete(gccnt_); - thread_->ManagedCodeEnd(); + internal_allocator->Delete(gccnt); + thread->ManagedCodeEnd(); bool success = Runtime::Destroy(); ASSERT_TRUE(success) << "Cannot destroy Runtime"; } - template + template ObjVec MakeAllocationsWithRepeats(size_t min_size, size_t max_size, size_t count, size_t *allocated, size_t *requested, F space_checker, bool check_oom_in_tenured); @@ -125,7 +125,7 @@ public: bool NeedToCheckYoungFreedCount() { - return (gc_type_ != GCType::G1_GC) || Runtime::GetOptions().IsG1TrackFreedObjects(); + return (gc_type != GCType::G1_GC) || Runtime::GetOptions().IsG1TrackFreedObjects(); } template @@ -147,23 +147,23 @@ public: void TearDown() override {} - panda::MTManagedThread *thread_; - GCType gc_type_; - - LanguageContext ctx_ {nullptr}; - ObjectAllocatorBase *object_allocator_; - mem::InternalAllocatorPtr internal_allocator_; - PandaVM *vm_; - GC *gc_; - std::vector 
handles_; - MemStatsType *ms_; - GCStats *gc_ms_; - coretypes::Array *root_ = nullptr; - size_t root_size_ = 0; - GCCounter *gccnt_; + panda::MTManagedThread *thread; + GCType gc_type; + + LanguageContext ctx {nullptr}; + ObjectAllocatorBase *object_allocator; + mem::InternalAllocatorPtr internal_allocator; + PandaVM *vm; + GC *gc; + std::vector handles; + MemStatsType *ms; + GCStats *gc_ms; + coretypes::Array *root = nullptr; + size_t root_size = 0; + GCCounter *gccnt; }; -template +template MemStatsGenGCTest::ObjVec MemStatsGenGCTest::MakeAllocationsWithRepeats(size_t min_size, size_t max_size, size_t count, size_t *allocated, size_t *requested, [[maybe_unused]] F space_checker, @@ -185,23 +185,23 @@ MemStatsGenGCTest::ObjVec MemStatsGenGCTest::MakeAllocationsWithRepeats(size_t m } } ObjVec result; - result.reserve(count * repeat); + result.reserve(count * REPEAT); for (size_t j = 0; j < count; ++j) { size_t size = obj_templates[j].length() + sizeof(coretypes::String); if (check_oom_in_tenured) { // Leaving 5MB in tenured seems OK auto free = - reinterpret_cast(object_allocator_->GetHeapSpace())->GetCurrentFreeTenuredSize(); + reinterpret_cast(object_allocator->GetHeapSpace())->GetCurrentFreeTenuredSize(); if (size + 5000000 > free) { return result; } } - for (size_t i = 0; i < repeat; ++i) { + for (size_t i = 0; i < REPEAT; ++i) { coretypes::String *string_obj = coretypes::String::CreateFromMUtf8( - reinterpret_cast(&obj_templates[j][0]), obj_templates[j].length(), ctx_, vm_); + reinterpret_cast(&obj_templates[j][0]), obj_templates[j].length(), ctx, vm); ASSERT(string_obj != nullptr); ASSERT(space_checker(ToUintPtr(string_obj)) == true); - if (gc_type_ == GCType::G1_GC && SPACE == TargetSpace::HUMONGOUS) { + if (gc_type == GCType::G1_GC && SPACE == TargetSpace::HUMONGOUS) { // for humongous objects in G1 we calculate size of the region instead of just alignment size Region *region = AddrToRegion(string_obj); *allocated += region->Size(); @@ -219,11 +219,11 @@ 
void MemStatsGenGCTest::InitRoot() { ClassLinker *class_linker = Runtime::GetCurrent()->GetClassLinker(); Class *klass = class_linker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY) - ->GetClass(ctx_.GetStringArrayClassDescriptor()); + ->GetClass(ctx.GetStringArrayClassDescriptor()); ASSERT_NE(klass, nullptr); - root_ = coretypes::Array::Create(klass, ROOT_MAX_SIZE); - root_size_ = 0; - MakeObjectsPermAlive({root_}); + root = coretypes::Array::Create(klass, ROOT_MAX_SIZE); + root_size = 0; + MakeObjectsPermAlive({root}); } void MemStatsGenGCTest::MakeObjectsAlive(ObjVec objects, int every) @@ -234,9 +234,9 @@ void MemStatsGenGCTest::MakeObjectsAlive(ObjVec objects, int every) if (cnt != 0) { continue; } - root_->Set(root_size_, obj); - root_size_++; - ASSERT(root_size_ < ROOT_MAX_SIZE); + root->Set(root_size, obj); + root_size++; + ASSERT(root_size < ROOT_MAX_SIZE); cnt = every; } } @@ -249,7 +249,7 @@ void MemStatsGenGCTest::MakeObjectsGarbage(size_t start_idx, size_t after_end_id if (cnt != 0) { continue; } - root_->Set(i, 0); + root->Set(i, 0); cnt = every; } } @@ -264,15 +264,15 @@ void MemStatsGenGCTest::MakeObjectsPermAlive(ObjVec objects, int every) if (cnt != 0) { continue; } - result.push_back(internal_allocator_->New>(thread_, obj)); + result.push_back(internal_allocator->New>(thread, obj)); cnt = every; } - handles_.push_back(result); + handles.push_back(result); } void MemStatsGenGCTest::DumpHandles() { - for (auto &hv : handles_) { + for (auto &hv : handles) { for (auto *handle : hv) { std::cout << "vector " << (void *)&hv << " handle " << (void *)handle << " obj " << handle->GetPtr() << std::endl; @@ -282,22 +282,22 @@ void MemStatsGenGCTest::DumpHandles() void MemStatsGenGCTest::DumpAliveObjects() { - std::cout << "Alive root array : " << handles_[0][0]->GetPtr() << std::endl; - for (size_t i = 0; i < root_size_; ++i) { - if (root_->Get(i) != nullptr) { - std::cout << "Alive idx " << i << " : " << root_->Get(i) << std::endl; + std::cout << 
"Alive root array : " << handles[0][0]->GetPtr() << std::endl; + for (size_t i = 0; i < root_size; ++i) { + if (root->Get(i) != nullptr) { + std::cout << "Alive idx " << i << " : " << root->Get(i) << std::endl; } } } void MemStatsGenGCTest::DeleteHandles() { - for (auto &hv : handles_) { + for (auto &hv : handles) { for (auto *handle : hv) { - internal_allocator_->Delete(handle); + internal_allocator->Delete(handle); } } - handles_.clear(); + handles.clear(); } template @@ -305,15 +305,15 @@ void MemStatsGenGCTest::PrepareTest() { if constexpr (std::is_same::value) { DeleteHandles(); - ctx_ = Runtime::GetCurrent()->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY); - object_allocator_ = thread_->GetVM()->GetHeapManager()->GetObjectAllocator().AsObjectAllocator(); - vm_ = Runtime::GetCurrent()->GetPandaVM(); - internal_allocator_ = Runtime::GetCurrent()->GetClassLinker()->GetAllocator(); - gc_ = vm_->GetGC(); - ms_ = vm_->GetMemStats(); - gc_ms_ = vm_->GetGCStats(); - gccnt_ = internal_allocator_->New(); - gc_->AddListener(gccnt_); + ctx = Runtime::GetCurrent()->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY); + object_allocator = thread->GetVM()->GetHeapManager()->GetObjectAllocator().AsObjectAllocator(); + vm = Runtime::GetCurrent()->GetPandaVM(); + internal_allocator = Runtime::GetCurrent()->GetClassLinker()->GetAllocator(); + gc = vm->GetGC(); + ms = vm->GetMemStats(); + gc_ms = vm->GetGCStats(); + gccnt = internal_allocator->New(); + gc->AddListener(gccnt); InitRoot(); } else { UNREACHABLE(); @@ -324,14 +324,14 @@ template typename GenerationalGC::MemStats *MemStatsGenGCTest::GetGenMemStats() { // An explicit getter, because the typename has to be template-specialized - return &reinterpret_cast *>(gc_)->mem_stats_; + return &reinterpret_cast *>(gc)->mem_stats_; } bool MemStatsGenGCTest::IsInYoung(uintptr_t addr) { - switch (gc_type_) { + switch (gc_type) { case GCType::GEN_GC: { - return object_allocator_->IsAddressInYoungSpace(addr); + 
return object_allocator->IsAddressInYoungSpace(addr); } case GCType::G1_GC: { auto mem_pool = PoolManager::GetMmapMemPool(); @@ -349,7 +349,7 @@ bool MemStatsGenGCTest::IsInYoung(uintptr_t addr) template typename MemStatsGenGCTest::MemOpReport MemStatsGenGCTest::MakeAllocations() { - [[maybe_unused]] int gc_cnt = gccnt_->count; + [[maybe_unused]] int gc_cnt = gccnt->count; MemStatsGenGCTest::MemOpReport report; report.allocated_count = 0; report.allocated_bytes = 0; @@ -362,11 +362,11 @@ typename MemStatsGenGCTest::MemOpReport MemStatsGenGCTest::MakeAllocations() size_t max_size = 0; bool check_oom = false; size_t young_size = reinterpret_cast( - reinterpret_cast(object_allocator_)->GetHeapSpace()) + reinterpret_cast(object_allocator)->GetHeapSpace()) ->GetCurrentMaxYoungSize(); - switch (gc_type_) { + switch (gc_type) { case GCType::GEN_GC: { - auto gen_alloc = reinterpret_cast *>(object_allocator_); + auto gen_alloc = reinterpret_cast *>(object_allocator); count = 15; if constexpr (SPACE == TargetSpace::YOUNG) { min_size = 0; @@ -398,7 +398,7 @@ typename MemStatsGenGCTest::MemOpReport MemStatsGenGCTest::MakeAllocations() break; } case GCType::G1_GC: { - auto g1_alloc = reinterpret_cast *>(object_allocator_); + auto g1_alloc = reinterpret_cast *>(object_allocator); count = 15; if constexpr (SPACE == TargetSpace::YOUNG) { min_size = 0; @@ -479,7 +479,7 @@ typename MemStatsGenGCTest::MemOpReport MemStatsGenGCTest::MakeAllocations() } // We must not have uncounted GCs - ASSERT(gc_cnt == gccnt_->count); + ASSERT(gc_cnt == gccnt->count); return report; } @@ -491,12 +491,12 @@ typename MemStatsGenGCTest::MemOpReport MemStatsGenGCTest::HelpAllocTenured() report.saved_count = 0; report.saved_bytes = 0; - auto old_root_size = root_size_; + auto old_root_size = root_size; // One way to get objects into tenured space - by promotion auto r = MakeAllocations(); - gc_->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE)); - MakeObjectsGarbage(old_root_size, old_root_size + 
(root_size_ - old_root_size) / 2); + gc->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE)); + MakeObjectsGarbage(old_root_size, old_root_size + (root_size - old_root_size) / 2); report.allocated_count = r.saved_count; report.allocated_bytes = r.saved_bytes; @@ -532,108 +532,108 @@ template MemStatsGenGCTest::RealStatsLocations MemStatsGenGCTest::GetGenMemStatsDetails(T gms) { RealStatsLocations loc; - loc.young_freed_objects_count_ = &gms->young_free_object_count_; - loc.young_freed_objects_size_ = &gms->young_free_object_size_; - loc.young_moved_objects_count_ = &gms->young_move_object_count_; - loc.young_moved_objects_size_ = &gms->young_move_object_size_; - loc.tenured_freed_objects_count_ = &gms->tenured_free_object_count_; - loc.tenured_freed_objects_size_ = &gms->tenured_free_object_size_; + loc.young_freed_objects_count = &gms->young_free_object_count_; + loc.young_freed_objects_size = &gms->young_free_object_size_; + loc.young_moved_objects_count = &gms->young_move_object_count_; + loc.young_moved_objects_size = &gms->young_move_object_size_; + loc.tenured_freed_objects_count = &gms->tenured_free_object_count_; + loc.tenured_freed_objects_size = &gms->tenured_free_object_size_; return loc; } TEST_F(MemStatsGenGCTest, TrivialStatsGenGcTest) { for (int gctype_idx = 0; static_cast(gctype_idx) <= GCType::GCTYPE_LAST; ++gctype_idx) { - GCType gc_type = static_cast(gctype_idx); - if (gc_type == GCType::INVALID_GC) { + GCType gc_type_local = static_cast(gctype_idx); + if (gc_type_local == GCType::INVALID_GC) { continue; } - if (!IsGenerationalGCType(gc_type)) { + if (!IsGenerationalGCType(gc_type_local)) { continue; } - std::string gctype = static_cast(GCStringFromType(gc_type)); + std::string gctype = static_cast(GCStringFromType(gc_type_local)); SetupRuntime(gctype); { - HandleScope scope(thread_); + HandleScope scope(thread); PrepareTest(); auto *gen_ms = GetGenMemStats(); RealStatsLocations loc = GetGenMemStatsDetails(gen_ms); - 
gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); // Heap doesn't have unexpected garbage now + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); // Heap doesn't have unexpected garbage now // Make a trivial allocation of unaligned size and make it garbage auto r = MakeAllocations(); - gc_->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE)); - ASSERT_EQ(2, gccnt_->count); + gc->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE)); + ASSERT_EQ(2, gccnt->count); if (NeedToCheckYoungFreedCount()) { - ASSERT_EQ(*loc.young_freed_objects_count_, r.allocated_count); + ASSERT_EQ(*loc.young_freed_objects_count, r.allocated_count); } - ASSERT_EQ(*loc.young_freed_objects_size_, r.allocated_bytes); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_count_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_size_, 0); + ASSERT_EQ(*loc.young_freed_objects_size, r.allocated_bytes); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + ASSERT_EQ(*loc.tenured_freed_objects_count, 0); + ASSERT_EQ(*loc.tenured_freed_objects_size, 0); if (NeedToCheckYoungFreedCount()) { - ASSERT_EQ(gc_ms_->GetObjectsFreedCount(), r.allocated_count); + ASSERT_EQ(gc_ms->GetObjectsFreedCount(), r.allocated_count); } - ASSERT_EQ(gc_ms_->GetObjectsFreedBytes(), r.allocated_bytes); - ASSERT_EQ(gc_ms_->GetLargeObjectsFreedCount(), 0); - ASSERT_EQ(gc_ms_->GetLargeObjectsFreedBytes(), 0); + ASSERT_EQ(gc_ms->GetObjectsFreedBytes(), r.allocated_bytes); + ASSERT_EQ(gc_ms->GetLargeObjectsFreedCount(), 0); + ASSERT_EQ(gc_ms->GetLargeObjectsFreedBytes(), 0); // Make a trivial allocation of unaligned size and make it alive r = MakeAllocations(); - gc_->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE)); - ASSERT_EQ(3, gccnt_->count); - ASSERT_EQ(*loc.young_freed_objects_count_, 0); - ASSERT_EQ(*loc.young_freed_objects_size_, 0); - ASSERT_EQ(*loc.young_moved_objects_count_, r.saved_count); - 
ASSERT_EQ(*loc.young_moved_objects_size_, r.saved_bytes); - ASSERT_EQ(*loc.tenured_freed_objects_count_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_size_, 0); + gc->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE)); + ASSERT_EQ(3, gccnt->count); + ASSERT_EQ(*loc.young_freed_objects_count, 0); + ASSERT_EQ(*loc.young_freed_objects_size, 0); + ASSERT_EQ(*loc.young_moved_objects_count, r.saved_count); + ASSERT_EQ(*loc.young_moved_objects_size, r.saved_bytes); + ASSERT_EQ(*loc.tenured_freed_objects_count, 0); + ASSERT_EQ(*loc.tenured_freed_objects_size, 0); // Expecting that r.saved_bytes/count have been promoted into tenured // Make them garbage - MakeObjectsGarbage(0, root_size_); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(4, gccnt_->count); - ASSERT_EQ(*loc.young_freed_objects_count_, 0); - ASSERT_EQ(*loc.young_freed_objects_size_, 0); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_count_, r.saved_count); - ASSERT_EQ(*loc.tenured_freed_objects_size_, r.saved_bytes); + MakeObjectsGarbage(0, root_size); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(4, gccnt->count); + ASSERT_EQ(*loc.young_freed_objects_count, 0); + ASSERT_EQ(*loc.young_freed_objects_size, 0); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + ASSERT_EQ(*loc.tenured_freed_objects_count, r.saved_count); + ASSERT_EQ(*loc.tenured_freed_objects_size, r.saved_bytes); // Make a trivial allocation of unaligned size in tenured space and make it garbage r = MakeAllocations(); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(5, gccnt_->count); - ASSERT_EQ(*loc.young_freed_objects_count_, 0); - ASSERT_EQ(*loc.young_freed_objects_size_, 0); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_count_, r.allocated_count); - 
ASSERT_EQ(*loc.tenured_freed_objects_size_, r.allocated_bytes); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(5, gccnt->count); + ASSERT_EQ(*loc.young_freed_objects_count, 0); + ASSERT_EQ(*loc.young_freed_objects_size, 0); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + ASSERT_EQ(*loc.tenured_freed_objects_count, r.allocated_count); + ASSERT_EQ(*loc.tenured_freed_objects_size, r.allocated_bytes); // Make a trivial allocation of unaligned size large object and make it garbage r = MakeAllocations(); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(6, gccnt_->count); - ASSERT_EQ(*loc.young_freed_objects_count_, 0); - ASSERT_EQ(*loc.young_freed_objects_size_, 0); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_count_, r.allocated_count); - ASSERT_EQ(*loc.tenured_freed_objects_size_, r.allocated_bytes); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(6, gccnt->count); + ASSERT_EQ(*loc.young_freed_objects_count, 0); + ASSERT_EQ(*loc.young_freed_objects_size, 0); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + ASSERT_EQ(*loc.tenured_freed_objects_count, r.allocated_count); + ASSERT_EQ(*loc.tenured_freed_objects_size, r.allocated_bytes); r = MakeAllocations(); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(*loc.young_freed_objects_count_, 0); - ASSERT_EQ(*loc.young_freed_objects_size_, 0); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_count_, r.allocated_count); - ASSERT_EQ(*loc.tenured_freed_objects_size_, r.allocated_bytes); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(*loc.young_freed_objects_count, 0); + ASSERT_EQ(*loc.young_freed_objects_size, 0); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + 
ASSERT_EQ(*loc.young_moved_objects_size, 0); + ASSERT_EQ(*loc.tenured_freed_objects_count, r.allocated_count); + ASSERT_EQ(*loc.tenured_freed_objects_size, r.allocated_bytes); } ResetRuntime(); } @@ -652,24 +652,24 @@ TEST_F(MemStatsGenGCTest, YoungStatsGenGcTest) SetupRuntime(gctype); { - HandleScope scope(thread_); + HandleScope scope(thread); PrepareTest(); auto *gen_ms = GetGenMemStats(); RealStatsLocations loc = GetGenMemStatsDetails(gen_ms); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); // Young shall be empty now. auto r = MakeAllocations(); - gc_->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE)); + gc->WaitForGCInManaged(GCTask(GCTaskCause::YOUNG_GC_CAUSE)); if (NeedToCheckYoungFreedCount()) { - ASSERT_EQ(*loc.young_freed_objects_count_, r.allocated_count - r.saved_count); + ASSERT_EQ(*loc.young_freed_objects_count, r.allocated_count - r.saved_count); } - ASSERT_EQ(*loc.young_freed_objects_size_, r.allocated_bytes - r.saved_bytes); - ASSERT_EQ(*loc.young_moved_objects_count_, r.saved_count); - ASSERT_EQ(*loc.young_moved_objects_size_, r.saved_bytes); - ASSERT_EQ(*loc.tenured_freed_objects_count_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_size_, 0); + ASSERT_EQ(*loc.young_freed_objects_size, r.allocated_bytes - r.saved_bytes); + ASSERT_EQ(*loc.young_moved_objects_count, r.saved_count); + ASSERT_EQ(*loc.young_moved_objects_size, r.saved_bytes); + ASSERT_EQ(*loc.tenured_freed_objects_count, 0); + ASSERT_EQ(*loc.tenured_freed_objects_size, 0); } ResetRuntime(); @@ -689,57 +689,57 @@ TEST_F(MemStatsGenGCTest, TenuredStatsFullGenGcTest) SetupRuntime(gctype); { - HandleScope scope(thread_); + HandleScope scope(thread); PrepareTest(); auto *gen_ms = GetGenMemStats(); RealStatsLocations loc = GetGenMemStatsDetails(gen_ms); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); // Young shall be empty now. 
uint32_t t_count = 0; uint64_t t_bytes = 0; for (int i = 0; i < FULL_TEST_ALLOC_TIMES; ++i) { - [[maybe_unused]] int gc_cnt = gccnt_->count; + [[maybe_unused]] int gc_cnt = gccnt->count; auto r = HelpAllocTenured(); // HelpAllocTenured shall trigger young gc, which is allowed to be mixed - ASSERT(gc_cnt + 1 == gccnt_->count); - auto tfoc_y = *loc.tenured_freed_objects_count_; - auto tfos_y = *loc.tenured_freed_objects_size_; + ASSERT(gc_cnt + 1 == gccnt->count); + auto tfoc_y = *loc.tenured_freed_objects_count; + auto tfos_y = *loc.tenured_freed_objects_size; ASSERT(r.allocated_count > 0); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(*loc.young_freed_objects_count_, 0); - ASSERT_EQ(*loc.young_freed_objects_size_, 0); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_count_ + tfoc_y, r.allocated_count - r.saved_count); - ASSERT_EQ(*loc.tenured_freed_objects_size_ + tfos_y, r.allocated_bytes - r.saved_bytes); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(*loc.young_freed_objects_count, 0); + ASSERT_EQ(*loc.young_freed_objects_size, 0); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + ASSERT_EQ(*loc.tenured_freed_objects_count + tfoc_y, r.allocated_count - r.saved_count); + ASSERT_EQ(*loc.tenured_freed_objects_size + tfos_y, r.allocated_bytes - r.saved_bytes); t_count += r.saved_count; t_bytes += r.saved_bytes; } // Empty everything auto ry = MakeAllocations(); - MakeObjectsGarbage(0, root_size_); + MakeObjectsGarbage(0, root_size); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); if (NeedToCheckYoungFreedCount()) { - ASSERT_EQ(*loc.young_freed_objects_count_, ry.allocated_count); + ASSERT_EQ(*loc.young_freed_objects_count, ry.allocated_count); } - ASSERT_EQ(*loc.young_freed_objects_size_, ry.allocated_bytes); - ASSERT_EQ(*loc.young_moved_objects_count_, 
0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_count_, t_count); - ASSERT_EQ(*loc.tenured_freed_objects_size_, t_bytes); - - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(*loc.young_freed_objects_count_, 0); - ASSERT_EQ(*loc.young_freed_objects_size_, 0); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_count_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_size_, 0); + ASSERT_EQ(*loc.young_freed_objects_size, ry.allocated_bytes); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + ASSERT_EQ(*loc.tenured_freed_objects_count, t_count); + ASSERT_EQ(*loc.tenured_freed_objects_size, t_bytes); + + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(*loc.young_freed_objects_count, 0); + ASSERT_EQ(*loc.young_freed_objects_size, 0); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + ASSERT_EQ(*loc.tenured_freed_objects_count, 0); + ASSERT_EQ(*loc.tenured_freed_objects_size, 0); } ResetRuntime(); @@ -763,10 +763,10 @@ TEST_F(MemStatsGenGCTest, TenuredStatsMixGenGcTest) SetupRuntime(gctype); { - HandleScope scope(thread_); + HandleScope scope(thread); PrepareTest(); GCTaskCause mixed_cause; - switch (gc_type_) { + switch (gc_type) { case GCType::GEN_GC: { UNREACHABLE(); // Doesn't have mixed GC collection } @@ -780,7 +780,7 @@ TEST_F(MemStatsGenGCTest, TenuredStatsMixGenGcTest) auto *gen_ms = GetGenMemStats(); RealStatsLocations loc = GetGenMemStatsDetails(gen_ms); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); // Young shall be empty now. 
uint32_t t_count = 0; @@ -792,74 +792,74 @@ TEST_F(MemStatsGenGCTest, TenuredStatsMixGenGcTest) uint32_t expected_dead_count = 0; uint64_t expected_dead_bytes = 0; for (int i = 0; i < MIX_TEST_ALLOC_TIMES; ++i) { - [[maybe_unused]] int gc_cnt = gccnt_->count; + [[maybe_unused]] int gc_cnt = gccnt->count; auto r = HelpAllocTenured(); // HelpAllocTenured shall trigger young gc, which is allowed to be mixed - ASSERT(gc_cnt + 1 == gccnt_->count); - dead_count += *loc.tenured_freed_objects_count_; - dead_bytes += *loc.tenured_freed_objects_size_; + ASSERT(gc_cnt + 1 == gccnt->count); + dead_count += *loc.tenured_freed_objects_count; + dead_bytes += *loc.tenured_freed_objects_size; // Mixed can free not all the tenured garbage, so run it until it stalls do { - gc_->WaitForGCInManaged(GCTask(mixed_cause)); - ASSERT_EQ(*loc.young_freed_objects_count_, 0); - ASSERT_EQ(*loc.young_freed_objects_size_, 0); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - dead_count += *loc.tenured_freed_objects_count_; - dead_bytes += *loc.tenured_freed_objects_size_; - } while (*loc.tenured_freed_objects_count_ != 0); + gc->WaitForGCInManaged(GCTask(mixed_cause)); + ASSERT_EQ(*loc.young_freed_objects_count, 0); + ASSERT_EQ(*loc.young_freed_objects_size, 0); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + dead_count += *loc.tenured_freed_objects_count; + dead_bytes += *loc.tenured_freed_objects_size; + } while (*loc.tenured_freed_objects_count != 0); t_count += r.saved_count; t_bytes += r.saved_bytes; expected_dead_count += r.allocated_count - r.saved_count; expected_dead_bytes += r.allocated_bytes - r.saved_bytes; } - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(*loc.young_freed_objects_count_, 0); - ASSERT_EQ(*loc.young_freed_objects_size_, 0); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - dead_count += 
*loc.tenured_freed_objects_count_; - dead_bytes += *loc.tenured_freed_objects_size_; + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(*loc.young_freed_objects_count, 0); + ASSERT_EQ(*loc.young_freed_objects_size, 0); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + dead_count += *loc.tenured_freed_objects_count; + dead_bytes += *loc.tenured_freed_objects_size; ASSERT_EQ(dead_count, expected_dead_count); ASSERT_EQ(dead_bytes, expected_dead_bytes); } // Empty everything auto ry = MakeAllocations(); - MakeObjectsGarbage(0, root_size_); + MakeObjectsGarbage(0, root_size); { uint32_t dead_count = 0; uint64_t dead_bytes = 0; do { - gc_->WaitForGCInManaged(GCTask(mixed_cause)); + gc->WaitForGCInManaged(GCTask(mixed_cause)); if (NeedToCheckYoungFreedCount()) { - ASSERT_EQ(*loc.young_freed_objects_count_, ry.allocated_count); + ASSERT_EQ(*loc.young_freed_objects_count, ry.allocated_count); } - ASSERT_EQ(*loc.young_freed_objects_size_, ry.allocated_bytes); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - dead_count += *loc.tenured_freed_objects_count_; - dead_bytes += *loc.tenured_freed_objects_size_; + ASSERT_EQ(*loc.young_freed_objects_size, ry.allocated_bytes); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + dead_count += *loc.tenured_freed_objects_count; + dead_bytes += *loc.tenured_freed_objects_size; ry.allocated_count = 0; ry.allocated_bytes = 0; - } while (*loc.tenured_freed_objects_count_ != 0); - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(*loc.young_freed_objects_count_, 0); - ASSERT_EQ(*loc.young_freed_objects_size_, 0); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - dead_count += *loc.tenured_freed_objects_count_; - dead_bytes += *loc.tenured_freed_objects_size_; + } while (*loc.tenured_freed_objects_count != 0); + 
gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(*loc.young_freed_objects_count, 0); + ASSERT_EQ(*loc.young_freed_objects_size, 0); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + dead_count += *loc.tenured_freed_objects_count; + dead_bytes += *loc.tenured_freed_objects_size; ASSERT_EQ(dead_count, t_count); ASSERT_EQ(dead_bytes, t_bytes); } - gc_->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); - ASSERT_EQ(*loc.young_freed_objects_count_, 0); - ASSERT_EQ(*loc.young_freed_objects_size_, 0); - ASSERT_EQ(*loc.young_moved_objects_count_, 0); - ASSERT_EQ(*loc.young_moved_objects_size_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_count_, 0); - ASSERT_EQ(*loc.tenured_freed_objects_size_, 0); + gc->WaitForGCInManaged(GCTask(FULL_GC_CAUSE)); + ASSERT_EQ(*loc.young_freed_objects_count, 0); + ASSERT_EQ(*loc.young_freed_objects_size, 0); + ASSERT_EQ(*loc.young_moved_objects_count, 0); + ASSERT_EQ(*loc.young_moved_objects_size, 0); + ASSERT_EQ(*loc.tenured_freed_objects_count, 0); + ASSERT_EQ(*loc.tenured_freed_objects_size, 0); } ResetRuntime(); diff --git a/runtime/tests/mem_stats_test.cpp b/runtime/tests/mem_stats_test.cpp index 75df58b42d6d9f518571b9e8086027704d32238c..4947cd9d6f9d7195501c62f3e61170e22e27a97d 100644 --- a/runtime/tests/mem_stats_test.cpp +++ b/runtime/tests/mem_stats_test.cpp @@ -69,70 +69,69 @@ protected: using MallocProxyNonObjectAllocator = MallocProxyAllocator; class RawStatsBeforeTest { - size_t raw_bytes_allocated_before_test; - size_t raw_bytes_freed_before_test; - size_t raw_bytes_footprint_before_rest; + size_t raw_bytes_allocated_before_test_; + size_t raw_bytes_freed_before_test_; + size_t raw_bytes_footprint_before_rest_; public: explicit RawStatsBeforeTest(MemStatsType *stats) - : raw_bytes_allocated_before_test(stats->GetAllocated(SpaceType::SPACE_TYPE_INTERNAL)), - raw_bytes_freed_before_test(stats->GetFreed(SpaceType::SPACE_TYPE_INTERNAL)), - 
raw_bytes_footprint_before_rest(stats->GetFootprint(SpaceType::SPACE_TYPE_INTERNAL)) + : raw_bytes_allocated_before_test_(stats->GetAllocated(SpaceType::SPACE_TYPE_INTERNAL)), + raw_bytes_freed_before_test_(stats->GetFreed(SpaceType::SPACE_TYPE_INTERNAL)), + raw_bytes_footprint_before_rest_(stats->GetFootprint(SpaceType::SPACE_TYPE_INTERNAL)) { } [[nodiscard]] size_t GetRawBytesAllocatedBeforeTest() const { - return raw_bytes_allocated_before_test; + return raw_bytes_allocated_before_test_; } [[nodiscard]] size_t GetRawBytesFreedBeforeTest() const { - return raw_bytes_freed_before_test; + return raw_bytes_freed_before_test_; } [[nodiscard]] size_t GetRawBytesFootprintBeforeTest() const { - return raw_bytes_footprint_before_rest; + return raw_bytes_footprint_before_rest_; } }; -void AssertHeapStats(MemStatsType *stats, size_t bytes_in_heap, size_t heap_bytes_allocated_, size_t heap_bytes_freed_) +void AssertHeapStats(MemStatsType *stats, size_t bytes_in_heap, size_t heap_bytes_allocated, size_t heap_bytes_freed) { - ASSERT_EQ(heap_bytes_allocated_, stats->GetAllocated(SpaceType::SPACE_TYPE_OBJECT)); - ASSERT_EQ(heap_bytes_freed_, stats->GetFreed(SpaceType::SPACE_TYPE_OBJECT)); + ASSERT_EQ(heap_bytes_allocated, stats->GetAllocated(SpaceType::SPACE_TYPE_OBJECT)); + ASSERT_EQ(heap_bytes_freed, stats->GetFreed(SpaceType::SPACE_TYPE_OBJECT)); ASSERT_EQ(bytes_in_heap, stats->GetFootprint(SpaceType::SPACE_TYPE_OBJECT)); } -void AssertHeapHumongousStats(MemStatsType *stats, size_t bytes_in_heap, size_t heap_bytes_allocated_, - size_t heap_bytes_freed_) +void AssertHeapHumongousStats(MemStatsType *stats, size_t bytes_in_heap, size_t heap_bytes_allocated, + size_t heap_bytes_freed) { - ASSERT_EQ(heap_bytes_allocated_, stats->GetAllocated(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT)); - ASSERT_EQ(heap_bytes_freed_, stats->GetFreed(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT)); + ASSERT_EQ(heap_bytes_allocated, stats->GetAllocated(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT)); + 
ASSERT_EQ(heap_bytes_freed, stats->GetFreed(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT)); ASSERT_EQ(bytes_in_heap, stats->GetFootprint(SpaceType::SPACE_TYPE_HUMONGOUS_OBJECT)); } -void AssertHeapObjectsStats(MemStatsType *stats, size_t heap_objects_allocated_, size_t heap_objects_freed_, - size_t heap_humungous_objects_allocated_, size_t heap_humungous_objects_freed_) +void AssertHeapObjectsStats(MemStatsType *stats, size_t heap_objects_allocated, size_t heap_objects_freed, + size_t heap_humungous_objects_allocated, size_t heap_humungous_objects_freed) { - ASSERT_EQ(heap_objects_allocated_, stats->GetTotalObjectsAllocated()); - ASSERT_EQ(heap_objects_freed_, stats->GetTotalObjectsFreed()); + ASSERT_EQ(heap_objects_allocated, stats->GetTotalObjectsAllocated()); + ASSERT_EQ(heap_objects_freed, stats->GetTotalObjectsFreed()); // On arm-32 platform, we should cast the uint64_t(-1) to size_t(-1) - ASSERT_EQ(heap_objects_allocated_ - heap_humungous_objects_allocated_, + ASSERT_EQ(heap_objects_allocated - heap_humungous_objects_allocated, static_cast(stats->GetTotalRegularObjectsAllocated())); - ASSERT_EQ(heap_objects_freed_ - heap_humungous_objects_freed_, + ASSERT_EQ(heap_objects_freed - heap_humungous_objects_freed, static_cast(stats->GetTotalRegularObjectsFreed())); - ASSERT_EQ(heap_humungous_objects_allocated_, stats->GetTotalHumongousObjectsAllocated()); - ASSERT_EQ(heap_humungous_objects_freed_, stats->GetTotalHumongousObjectsFreed()); + ASSERT_EQ(heap_humungous_objects_allocated, stats->GetTotalHumongousObjectsAllocated()); + ASSERT_EQ(heap_humungous_objects_freed, stats->GetTotalHumongousObjectsFreed()); - ASSERT_EQ(heap_objects_allocated_ - heap_objects_freed_, stats->GetObjectsCountAlive()); - ASSERT_EQ(heap_objects_allocated_ - heap_objects_freed_ + heap_humungous_objects_allocated_ - - heap_humungous_objects_freed_, + ASSERT_EQ(heap_objects_allocated - heap_objects_freed, stats->GetObjectsCountAlive()); + ASSERT_EQ(heap_objects_allocated - heap_objects_freed + 
heap_humungous_objects_allocated - + heap_humungous_objects_freed, stats->GetRegularObjectsCountAlive()); - ASSERT_EQ(heap_humungous_objects_allocated_ - heap_humungous_objects_freed_, - stats->GetHumonguousObjectsCountAlive()); + ASSERT_EQ(heap_humungous_objects_allocated - heap_humungous_objects_freed, stats->GetHumonguousObjectsCountAlive()); } /** @@ -140,13 +139,13 @@ void AssertHeapObjectsStats(MemStatsType *stats, size_t heap_objects_allocated_, * destructors haven't be called yet. */ void AssertRawStats(MemStatsType *stats, size_t raw_bytes_allocated, size_t raw_bytes_freed, size_t raw_bytes_footprint, - RawStatsBeforeTest &statsBeforeTest) + RawStatsBeforeTest &stats_before_test) { - ASSERT_EQ(raw_bytes_allocated + statsBeforeTest.GetRawBytesAllocatedBeforeTest(), + ASSERT_EQ(raw_bytes_allocated + stats_before_test.GetRawBytesAllocatedBeforeTest(), stats->GetAllocated(SpaceType::SPACE_TYPE_INTERNAL)); - ASSERT_EQ(raw_bytes_freed + statsBeforeTest.GetRawBytesFreedBeforeTest(), + ASSERT_EQ(raw_bytes_freed + stats_before_test.GetRawBytesFreedBeforeTest(), stats->GetFreed(SpaceType::SPACE_TYPE_INTERNAL)); - ASSERT_EQ(raw_bytes_footprint + statsBeforeTest.GetRawBytesFootprintBeforeTest(), + ASSERT_EQ(raw_bytes_footprint + stats_before_test.GetRawBytesFootprintBeforeTest(), stats->GetFootprint(SpaceType::SPACE_TYPE_INTERNAL)); } @@ -203,7 +202,7 @@ TEST_F(MemStatsTest, NonObjectTestViaMallocAllocator) // testing MemStats via allocators. 
TEST_F(MemStatsTest, NonObjectTestViaSlotsAllocator) { - static constexpr uint64_t poolSize = SIZE_1M * 4; + static constexpr uint64_t POOL_SIZE = SIZE_1M * 4; static constexpr size_t REAL_BYTES_ALLOC1 = 23; // RunSlotsAllocator uses 32 bytes for allocation 23 bytes static constexpr size_t BYTES_IN_ALLOCATOR_ALLOC1 = 32; @@ -216,8 +215,8 @@ TEST_F(MemStatsTest, NonObjectTestViaSlotsAllocator) RawStatsBeforeTest raw_stats_before_test(stats); auto *allocator = new NonObjectAllocator(stats, SpaceType::SPACE_TYPE_INTERNAL); - void *mem = aligned_alloc(RUNSLOTS_ALIGNMENT_IN_BYTES, poolSize); - allocator->AddMemoryPool(mem, poolSize); + void *mem = aligned_alloc(RUNSLOTS_ALIGNMENT_IN_BYTES, POOL_SIZE); + allocator->AddMemoryPool(mem, POOL_SIZE); void *a1 = allocator->Alloc(REAL_BYTES_ALLOC1); allocator->Free(a1); diff --git a/runtime/tests/method_test.cpp b/runtime/tests/method_test.cpp index 557158dc2ded0249af75c306f3a951690a650825..1655128e608c93c66ac8d78767d53043ef492c0e 100644 --- a/runtime/tests/method_test.cpp +++ b/runtime/tests/method_test.cpp @@ -75,20 +75,20 @@ public: auto &prog = res.Value(); const std::string name = pandasm::GetFunctionSignatureFromName("foo", {}); ASSERT_NE(prog.function_table.find(name), prog.function_table.end()); - auto &insVec = prog.function_table.find(name)->second.ins; - const int insNum = insVec.size(); - ASSERT_EQ(lines.size(), insNum); + auto &ins_vec = prog.function_table.find(name)->second.ins; + const int ins_num = ins_vec.size(); + ASSERT_EQ(lines.size(), ins_num); - for (int i = 0; i < insNum; i++) { - insVec[i].ins_debug.SetLineNumber(lines[i]); + for (int i = 0; i < ins_num; i++) { + ins_vec[i].ins_debug.SetLineNumber(lines[i]); } auto pf = pandasm::AsmEmitter::Emit(res.Value()); ASSERT_NE(pf, nullptr); - ClassLinker *classLinker = Runtime::GetCurrent()->GetClassLinker(); - classLinker->AddPandaFile(std::move(pf)); - auto *extension = classLinker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY); + ClassLinker 
*class_linker = Runtime::GetCurrent()->GetClassLinker(); + class_linker->AddPandaFile(std::move(pf)); + auto *extension = class_linker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY); PandaString descriptor; @@ -98,7 +98,7 @@ public: Method *method = klass->GetDirectMethod(utf::CStringAsMutf8("foo")); ASSERT_NE(method, nullptr); - for (int i = 0; i < insNum; i++) { + for (int i = 0; i < ins_num; i++) { ASSERT_EQ(method->GetLineNumFromBytecodeOffset(offsets[i]), lines[i]) << "do not match on i = " << i; } } @@ -107,10 +107,10 @@ protected: panda::MTManagedThread *thread_; }; -template +template static Frame *CreateFrame(size_t nregs, Method *method, Frame *prev) { - return panda::CreateFrameWithSize(Frame::GetActualSize(nregs), nregs, method, prev); + return panda::CreateFrameWithSize(Frame::GetActualSize(nregs), nregs, method, prev); } TEST_F(MethodTest, SetIntrinsic) diff --git a/runtime/tests/multithreaded_intern_string_table_test.cpp b/runtime/tests/multithreaded_intern_string_table_test.cpp index 69ca37527d850e4b5df8d0e269563e988cd4c92d..3aecee709a1bb053383e2adbcb4183890e7efcb2 100644 --- a/runtime/tests/multithreaded_intern_string_table_test.cpp +++ b/runtime/tests/multithreaded_intern_string_table_test.cpp @@ -128,7 +128,7 @@ public: } } - std::mutex mutex_; + std::mutex mutex; protected: panda::MTManagedThread *thread_; @@ -175,7 +175,7 @@ void TestConcurrentInsertion(const std::array, TEST_ARRAY uint32_t current_array_item = 0; while (true) { { - std::lock_guard lock_guard(test->mutex_); + std::lock_guard lock_guard(test->mutex); if (array_item >= TEST_ARRAY_SIZE) { break; } diff --git a/runtime/tests/object_helpers_test.cpp b/runtime/tests/object_helpers_test.cpp index 8d04ce0cc5bd3b0a5e25b065731a15df81268f5d..fbc6530ffa68d048203c214372f248482c935742 100644 --- a/runtime/tests/object_helpers_test.cpp +++ b/runtime/tests/object_helpers_test.cpp @@ -28,7 +28,7 @@ namespace panda::mem { -inline std::string separator() +inline std::string Separator() { 
#ifdef _WIN32 return "\\"; @@ -48,7 +48,7 @@ public: options.SetGcTriggerType("debug-never"); auto exec_path = panda::os::file::File::GetExecutablePath(); std::string panda_std_lib = - exec_path.Value() + separator() + ".." + separator() + "pandastdlib" + separator() + "pandastdlib.bin"; + exec_path.Value() + Separator() + ".." + Separator() + "pandastdlib" + Separator() + "pandastdlib.bin"; options.SetBootPandaFiles({panda_std_lib}); Runtime::Create(options); diff --git a/runtime/tests/pygote_space_allocator_test_base.h b/runtime/tests/pygote_space_allocator_test_base.h index 4671d4d64b9c77ef36d9a013db773e4ccac9f956..07caaf10eb6cb328786f17322a1da4b37e09f663 100644 --- a/runtime/tests/pygote_space_allocator_test_base.h +++ b/runtime/tests/pygote_space_allocator_test_base.h @@ -226,11 +226,11 @@ inline void PygoteSpaceAllocatorTest::MuchObjectAllocTest() auto cls = GetObjectClass(); auto global_object_storage = thread_->GetVM()->GetGlobalObjectStorage(); - static constexpr size_t obj_num = 1024; + static constexpr size_t OBJ_NUM = 1024; PandaVector movable_refs; PandaVector non_movable_refs; - for (size_t i = 0; i < obj_num; i++) { + for (size_t i = 0; i < OBJ_NUM; i++) { auto movable = panda::ObjectHeader::Create(cls); movable_refs.push_back(global_object_storage->Add(movable, panda::mem::Reference::ObjectType::GLOBAL)); auto non_movable = panda::ObjectHeader::CreateNonMovable(cls); diff --git a/runtime/tests/runslots_allocator_test.cpp b/runtime/tests/runslots_allocator_test.cpp index 55debd28cde9f8f339f812356decb3e335fa5102..ff1695d1fd65b18be624417bf4716f848bde786c 100644 --- a/runtime/tests/runslots_allocator_test.cpp +++ b/runtime/tests/runslots_allocator_test.cpp @@ -212,23 +212,23 @@ TEST_F(RunSlotsAllocatorTest, AllocateReuse2) // It's regression test mem::MemStatsType *mem_stats = new mem::MemStatsType(); NonObjectAllocator allocator(mem_stats); - static constexpr size_t size1 = 60; - static constexpr size_t size2 = 204; - constexpr char char1 = 'a'; - 
constexpr char char2 = 'b'; - constexpr char char3 = 'c'; - constexpr char char4 = 'd'; - constexpr char char5 = 'e'; - constexpr char char6 = 'f'; + static constexpr size_t SIZE1 = 60; + static constexpr size_t SIZE2 = 204; + constexpr char CHAR1 = 'a'; + constexpr char CHAR2 = 'b'; + constexpr char CHAR3 = 'c'; + constexpr char CHAR4 = 'd'; + constexpr char CHAR5 = 'e'; + constexpr char CHAR6 = 'f'; AddMemoryPoolToAllocatorProtected(allocator); - char *strA, *strB, *strC, *strD, *strE, *strF; - auto fillStr = [](char *str, char c, size_t size) { + char *str_a, *str_b, *str_c, *str_d, *str_e, *str_f; + auto fill_str = [](char *str, char c, size_t size) { for (size_t i = 0; i < size - 1; i++) { str[i] = c; } str[size - 1] = 0; }; - auto checkStr = [](char *str, char c, size_t size) { + auto check_str = [](char *str, char c, size_t size) { for (size_t i = 0; i < size - 1; i++) { if (str[i] != c) { return false; @@ -236,27 +236,27 @@ TEST_F(RunSlotsAllocatorTest, AllocateReuse2) } return true; }; - strA = reinterpret_cast(allocator.Alloc(size1)); - strB = reinterpret_cast(allocator.Alloc(size1)); - strC = reinterpret_cast(allocator.Alloc(size1)); - fillStr(strA, char1, size1); - fillStr(strB, char2, size1); - fillStr(strC, char3, size1); - ASSERT_TRUE(checkStr(strA, char1, size1)); - ASSERT_TRUE(checkStr(strB, char2, size1)); - ASSERT_TRUE(checkStr(strC, char3, size1)); - allocator.Free(static_cast(strA)); - allocator.Free(static_cast(strB)); - allocator.Free(static_cast(strC)); - strD = reinterpret_cast(allocator.Alloc(size2)); - strE = reinterpret_cast(allocator.Alloc(size2)); - strF = reinterpret_cast(allocator.Alloc(size2)); - fillStr(strD, char4, size2); - fillStr(strE, char5, size2); - fillStr(strF, char6, size2); - ASSERT_TRUE(checkStr(strD, char4, size2)); - ASSERT_TRUE(checkStr(strE, char5, size2)); - ASSERT_TRUE(checkStr(strF, char6, size2)); + str_a = reinterpret_cast(allocator.Alloc(SIZE1)); + str_b = reinterpret_cast(allocator.Alloc(SIZE1)); + str_c = 
reinterpret_cast(allocator.Alloc(SIZE1)); + fill_str(str_a, CHAR1, SIZE1); + fill_str(str_b, CHAR2, SIZE1); + fill_str(str_c, CHAR3, SIZE1); + ASSERT_TRUE(check_str(str_a, CHAR1, SIZE1)); + ASSERT_TRUE(check_str(str_b, CHAR2, SIZE1)); + ASSERT_TRUE(check_str(str_c, CHAR3, SIZE1)); + allocator.Free(static_cast(str_a)); + allocator.Free(static_cast(str_b)); + allocator.Free(static_cast(str_c)); + str_d = reinterpret_cast(allocator.Alloc(SIZE2)); + str_e = reinterpret_cast(allocator.Alloc(SIZE2)); + str_f = reinterpret_cast(allocator.Alloc(SIZE2)); + fill_str(str_d, CHAR4, SIZE2); + fill_str(str_e, CHAR5, SIZE2); + fill_str(str_f, CHAR6, SIZE2); + ASSERT_TRUE(check_str(str_d, CHAR4, SIZE2)); + ASSERT_TRUE(check_str(str_e, CHAR5, SIZE2)); + ASSERT_TRUE(check_str(str_f, CHAR6, SIZE2)); delete mem_stats; } @@ -334,7 +334,7 @@ TEST_F(RunSlotsAllocatorTest, MTAllocFreeTest) #endif static constexpr size_t MT_TEST_RUN_COUNT = 5; for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) { - MT_AllocFreeTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT); + MtAllocFreeTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT); } } @@ -350,7 +350,7 @@ TEST_F(RunSlotsAllocatorTest, MTAllocIterateTest) #endif static constexpr size_t MT_TEST_RUN_COUNT = 5; for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) { - MT_AllocIterateTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>( + MtAllocIterateTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>( MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT, CrossingMapSingleton::GetCrossingMapGranularity()); } } @@ -367,7 +367,7 @@ TEST_F(RunSlotsAllocatorTest, MTAllocCollectTest) #endif static constexpr size_t MT_TEST_RUN_COUNT = 5; for (size_t i = 0; i < MT_TEST_RUN_COUNT; i++) { - MT_AllocCollectTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT); + MtAllocCollectTest<1, RunSlotsType::MaxSlotSize(), THREADS_COUNT>(MIN_ELEMENTS_COUNT, MAX_ELEMENTS_COUNT); 
} } diff --git a/runtime/tests/stack_walker_test.cpp b/runtime/tests/stack_walker_test.cpp index d57508986defd0b8536b017469cebe9309dd8227..0dc5abaa1b4df185a9fdd5dec54f14f48277ea63 100644 --- a/runtime/tests/stack_walker_test.cpp +++ b/runtime/tests/stack_walker_test.cpp @@ -79,14 +79,14 @@ public: return cls->GetDirectMethod(utf::CStringAsMutf8(method_name.data())); } - static mem::Reference *global_obj; + static mem::Reference *global_obj_; private: bool default_compiler_non_optimizing_; std::string default_compiler_regex_; }; -mem::Reference *StackWalkerTest::global_obj; +mem::Reference *StackWalkerTest::global_obj_; extern "C" int PandaRunnerHook(); extern "C" int StackWalkerHookAArch64Bridge(); @@ -124,7 +124,7 @@ public: { ScopedManagedCodeThread s(thread); auto storage = Runtime::GetCurrent()->GetPandaVM()->GetGlobalObjectStorage(); - StackWalkerTest::global_obj = + StackWalkerTest::global_obj_ = storage->Add(panda::mem::AllocateNullifiedPayloadString(1), mem::Reference::ObjectType::GLOBAL); } auto res = parser.Parse(source.data()); @@ -211,16 +211,16 @@ uint64_t ConvertToU64(T val) } } -template +template int32_t ToCalleeRegister(size_t reg) { - return reg + GetFirstCalleeReg(arch, false); + return reg + GetFirstCalleeReg(ARCH, false); } -template +template int32_t ToCalleeFpRegister(size_t reg) { - return reg + GetFirstCalleeReg(arch, true); + return reg + GetFirstCalleeReg(ARCH, true); } TEST_F(StackWalkerTest, DISABLED_ModifyVreg) @@ -275,7 +275,7 @@ TEST_F(StackWalkerTest, DISABLED_ModifyVreg) runner.GetCompilerOptions().SetCompilerNonOptimizing(true); runner.GetCompilerOptions().SetCompilerRematConst(false); runner.GetCompilerOptions().SetCompilerRegex("(?!_GLOBAL::testb|_GLOBAL::hook).*"); - [[maybe_unused]] static constexpr std::array frame_values = {0x123456789abcdef, 0xaaaabbbbccccdddd, + [[maybe_unused]] static constexpr std::array FRAME_VALUES = {0x123456789abcdef, 0xaaaabbbbccccdddd, 0xabcdef20}; static int run_count = 0; runner.Run(source, 
[](uintptr_t lr, [[maybe_unused]] uintptr_t fp) -> int { @@ -288,7 +288,7 @@ TEST_F(StackWalkerTest, DISABLED_ModifyVreg) success = walker.IterateVRegsWithInfo([&was_set, &walker](const auto ®_info, const auto ®) { if (!reg_info.IsAccumulator()) { HOOK_ASSERT(reg.GetLong() == 27, return false); - walker.SetVRegValue(reg_info, frame_values[0]); + walker.SetVRegValue(reg_info, FRAME_VALUES[0]); was_set = true; } return true; @@ -301,7 +301,7 @@ TEST_F(StackWalkerTest, DISABLED_ModifyVreg) success = walker.IterateVRegsWithInfo([&walker](const auto ®_info, const auto ®) { if (!reg_info.IsAccumulator()) { HOOK_ASSERT(reg.GetLong() == 27, return false); - walker.SetVRegValue(reg_info, frame_values[1]); + walker.SetVRegValue(reg_info, FRAME_VALUES[1]); } return true; }); @@ -312,7 +312,7 @@ TEST_F(StackWalkerTest, DISABLED_ModifyVreg) success = walker.IterateVRegsWithInfo([&walker](const auto ®_info, const auto ®) { if (!reg_info.IsAccumulator()) { HOOK_ASSERT(reg.GetLong() == 27, return true;); - walker.SetVRegValue(reg_info, frame_values[2]); + walker.SetVRegValue(reg_info, FRAME_VALUES[2]); } return true; }); @@ -321,7 +321,7 @@ TEST_F(StackWalkerTest, DISABLED_ModifyVreg) HOOK_ASSERT(!walker.IsCFrame(), return 1); success = walker.IterateVRegsWithInfo([](const auto ®_info, const auto ®) { if (!reg_info.IsAccumulator()) { - HOOK_ASSERT(reg.GetLong() == bit_cast(frame_values[0]), return true;); + HOOK_ASSERT(reg.GetLong() == bit_cast(FRAME_VALUES[0]), return true;); } return true; }); @@ -331,7 +331,7 @@ TEST_F(StackWalkerTest, DISABLED_ModifyVreg) HOOK_ASSERT(walker.IsCFrame(), return 1); success = walker.IterateVRegsWithInfo([](const auto ®_info, const auto ®) { if (!reg_info.IsAccumulator()) { - HOOK_ASSERT(reg.GetLong() == bit_cast(frame_values[1]), return true;); + HOOK_ASSERT(reg.GetLong() == bit_cast(FRAME_VALUES[1]), return true;); } return true; }); @@ -341,7 +341,7 @@ TEST_F(StackWalkerTest, DISABLED_ModifyVreg) HOOK_ASSERT(walker.IsCFrame(), return 1); 
success = walker.IterateVRegsWithInfo([](const auto ®_info, const auto ®) { if (!reg_info.IsAccumulator()) { - HOOK_ASSERT(reg.GetLong() == bit_cast(frame_values[2]), return true;); + HOOK_ASSERT(reg.GetLong() == bit_cast(FRAME_VALUES[2]), return true;); } return true; }); @@ -453,7 +453,7 @@ void StackWalkerTest::TestModifyManyVregs(bool is_compiled) int reg_index = 1; bool success = false; - auto obj = Runtime::GetCurrent()->GetPandaVM()->GetGlobalObjectStorage()->Get(StackWalkerTest::global_obj); + auto obj = Runtime::GetCurrent()->GetPandaVM()->GetGlobalObjectStorage()->Get(StackWalkerTest::global_obj_); if (first_run) { success = walker.IterateVRegsWithInfo([®_index, &walker, &obj](const auto ®_info, const auto ®) { if (!reg_info.IsAccumulator()) { diff --git a/runtime/tests/static_analyzer_test.cpp b/runtime/tests/static_analyzer_test.cpp index b3fdb0be601e7ce1a84ec271a896f1385dfac708..d71752e8d927d43a17d00d30c7ff9cd2d4a8fbd6 100644 --- a/runtime/tests/static_analyzer_test.cpp +++ b/runtime/tests/static_analyzer_test.cpp @@ -27,7 +27,7 @@ #include "runtime/mem/object_helpers-inl.h" namespace panda::mem { -inline std::string separator() +inline std::string Separator() { #ifdef _WIN32 return "\\"; @@ -47,7 +47,7 @@ public: options.SetGcTriggerType("debug-never"); auto exec_path = panda::os::file::File::GetExecutablePath(); std::string panda_std_lib = - exec_path.Value() + separator() + ".." + separator() + "pandastdlib" + separator() + "pandastdlib.bin"; + exec_path.Value() + Separator() + ".." 
+ Separator() + "pandastdlib" + Separator() + "pandastdlib.bin"; options.SetBootPandaFiles({panda_std_lib}); Runtime::Create(options); diff --git a/runtime/tests/string_table_base_test.h b/runtime/tests/string_table_base_test.h index 2b33d01201e7a42ad24075b020851ffeaf7db8d7..ef764056d6f13930f98573e3fef97a1ea9f8fb61 100644 --- a/runtime/tests/string_table_base_test.h +++ b/runtime/tests/string_table_base_test.h @@ -138,13 +138,13 @@ public: void InternManyStrings() { ScopedManagedCodeThread s(thread_); - static constexpr size_t iterations = 50; + static constexpr size_t ITERATIONS = 50; auto table = StringTable(); std::vector data {0x00}; const unsigned number_of_letters = 25; LanguageContext ctx = Runtime::GetCurrent()->GetLanguageContext(panda_file::SourceLang::PANDA_ASSEMBLY); - for (size_t i = 0; i < iterations; i++) { + for (size_t i = 0; i < ITERATIONS; i++) { data.insert(data.begin(), (('a' + i) % number_of_letters) + 1); [[maybe_unused]] auto *first_pointer = table.GetOrInternString(AllocUtf8String(data), ctx); [[maybe_unused]] auto *second_pointer = @@ -153,7 +153,7 @@ public: ASSERT_EQ(first_pointer, second_pointer); ASSERT_EQ(second_pointer, third_pointer); } - ASSERT_EQ(table.Size(), iterations); + ASSERT_EQ(table.Size(), ITERATIONS); } void SweepObjectInTable() @@ -164,7 +164,7 @@ public: std::vector data1 {0x01, 0x00}; std::vector data2 {0x02, 0x00}; std::vector data3 {0x03, 0x00}; - const unsigned EXPECTED_TABLE_SIZE = 2; + const unsigned expected_table_size = 2; auto storage = thread_->GetVM()->GetGlobalObjectStorage(); @@ -184,7 +184,7 @@ public: thread_->GetVM()->GetGC()->WaitForGCInManaged(panda::GCTask(panda::GCTaskCause::EXPLICIT_CAUSE)); // Collect all heap for EXPLICIT_CAUSE - ASSERT_EQ(table->Size(), table_init_size + EXPECTED_TABLE_SIZE); + ASSERT_EQ(table->Size(), table_init_size + expected_table_size); storage->Remove(ref1); storage->Remove(ref3); @@ -201,7 +201,7 @@ public: std::vector data1 {0x01, 0x00}; std::vector data2 {0x02, 0x00}; 
std::vector data3 {0x03, 0x00}; - const unsigned EXPECTED_TABLE_SIZE = 2; + const unsigned expected_table_size = 2; auto storage = thread_->GetVM()->GetGlobalObjectStorage(); @@ -224,7 +224,7 @@ public: thread_->GetVM()->GetGC()->WaitForGCInManaged(panda::GCTask(panda::GCTaskCause::EXPLICIT_CAUSE)); // Collect all heap for EXPLICIT_CAUSE - ASSERT_EQ(table->Size(), table_init_size + EXPECTED_TABLE_SIZE); + ASSERT_EQ(table->Size(), table_init_size + expected_table_size); storage->Remove(ref1); storage->Remove(ref3); @@ -250,7 +250,7 @@ public: data2.push_back(0x00); data3.push_back(0x00); auto table_init_size = table->Size(); - const unsigned EXPECTED_TABLE_SIZE = 2; + const unsigned expected_table_size = 2; auto storage = thread_->GetVM()->GetGlobalObjectStorage(); @@ -273,7 +273,7 @@ public: thread_->GetVM()->GetGC()->WaitForGCInManaged(panda::GCTask(panda::GCTaskCause::EXPLICIT_CAUSE)); // Collect all heap for EXPLICIT_CAUSE - ASSERT_EQ(table->Size(), table_init_size + EXPECTED_TABLE_SIZE); + ASSERT_EQ(table->Size(), table_init_size + expected_table_size); storage->Remove(ref1); storage->Remove(ref3); @@ -298,7 +298,7 @@ public: std::vector string_data(10000U, 0x30); string_data.push_back(0x00); - auto fillHeap = [&string_data, &thread, &objects](bool is_movable) { + auto fill_heap = [&string_data, &thread, &objects](bool is_movable) { while (true) { auto *obj = AllocUtf8String(string_data, is_movable); if (obj == nullptr) { @@ -310,7 +310,7 @@ public: }; { - fillHeap(true); + fill_heap(true); auto *res = table.GetOrInternString(string_data.data(), string_data.size() - 1, panda_class_context); ASSERT_EQ(res, nullptr); ManagedThread::GetCurrent()->ClearException(); @@ -332,7 +332,7 @@ public: auto pf = panda_file::File::OpenFromMemory(std::move(ptr)); - fillHeap(false); + fill_heap(false); auto *res = table.GetOrInternInternalString(*pf.get(), id, panda_class_context); ASSERT_EQ(res, nullptr); ManagedThread::GetCurrent()->ClearException(); diff --git 
a/runtime/tests/string_test.cpp b/runtime/tests/string_test.cpp index 20b278432aecec9644441d739508925a57b7119a..dd361e4cf29cef12be37820f69892fbc12c59914 100644 --- a/runtime/tests/string_test.cpp +++ b/runtime/tests/string_test.cpp @@ -257,15 +257,15 @@ TEST_F(StringTest, lengthUtf16) TEST_F(StringTest, DifferentLengthStringCompareTest) { - static constexpr uint32_t f_string_length = 8; - static constexpr char f_string[f_string_length + 1] = "Hello, w"; + static constexpr uint32_t F_STRING_LENGTH = 8; + static constexpr char F_STRING[F_STRING_LENGTH + 1] = "Hello, w"; String *first_string = String::CreateFromMUtf8(reinterpret_cast(SIMPLE_UTF8_STRING), SIMPLE_UTF8_STRING_LENGTH, GetLanguageContext(), Runtime::GetCurrent()->GetPandaVM()); ASSERT_EQ(first_string->GetLength(), SIMPLE_UTF8_STRING_LENGTH); - String *second_string = String::CreateFromMUtf8(reinterpret_cast(f_string), f_string_length, + String *second_string = String::CreateFromMUtf8(reinterpret_cast(F_STRING), F_STRING_LENGTH, GetLanguageContext(), Runtime::GetCurrent()->GetPandaVM()); - ASSERT_EQ(second_string->GetLength(), f_string_length); + ASSERT_EQ(second_string->GetLength(), F_STRING_LENGTH); ASSERT_EQ(String::StringsAreEqual(first_string, second_string), false); } @@ -392,11 +392,11 @@ TEST_F(StringTest, RegionCopyTestUtf16) TEST_F(StringTest, SameLengthStringCompareTest) { - static constexpr uint32_t string_length = 10; - char *f_string = new char[string_length + 1]; - char *s_string = new char[string_length + 1]; + static constexpr uint32_t STRING_LENGTH = 10; + char *f_string = new char[STRING_LENGTH + 1]; + char *s_string = new char[STRING_LENGTH + 1]; - for (uint32_t i = 0; i < string_length; i++) { + for (uint32_t i = 0; i < STRING_LENGTH; i++) { // Hack for ConvertMUtf8ToUtf16 call. // We should use char from 0x7f to 0x0 if we want to // generate one utf16 (0x00xx) from this mutf8. 
@@ -417,29 +417,29 @@ TEST_F(StringTest, SameLengthStringCompareTest) } // Set the last elements in strings with size more than 0x8 to disable compressing. // This will leads to count two MUtf-8 bytes as one UTF-16 so length = string_length - 1 - f_string[string_length - 2] = uint8_t(0x80); - s_string[string_length - 2] = uint8_t(0x80); - f_string[string_length - 1] = uint8_t(0x01); - s_string[string_length - 1] = uint8_t(0x01); - f_string[string_length] = '\0'; - s_string[string_length] = '\0'; - - String *first_utf16_string = String::CreateFromMUtf8(reinterpret_cast(f_string), string_length - 1, + f_string[STRING_LENGTH - 2] = uint8_t(0x80); + s_string[STRING_LENGTH - 2] = uint8_t(0x80); + f_string[STRING_LENGTH - 1] = uint8_t(0x01); + s_string[STRING_LENGTH - 1] = uint8_t(0x01); + f_string[STRING_LENGTH] = '\0'; + s_string[STRING_LENGTH] = '\0'; + + String *first_utf16_string = String::CreateFromMUtf8(reinterpret_cast(f_string), STRING_LENGTH - 1, GetLanguageContext(), Runtime::GetCurrent()->GetPandaVM()); // Try to use function with automatic length detection String *second_utf16_string = String::CreateFromMUtf8(reinterpret_cast(s_string), GetLanguageContext(), Runtime::GetCurrent()->GetPandaVM()); - ASSERT_EQ(first_utf16_string->GetLength(), string_length - 1); - ASSERT_EQ(second_utf16_string->GetLength(), string_length - 1); + ASSERT_EQ(first_utf16_string->GetLength(), STRING_LENGTH - 1); + ASSERT_EQ(second_utf16_string->GetLength(), STRING_LENGTH - 1); // Dirty hack to not create utf16 for our purpose, just reuse old one // Try to create compressed strings. 
- String *first_utf8_string = String::CreateFromUtf16(first_utf16_string->GetDataUtf16(), string_length - 1, + String *first_utf8_string = String::CreateFromUtf16(first_utf16_string->GetDataUtf16(), STRING_LENGTH - 1, GetLanguageContext(), Runtime::GetCurrent()->GetPandaVM()); - String *second_utf8_string = String::CreateFromUtf16(first_utf16_string->GetDataUtf16(), string_length - 1, + String *second_utf8_string = String::CreateFromUtf16(first_utf16_string->GetDataUtf16(), STRING_LENGTH - 1, GetLanguageContext(), Runtime::GetCurrent()->GetPandaVM()); - ASSERT_EQ(first_utf8_string->GetLength(), string_length - 1); - ASSERT_EQ(second_utf8_string->GetLength(), string_length - 1); + ASSERT_EQ(first_utf8_string->GetLength(), STRING_LENGTH - 1); + ASSERT_EQ(second_utf8_string->GetLength(), STRING_LENGTH - 1); ASSERT_EQ(String::StringsAreEqual(first_utf16_string, second_utf16_string), strcmp(f_string, s_string) == 0); ASSERT_EQ(String::StringsAreEqual(first_utf16_string, second_utf8_string), @@ -728,17 +728,17 @@ TEST_F(StringTest, ConcatTest) TEST_F(StringTest, DoReplaceTest0) { - static constexpr uint32_t string_length = 10; - char *f_string = new char[string_length + 1]; - char *s_string = new char[string_length + 1]; + static constexpr uint32_t STRING_LENGTH = 10; + char *f_string = new char[STRING_LENGTH + 1]; + char *s_string = new char[STRING_LENGTH + 1]; - for (uint32_t i = 0; i < string_length; i++) { + for (uint32_t i = 0; i < STRING_LENGTH; i++) { f_string[i] = 'A' + i; s_string[i] = 'A' + i; } f_string[0] = 'Z'; - f_string[string_length] = '\0'; - s_string[string_length] = '\0'; + f_string[STRING_LENGTH] = '\0'; + s_string[STRING_LENGTH] = '\0'; String *f_string_s = String::CreateFromMUtf8(reinterpret_cast(f_string), GetLanguageContext(), Runtime::GetCurrent()->GetPandaVM()); diff --git a/runtime/tests/time_utils_test.cpp b/runtime/tests/time_utils_test.cpp index f74bdd261f4abdc09540f5a88cd9792e623544b4..06a4100cd7ddd0ec6e288a36f277bc9f68cc7721 100644 --- 
a/runtime/tests/time_utils_test.cpp +++ b/runtime/tests/time_utils_test.cpp @@ -55,27 +55,27 @@ protected: TEST_F(TimeTest, TimerTest) { - uint64_t duration_ = 0; + uint64_t duration = 0; { - Timer timer(&duration_); + Timer timer(&duration); std::this_thread::sleep_for(std::chrono::nanoseconds(10)); } - ASSERT_GT(duration_, 0); + ASSERT_GT(duration, 0); - uint64_t last_duration_ = duration_; + uint64_t last_duration = duration; { - Timer timer(&duration_); + Timer timer(&duration); std::this_thread::sleep_for(std::chrono::nanoseconds(10)); } - ASSERT_GT(duration_, last_duration_); + ASSERT_GT(duration, last_duration); { - Timer timer(&duration_, true); - ASSERT_EQ(duration_, 0); + Timer timer(&duration, true); + ASSERT_EQ(duration, 0); } // There is some nondeterminism in sleep, moreover with small values // and check (duration_ < last_duration_) may fail - ASSERT_GT(duration_, 0); + ASSERT_GT(duration, 0); } TEST_F(TimeTest, CurrentTimeStringTest) diff --git a/runtime/tests/tlab_test.cpp b/runtime/tests/tlab_test.cpp index 88d92e9164cc548c25c1eb71ab94a3b44730e862..ce3a58e4a0c411ad009b993d37eb4f496b150636 100644 --- a/runtime/tests/tlab_test.cpp +++ b/runtime/tests/tlab_test.cpp @@ -52,9 +52,9 @@ protected: ASAN_UNPOISON_MEMORY_REGION(mem, TLAB_TEST_SIZE); std::pair new_pair {mem, TLAB_TEST_SIZE}; allocated_mem_mmap_.push_back(new_pair); - auto newTLAB = + auto new_tlab = new (mem) TLAB(ToVoidPtr(ToUintPtr(mem) + sizeof(mem::TLAB)), TLAB_TEST_SIZE - sizeof(mem::TLAB)); - return newTLAB; + return new_tlab; } std::vector> allocated_mem_mmap_; diff --git a/runtime/tests/tooling/CMakeLists.txt b/runtime/tests/tooling/CMakeLists.txt index 9d0cd991736a5d8f334278404c151aed0548cf0b..ad3d9b1858dd74225c5c38fc0f518dba6819b51e 100644 --- a/runtime/tests/tooling/CMakeLists.txt +++ b/runtime/tests/tooling/CMakeLists.txt @@ -32,3 +32,10 @@ target_include_directories(debugtest PUBLIC ) panda_add_sanitizers(TARGET debugtest SANITIZERS ${PANDA_SANITIZERS_LIST}) + +# Sampling 
profiler tests +# Currently profiler is not supported for arm64 +# TODO(m.strizhak): support for device +if (NOT PANDA_TARGET_ARM64) + add_subdirectory(sampler) +endif() \ No newline at end of file diff --git a/runtime/tests/tooling/init.cpp b/runtime/tests/tooling/init.cpp index dbac053438e413bae8a3a70e39c8134effbc9e2c..310d948db7821befa6a6b9062cf15d24c3489c15 100644 --- a/runtime/tests/tooling/init.cpp +++ b/runtime/tests/tooling/init.cpp @@ -23,25 +23,25 @@ namespace panda::tooling::test { extern const char *GetCurrentTestName(); -static std::thread g_debuggerThread; +static std::thread G_DEBUGGER_THREAD; -static std::unique_ptr g_runner {nullptr}; +static std::unique_ptr G_RUNNER {nullptr}; extern "C" int StartDebugger(uint32_t, DebugInterface *iface, void *) { - const char *testName = GetCurrentTestName(); - g_runner = std::make_unique(testName, iface); - g_debuggerThread = std::thread([] { + const char *test_name = GetCurrentTestName(); + G_RUNNER = std::make_unique(test_name, iface); + G_DEBUGGER_THREAD = std::thread([] { TestUtil::WaitForInit(); - g_runner->Run(); + G_RUNNER->Run(); }); return 0; } extern "C" int StopDebugger() { - g_debuggerThread.join(); - g_runner->TerminateTest(); + G_DEBUGGER_THREAD.join(); + G_RUNNER->TerminateTest(); return 0; } } // namespace panda::tooling::test diff --git a/runtime/tests/tooling/sampler/CMakeLists.txt b/runtime/tests/tooling/sampler/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..bbbbbc973a4967af6de4f46a2c1a7655080def4b --- /dev/null +++ b/runtime/tests/tooling/sampler/CMakeLists.txt @@ -0,0 +1,24 @@ +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.14.1 FATAL_ERROR) + +add_gtests( + sampling_profiler_test + sampling_profiler_test.cpp +) + +add_panda_assembly(TARGET sampling_profiler_test_ark_asm SOURCE sampling_profiler_test.pa) + +add_dependencies(sampling_profiler_test_ark_asm ark_asm) +add_dependencies(sampling_profiler_test_gtests sampling_profiler_test_ark_asm) \ No newline at end of file diff --git a/runtime/tests/tooling/sampler/sampling_profiler_test.cpp b/runtime/tests/tooling/sampler/sampling_profiler_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2caaf6c0a93dea03ca0ebd869835dee1a352ddde --- /dev/null +++ b/runtime/tests/tooling/sampler/sampling_profiler_test.cpp @@ -0,0 +1,748 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include "assembler/assembly-parser.h" +#include "libpandafile/file.h" +#include "libpandabase/trace/trace.h" +#include "libpandabase/panda_gen_options/generated/base_options.h" +#include "runtime/include/thread_scopes.h" +#include "runtime/include/runtime.h" +#include "runtime/tooling/sampler/sampling_profiler.h" +#include "runtime/interpreter/runtime_interface.h" +#include "tools/sampler/aspt_converter.h" + +namespace panda::tooling::sampler::test { + +inline std::string Separator() +{ +#ifdef _WIN32 + return "\\"; +#else + return "/"; +#endif +} + +static const char *PROFILER_FILENAME = "profiler_result.aspt"; +static const char *PANDA_FILE_NAME = "sampling_profiler_test_ark_asm.abc"; +static constexpr size_t TEST_CYCLE_THRESHOLD = 100; + +class SamplerTest : public testing::Test { +public: + // NOLINTNEXTLINE(readability-function-size) + void SetUp() override + { + Logger::Initialize(base_options::Options("")); + + RuntimeOptions options; + options.SetBootClassSpaces({"core"}); + options.SetRuntimeType("core"); + options.SetRunGcInPlace(true); + options.SetVerifyCallStack(false); + auto exec_path = panda::os::file::File::GetExecutablePath(); + std::string panda_std_lib = + exec_path.Value() + Separator() + ".." 
+ Separator() + "pandastdlib" + Separator() + "arkstdlib.abc"; + options.SetBootPandaFiles({panda_std_lib}); + Runtime::Create(options); + + auto pf = panda_file::OpenPandaFileOrZip(PANDA_FILE_NAME); + Runtime::GetCurrent()->GetClassLinker()->AddPandaFile(std::move(pf)); + + thread_ = panda::MTManagedThread::GetCurrent(); + } + + void TearDown() override + { + Runtime::Destroy(); + } + + void FullfillFakeSample(SampleInfo *ps) + { + for (uint32_t i = 0; i < SampleInfo::MAX_STACK_DEPTH; ++i) { + ps->managed_stack[i] = {i, pf_id_}; + } + ps->managed_stack_size = SampleInfo::MAX_STACK_DEPTH; + } + + // Friend wrappers for accesing samplers private fields + static os::thread::native_handle_type ExtractAgentTid(const Sampler *s_ptr) + { + return s_ptr->agent_tid_; + } + + static os::thread::native_handle_type ExtractListenerTid(const Sampler *s_ptr) + { + return s_ptr->listener_tid_; + } + + static os::thread::native_handle_type ExtractSamplerTid(const Sampler *s_ptr) + { + return s_ptr->sampler_tid_; + } + + static PandaSet ExtractManagedThreads(Sampler *s_ptr) + { + // Sending a copy to avoid of datarace + os::memory::LockHolder holder(s_ptr->managed_threads_lock_); + PandaSet managed_threads_copy = s_ptr->managed_threads_; + return managed_threads_copy; + } + + static size_t ExtractLoadedPFSize(Sampler *s_ptr) + { + os::memory::LockHolder holder(s_ptr->loaded_pfs_lock_); + return s_ptr->loaded_pfs_.size(); + } + + static std::array ExtractPipes(const Sampler *s_ptr) + { + // Sending a copy to avoid of datarace + return s_ptr->communicator_.listener_pipe_; + } + + static bool ExtractIsActive(const Sampler *s_ptr) + { + return s_ptr->is_active_; + } + +protected: + // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes) + panda::MTManagedThread *thread_ {nullptr}; + // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes) + uintptr_t pf_id_ {0}; +}; + +TEST_F(SamplerTest, SamplerInitTest) +{ + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + + 
ASSERT_EQ(ExtractAgentTid(sp), os::thread::GetNativeHandle()); + ASSERT_EQ(ExtractListenerTid(sp), 0); + ASSERT_EQ(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), false); + + ASSERT_EQ(sp->Start(PROFILER_FILENAME), true); + ASSERT_NE(ExtractListenerTid(sp), 0); + ASSERT_NE(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), true); + + ASSERT_EQ(sp->Start(PROFILER_FILENAME), false); + + sp->Stop(); + ASSERT_EQ(ExtractListenerTid(sp), 0); + ASSERT_EQ(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), false); + + // Second run + ASSERT_EQ(sp->Start(PROFILER_FILENAME), true); + ASSERT_NE(ExtractListenerTid(sp), 0); + ASSERT_NE(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), true); + + sp->Stop(); + ASSERT_EQ(ExtractListenerTid(sp), 0); + ASSERT_EQ(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), false); + Sampler::Destroy(sp); +} + +void RunManagedThread(std::atomic *sync_flag) +{ + auto *m_thr = + panda::MTManagedThread::Create(panda::Runtime::GetCurrent(), panda::Runtime::GetCurrent()->GetPandaVM()); + m_thr->ManagedCodeBegin(); + + *sync_flag = true; + while (*sync_flag) { + // Calling safepoint 'cause starting profiler required to stop all managed threads + interpreter::RuntimeInterface::Safepoint(); + } + + m_thr->ManagedCodeEnd(); + m_thr->Destroy(); +} + +void RunManagedThreadAndSaveThreadId(std::atomic *sync_flag, os::thread::ThreadId *id) +{ + auto *m_thr = + panda::MTManagedThread::Create(panda::Runtime::GetCurrent(), panda::Runtime::GetCurrent()->GetPandaVM()); + m_thr->ManagedCodeBegin(); + + *id = os::thread::GetCurrentThreadId(); + *sync_flag = true; + while (*sync_flag) { + // Calling safepoint 'cause starting profiler required to stop all managed threads + interpreter::RuntimeInterface::Safepoint(); + } + + m_thr->ManagedCodeEnd(); + m_thr->Destroy(); +} + +void RunNativeThread(std::atomic *sync_flag) +{ + auto *m_thr = + panda::MTManagedThread::Create(panda::Runtime::GetCurrent(), 
panda::Runtime::GetCurrent()->GetPandaVM()); + + *sync_flag = true; + while (*sync_flag) { + } + + m_thr->Destroy(); +} + +// Testing notification thread started/finished +TEST_F(SamplerTest, SamplerEventThreadNotificationTest) +{ + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + + ASSERT_TRUE(ExtractManagedThreads(sp).empty()); + + ASSERT_EQ(sp->Start(PROFILER_FILENAME), true); + ASSERT_NE(ExtractListenerTid(sp), 0); + ASSERT_NE(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), true); + + ASSERT_FALSE(ExtractManagedThreads(sp).empty()); + ASSERT_EQ(ExtractManagedThreads(sp).size(), 1); + + std::atomic sync_flag1 = false; + std::atomic sync_flag2 = false; + std::atomic sync_flag3 = false; + std::thread managed_thread1(RunManagedThread, &sync_flag1); + std::thread managed_thread2(RunManagedThread, &sync_flag2); + std::thread managed_thread3(RunManagedThread, &sync_flag3); + + while (!sync_flag1 || !sync_flag2 || !sync_flag3) { + ; + } + ASSERT_EQ(ExtractManagedThreads(sp).size(), 4); + + sync_flag1 = false; + sync_flag2 = false; + sync_flag3 = false; + managed_thread1.join(); + managed_thread2.join(); + managed_thread3.join(); + + ASSERT_EQ(ExtractManagedThreads(sp).size(), 1); + + sp->Stop(); + ASSERT_EQ(ExtractListenerTid(sp), 0); + ASSERT_EQ(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), false); + Sampler::Destroy(sp); +} + +// Testing notification thread started/finished +TEST_F(SamplerTest, SamplerCheckThreadIdTest) +{ + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + + ASSERT_TRUE(ExtractManagedThreads(sp).empty()); + + ASSERT_EQ(sp->Start(PROFILER_FILENAME), true); + ASSERT_NE(ExtractListenerTid(sp), 0); + ASSERT_NE(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), true); + + ASSERT_EQ(ExtractManagedThreads(sp).size(), 1); + + std::atomic sync_flag1 = false; + os::thread::ThreadId mt_id = 0; + std::thread managed_thread1(RunManagedThreadAndSaveThreadId, &sync_flag1, &mt_id); + + while (!sync_flag1) { + ; + 
} + ASSERT_EQ(ExtractManagedThreads(sp).size(), 2); + bool is_passed = false; + + for (const auto &elem : ExtractManagedThreads(sp)) { + if (elem == mt_id) { + is_passed = true; + break; + } + } + ASSERT_TRUE(is_passed); + + sync_flag1 = false; + managed_thread1.join(); + + ASSERT_EQ(ExtractManagedThreads(sp).size(), 1); + + sp->Stop(); + ASSERT_EQ(ExtractListenerTid(sp), 0); + ASSERT_EQ(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), false); + Sampler::Destroy(sp); +} + +// Testing thread collection +TEST_F(SamplerTest, SamplerCollectThreadTest) +{ + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + + ASSERT_TRUE(ExtractManagedThreads(sp).empty()); + + std::atomic sync_flag1 = false; + std::atomic sync_flag2 = false; + std::atomic sync_flag3 = false; + std::thread managed_thread1(RunManagedThread, &sync_flag1); + std::thread managed_thread2(RunManagedThread, &sync_flag2); + std::thread managed_thread3(RunManagedThread, &sync_flag3); + + while (!sync_flag1 || !sync_flag2 || !sync_flag3) { + ; + } + + ASSERT_EQ(sp->Start(PROFILER_FILENAME), true); + ASSERT_NE(ExtractListenerTid(sp), 0); + ASSERT_NE(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), true); + + ASSERT_EQ(ExtractManagedThreads(sp).size(), 4); + + sync_flag1 = false; + sync_flag2 = false; + sync_flag3 = false; + managed_thread1.join(); + managed_thread2.join(); + managed_thread3.join(); + + ASSERT_EQ(ExtractManagedThreads(sp).size(), 1); + + sp->Stop(); + ASSERT_EQ(ExtractListenerTid(sp), 0); + ASSERT_EQ(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), false); + Sampler::Destroy(sp); +} + +// Testing native thread collection +TEST_F(SamplerTest, SamplerCollectNativeThreadTest) +{ + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + + ASSERT_TRUE(ExtractManagedThreads(sp).empty()); + + std::atomic sync_flag1 = false; + std::atomic sync_flag2 = false; + std::atomic sync_flag3 = false; + std::thread managed_thread1(RunManagedThread, &sync_flag1); + std::thread 
native_thread2(RunNativeThread, &sync_flag2); + + while (!sync_flag1 || !sync_flag2) { + ; + } + + ASSERT_EQ(sp->Start(PROFILER_FILENAME), true); + ASSERT_NE(ExtractListenerTid(sp), 0); + ASSERT_NE(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), true); + + ASSERT_EQ(ExtractManagedThreads(sp).size(), 3); + std::thread native_thread3(RunNativeThread, &sync_flag3); + while (!sync_flag3) { + ; + } + + ASSERT_EQ(ExtractManagedThreads(sp).size(), 4); + + sync_flag1 = false; + sync_flag2 = false; + sync_flag3 = false; + managed_thread1.join(); + native_thread2.join(); + native_thread3.join(); + + ASSERT_EQ(ExtractManagedThreads(sp).size(), 1); + + sp->Stop(); + ASSERT_EQ(ExtractListenerTid(sp), 0); + ASSERT_EQ(ExtractSamplerTid(sp), 0); + ASSERT_EQ(ExtractIsActive(sp), false); + Sampler::Destroy(sp); +} + +// Testing pipes +TEST_F(SamplerTest, SamplerPipesTest) +{ + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + sp->Start(PROFILER_FILENAME); + + ASSERT_NE(ExtractPipes(sp)[ThreadCommunicator::PIPE_READ_ID], 0); + ASSERT_NE(ExtractPipes(sp)[ThreadCommunicator::PIPE_WRITE_ID], 0); + + sp->Stop(); + Sampler::Destroy(sp); +} + +// Stress testing restart +TEST_F(SamplerTest, ProfilerRestartStressTest) +{ + constexpr size_t CURRENT_TEST_THRESHOLD = TEST_CYCLE_THRESHOLD; + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + + for (uint32_t i = 0; i < CURRENT_TEST_THRESHOLD; i++) { + ASSERT_EQ(sp->Start(PROFILER_FILENAME), true); + sp->Stop(); + } + + Sampler::Destroy(sp); +} + +TEST_F(SamplerTest, ThreadCommunicatorTest) +{ + ThreadCommunicator communicator; + + SampleInfo sample_input; + SampleInfo sample_output; + FullfillFakeSample(&sample_input); + ASSERT_TRUE(communicator.Init()); + ASSERT_TRUE(communicator.SendSample(sample_input)); + ASSERT_TRUE(communicator.ReadSample(&sample_output)); + ASSERT_EQ(sample_output, sample_input); +} + +static void CommunicatorStressWritterThread(const ThreadCommunicator *com, const SampleInfo &sample, + uint32_t 
messages_amount) +{ + for (uint32_t i = 0; i < messages_amount; ++i) { + // If the sample write failed we retrying to send it + if (!com->SendSample(sample)) { + std::cerr << "Failed to send a sample" << std::endl; + abort(); + } + } +} + +TEST_F(SamplerTest, ThreadCommunicatorMultithreadTest) +{ + constexpr uint32_t MESSAGES_AMOUNT = TEST_CYCLE_THRESHOLD * 100; + + ThreadCommunicator communicator; + SampleInfo sample_output; + SampleInfo sample_input; + FullfillFakeSample(&sample_input); + ASSERT_TRUE(communicator.Init()); + + std::thread sender(CommunicatorStressWritterThread, &communicator, sample_input, MESSAGES_AMOUNT); + for (uint32_t i = 0; i < MESSAGES_AMOUNT; ++i) { + // If the sample write failed we retrying to send it + if (!communicator.ReadSample(&sample_output)) { + std::cerr << "Failed to read a sample" << std::endl; + abort(); + } + ASSERT_EQ(sample_output, sample_input); + } + sender.join(); +} + +// Testing reader and writer by writing and reading from .aspt one sample +TEST_F(SamplerTest, StreamWriterReaderTest) +{ + const char *stream_test_filename = "stream_writer_reader_test.aspt"; + SampleInfo sample_output; + SampleInfo sample_input; + + { + StreamWriter writer(stream_test_filename); + FullfillFakeSample(&sample_input); + + writer.WriteSample(sample_input); + } + + SampleReader reader(stream_test_filename); + ASSERT_TRUE(reader.GetNextSample(&sample_output)); + ASSERT_EQ(sample_output, sample_input); + ASSERT_FALSE(reader.GetNextSample(&sample_output)); + ASSERT_FALSE(reader.GetNextModule(nullptr)); +} + +// Testing reader and writer by writing and reading from .aspt lots of samples +TEST_F(SamplerTest, StreamWriterReaderLotsSamplesTest) +{ + constexpr size_t CURRENT_TEST_THRESHOLD = TEST_CYCLE_THRESHOLD * 100; + const char *stream_test_filename = "stream_writer_reader_test_lots_samples.aspt"; + SampleInfo sample_output; + SampleInfo sample_input; + + { + StreamWriter writer(stream_test_filename); + FullfillFakeSample(&sample_input); + + for 
(size_t i = 0; i < CURRENT_TEST_THRESHOLD; ++i) { + writer.WriteSample(sample_input); + } + } + + SampleReader reader(stream_test_filename); + for (size_t i = 0; i < CURRENT_TEST_THRESHOLD; ++i) { + ASSERT_TRUE(reader.GetNextSample(&sample_output)); + ASSERT_EQ(sample_output, sample_input); + } + ASSERT_FALSE(reader.GetNextSample(&sample_output)); + ASSERT_FALSE(reader.GetNextModule(nullptr)); +} + +// Testing reader and writer by writing and reading from .aspt one module +TEST_F(SamplerTest, ModuleWriterReaderTest) +{ + const char *stream_test_filename = "stream_module_test_filename.aspt"; + FileInfo module_input = {pf_id_, "~/folder/folder/lib/panda_file.pa"}; + FileInfo module_output = {}; + + { + StreamWriter writer(stream_test_filename); + writer.WriteModule(module_input); + } + + SampleReader reader(stream_test_filename); + ASSERT_TRUE(reader.GetNextModule(&module_output)); + ASSERT_EQ(module_output, module_input); + ASSERT_FALSE(reader.GetNextModule(&module_output)); + ASSERT_FALSE(reader.GetNextSample(nullptr)); +} + +// Testing reader and writer by writing and reading from .aspt lots of modules +TEST_F(SamplerTest, ModuleWriterReaderLotsModulesTest) +{ + constexpr size_t CURRENT_TEST_THRESHOLD = TEST_CYCLE_THRESHOLD * 100; + const char *stream_test_filename = "stream_lots_modules_test_filename.aspt"; + FileInfo module_input = {pf_id_, "~/folder/folder/lib/panda_file.pa"}; + FileInfo module_output = {}; + + { + StreamWriter writer(stream_test_filename); + for (size_t i = 0; i < CURRENT_TEST_THRESHOLD; ++i) { + writer.WriteModule(module_input); + } + } + + SampleReader reader(stream_test_filename); + for (size_t i = 0; i < CURRENT_TEST_THRESHOLD; ++i) { + ASSERT_TRUE(reader.GetNextModule(&module_output)); + ASSERT_EQ(module_output, module_input); + } + ASSERT_FALSE(reader.GetNextModule(&module_output)); + ASSERT_FALSE(reader.GetNextSample(nullptr)); +} + +// Testing reader and writer by writing and reading from .aspt lots of modules +TEST_F(SamplerTest, 
WriterReaderLotsRowsModulesAndSamplesTest) +{ + constexpr size_t CURRENT_TEST_THRESHOLD = TEST_CYCLE_THRESHOLD * 100; + const char *stream_test_filename = "stream_lots_modules_and_samples_test_filename.aspt"; + FileInfo module_input = {pf_id_, "~/folder/folder/lib/panda_file.pa"}; + FileInfo module_output = {}; + SampleInfo sample_output; + SampleInfo sample_input; + + { + StreamWriter writer(stream_test_filename); + FullfillFakeSample(&sample_input); + for (size_t i = 0; i < CURRENT_TEST_THRESHOLD; ++i) { + writer.WriteModule(module_input); + writer.WriteSample(sample_input); + } + } + + SampleReader reader(stream_test_filename); + for (size_t i = 0; i < CURRENT_TEST_THRESHOLD; ++i) { + ASSERT_TRUE(reader.GetNextModule(&module_output)); + ASSERT_EQ(module_output, module_input); + } + + for (size_t i = 0; i < CURRENT_TEST_THRESHOLD; ++i) { + ASSERT_TRUE(reader.GetNextSample(&sample_output)); + ASSERT_EQ(sample_output, sample_input); + } + + ASSERT_FALSE(reader.GetNextModule(&module_output)); + ASSERT_FALSE(reader.GetNextSample(&sample_output)); +} + +// Send sample to listener and check it inside the file +TEST_F(SamplerTest, ListenerWriteFakeSampleTest) +{ + const char *stream_test_filename = "listener_write_fake_sample_test.aspt"; + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + ASSERT_EQ(sp->Start(stream_test_filename), true); + + SampleInfo sample_output; + SampleInfo sample_input; + FullfillFakeSample(&sample_input); + sp->GetCommunicator().SendSample(sample_input); + sp->Stop(); + + bool status = true; + bool is_passed = false; + SampleReader reader(stream_test_filename); + while (status) { + status = reader.GetNextSample(&sample_output); + if (sample_output == sample_input) { + is_passed = true; + break; + } + } + + ASSERT_TRUE(is_passed); + + Sampler::Destroy(sp); +} + +// Send lots of sample to listener and check it inside the file +TEST_F(SamplerTest, ListenerWriteLotsFakeSampleTest) +{ + constexpr size_t CURRENT_TEST_THRESHOLD = 
TEST_CYCLE_THRESHOLD * 100; + const char *stream_test_filename = "listener_write_lots_fake_sample_test.aspt"; + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + ASSERT_EQ(sp->Start(stream_test_filename), true); + + SampleInfo sample_output; + SampleInfo sample_input; + size_t sent_samples_counter = 0; + FullfillFakeSample(&sample_input); + for (size_t i = 0; i < CURRENT_TEST_THRESHOLD; ++i) { + if (sp->GetCommunicator().SendSample(sample_input)) { + ++sent_samples_counter; + } + } + sp->Stop(); + + bool status = true; + size_t amount_of_samples = 0; + SampleReader reader(stream_test_filename); + while (status) { + if (sample_output == sample_input) { + ++amount_of_samples; + } + status = reader.GetNextSample(&sample_output); + } + + ASSERT_EQ(amount_of_samples, sent_samples_counter); + + Sampler::Destroy(sp); +} + +// Checking that sampler collect panda files correctly +TEST_F(SamplerTest, CollectPandaFilesTest) +{ + const char *stream_test_filename = "collect_panda_file_test.aspt"; + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + ASSERT_EQ(sp->Start(stream_test_filename), true); + sp->Stop(); + + FileInfo module_info; + SampleReader reader(stream_test_filename); + bool status = false; + while (reader.GetNextModule(&module_info)) { + auto pf_ptr = reinterpret_cast(module_info.ptr); + ASSERT_EQ(pf_ptr->GetFullFileName(), module_info.pathname); + status = true; + } + ASSERT_TRUE(status); + Sampler::Destroy(sp); +} + +// Checking that sampler collect panda files correctly +TEST_F(SamplerTest, WriteModuleEventTest) +{ + const char *stream_test_filename = "collect_panda_file_test.aspt"; + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + ASSERT_EQ(sp->Start(stream_test_filename), true); + + auto exec_path = panda::os::file::File::GetExecutablePath(); + std::string pandafile = + exec_path.Value() + Separator() + ".." 
+ Separator() + "pandastdlib" + Separator() + "arkstdlib.abc"; + + auto pf = panda_file::OpenPandaFileOrZip(pandafile); + Runtime::GetCurrent()->GetClassLinker()->AddPandaFile(std::move(pf)); + + ASSERT_EQ(ExtractLoadedPFSize(sp), 1); + sp->Stop(); + + FileInfo module_info; + SampleReader reader(stream_test_filename); + bool status = false; + while (reader.GetNextModule(&module_info)) { + auto pf_ptr = reinterpret_cast(module_info.ptr); + ASSERT_EQ(pf_ptr->GetFullFileName(), module_info.pathname); + status = true; + } + ASSERT_TRUE(status); + + Sampler::Destroy(sp); +} + +// Sampling big pandasm program and convert it +TEST_F(SamplerTest, ProfilerSamplerSignalHandlerTest) +{ + const char *stream_test_filename = "sampler_signal_handler_test.aspt"; + const char *result_test_filename = "sampler_signal_handler_test.csv"; + size_t sample_counter = 0; + + { + auto *sp = Sampler::Create(); + ASSERT_NE(sp, nullptr); + ASSERT_EQ(sp->Start(stream_test_filename), true); + + { + ASSERT_TRUE(Runtime::GetCurrent()->Execute("_GLOBAL::main", {})); + } + sp->Stop(); + + SampleInfo sample; + SampleReader reader(stream_test_filename); + bool is_find = false; + + while (reader.GetNextSample(&sample)) { + ++sample_counter; + if (sample.managed_stack_size == 2) { + is_find = true; + continue; + } + ASSERT_NE(sample.managed_stack_size, 0); + } + + ASSERT_EQ(is_find, true); + + Sampler::Destroy(sp); + } + + // Checking converter + { + AsptConverter conv(stream_test_filename); + ASSERT_EQ(conv.CollectTracesStats(), sample_counter); + ASSERT_TRUE(conv.CollectModules()); + ASSERT_TRUE(conv.DumpResolvedTracesAsCSV(result_test_filename)); + } +} + +} // namespace panda::tooling::sampler::test \ No newline at end of file diff --git a/runtime/tests/tooling/sampler/sampling_profiler_test.pa b/runtime/tests/tooling/sampler/sampling_profiler_test.pa new file mode 100644 index 0000000000000000000000000000000000000000..d64228feb24f7712ea0857baefb3e5d07a4c3484 --- /dev/null +++ 
b/runtime/tests/tooling/sampler/sampling_profiler_test.pa @@ -0,0 +1,507 @@ +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.record Math +.record IO +.record Globals{ + f64 SOLAR_MASS + f64 DAYS_PER_YEAR +} + +.record Body{ + f64 x + f64 y + f64 z + f64 vx + f64 vy + f64 vz + f64 mass +} + +.function f64 Math.sqrt(f64 a0) +.function f64 Math.absF64(f64 a0) +.function u1 main(){ + fldai.64 365.24 + ststatic Globals.DAYS_PER_YEAR + fmovi.64 v3, 3.141592653589793 + fldai.64 4.0 + fmul2.64 v3 + fmul2.64 v3 + ststatic Globals.SOLAR_MASS + movi v0, 3 + movi v1, 5000 + fmovi.64 v2, -4.395717154909567 + call test, v0, v1, v2 + return +} + +.function u1 test(i32 a0, i32 a1, f64 a2){ + fmovi.64 v0, 0.0 #ret + fmovi.64 v6, 0.01 + movi v2, 5 + mov v1, a0 #loop_counter + lda v1 +loop: + jgt a1, loop_exit + newarr v4, v2, Body[] + movi v3, 0 + call.short Sun + starr.obj v4, v3 + inci v3, 1 + call.short Jupiter + starr.obj v4, v3 + inci v3, 1 + call.short Saturn + starr.obj v4, v3 + inci v3, 1 + call.short Uranus + starr.obj v4, v3 + inci v3, 1 + call.short Neptune + starr.obj v4, v3 + call.short NBodySystem, v4 + lda v1 + muli 100 + sta v3 #max + call.short energy, v4 + fadd2.64 v0 + sta.64 v0 + movi v5, 0 +loop2: + lda v5 + jeq v3, loop2_exit + call.short advance, v4, v6 + inci v5, 1 + jmp loop2 +loop2_exit: + call.short energy, v4 + fadd2.64 v0 + sta.64 v0 + lda v1 + muli 2 + sta v1 + jmp loop +loop_exit: + lda.64 v0 + fsub2.64 a2 
+ sta.64 v0 + call.short Math.absF64, v0 + fldai.64 1e-13 + fcmpl.64 v0 + jltz exit_failure + ldai 0 + return +exit_failure: + ldai 1 + return +} + +.function void BodyInit(Body a0, f64 a1, f64 a2, f64 a3, f64 a4, f64 a5, f64 a6, f64 a7){ + lda.64 a1 + stobj a0, Body.x + lda.64 a2 + stobj a0, Body.y + lda.64 a3 + stobj a0, Body.z + lda.64 a4 + stobj a0, Body.vx + lda.64 a5 + stobj a0, Body.vy + lda.64 a6 + stobj a0, Body.vz + lda.64 a7 + stobj a0, Body.mass + return.void +} + +.function Body Jupiter(){ + ldstatic Globals.DAYS_PER_YEAR + sta.64 v8 + ldstatic Globals.SOLAR_MASS + sta.64 v9 + newobj v0, Body + fmovi.64 v1, 4.84143144246472090e+00 + fmovi.64 v2, -1.16032004402742839e+00 + fmovi.64 v3, -1.03622044471123109e-01 + fldai.64 1.66007664274403694e-03 + fmul2.64 v8 + sta.64 v4 + fldai.64 7.69901118419740425e-03 + fmul2.64 v8 + sta.64 v5 + fldai.64 -6.90460016972063023e-05 + fmul2.64 v8 + sta.64 v6 + fldai.64 9.54791938424326609e-04 + fmul2.64 v9 + sta.64 v7 + call.range BodyInit, v0 + lda.obj v0 + return.obj +} + +.function Body Saturn(){ + ldstatic Globals.DAYS_PER_YEAR + sta.64 v8 + ldstatic Globals.SOLAR_MASS + sta.64 v9 + newobj v0, Body + fmovi.64 v1, 8.34336671824457987e+00 + fmovi.64 v2, 4.12479856412430479e+00 + fmovi.64 v3, -4.03523417114321381e-01 + fldai.64 -2.76742510726862411e-03 + fmul2.64 v8 + sta.64 v4 + fldai.64 4.99852801234917238e-03 + fmul2.64 v8 + sta.64 v5 + fldai.64 2.30417297573763929e-05 + fmul2.64 v8 + sta.64 v6 + fldai.64 2.85885980666130812e-04 + fmul2.64 v9 + sta.64 v7 + call.range BodyInit, v0 + lda.obj v0 + return.obj +} + +.function Body Uranus(){ + ldstatic Globals.DAYS_PER_YEAR + sta.64 v8 + ldstatic Globals.SOLAR_MASS + sta.64 v9 + newobj v0, Body + fmovi.64 v1, 1.28943695621391310e+01 + fmovi.64 v2, -1.51111514016986312e+01 + fmovi.64 v3, -2.23307578892655734e-01 + fldai.64 2.96460137564761618e-03 + fmul2.64 v8 + sta.64 v4 + fldai.64 2.37847173959480950e-03 + fmul2.64 v8 + sta.64 v5 + fldai.64 -2.96589568540237556e-05 + 
fmul2.64 v8 + sta.64 v6 + fldai.64 4.36624404335156298e-05 + fmul2.64 v9 + sta.64 v7 + call.range BodyInit, v0 + lda.obj v0 + return.obj +} + +.function Body Neptune(){ + ldstatic Globals.DAYS_PER_YEAR + sta.64 v8 + ldstatic Globals.SOLAR_MASS + sta.64 v9 + newobj v0, Body + fmovi.64 v1, 1.53796971148509165e+01 + fmovi.64 v2, -2.59193146099879641e+01 + fmovi.64 v3, 1.79258772950371181e-01 + fldai.64 2.68067772490389322e-03 + fmul2.64 v8 + sta.64 v4 + fldai.64 1.62824170038242295e-03 + fmul2.64 v8 + sta.64 v5 + fldai.64 -9.51592254519715870e-05 + fmul2.64 v8 + sta.64 v6 + fldai.64 5.15138902046611451e-05 + fmul2.64 v9 + sta.64 v7 + call.range BodyInit, v0 + lda.obj v0 + return.obj +} + +.function Body Sun(){ + ldstatic Globals.SOLAR_MASS + sta.64 v7 + newobj v0, Body + fmovi.64 v1, 0.0 + fmovi.64 v2, 0.0 + fmovi.64 v3, 0.0 + fmovi.64 v4, 0.0 + fmovi.64 v5, 0.0 + fmovi.64 v6, 0.0 + call.range BodyInit, v0 + lda.obj v0 + return.obj +} + +.function Body offsetMomentum(Body a0, f64 a1, f64 a2, f64 a3){ + ldstatic Globals.SOLAR_MASS + sta.64 v0 + lda.64 a1 + fneg.64 + fdiv2.64 v0 + stobj a0, Body.vx + lda.64 a2 + fneg.64 + fdiv2.64 v0 + stobj a0, Body.vy + lda.64 a3 + fneg.64 + fdiv2.64 v0 + stobj a0, Body.vz + lda.obj a0 + return.obj +} + +.function void NBodySystem(Body[] a0){ + fmovi.64 v0, 0.0 #px + fmovi.64 v1, 0.0 #py + fmovi.64 v2, 0.0 #pz + lenarr a0 + sta v3 #size + movi v4, 0 #loop_counter +loop: + lda v4 + jeq v3, loop_exit + ldarr.obj a0 + sta.obj v5 + ldobj v5, Body.mass + sta.64 v6 #m + ldobj v5, Body.vx + fmul2.64 v6 + fadd2.64 v0 + sta.64 v0 + ldobj v5, Body.vy + fmul2.64 v6 + fadd2.64 v1 + sta.64 v1 + ldobj v5, Body.vz + fmul2.64 v6 + fadd2.64 v2 + sta.64 v2 + inci v4, 1 + jmp loop +loop_exit: + ldai 0 + ldarr.obj a0 + sta.obj v5 + call offsetMomentum, v5, v0, v1, v2 + return.void +} + +.function void advance(Body[] a0, f64 a1){ + lenarr a0 + sta v0 #size + movi v1, 0 #loop counter +loop: + lda v1 + jeq v0, loop_exit + ldarr.obj a0 + sta.obj v2 #bodyi + 
lda v1 + addi 1 + sta v3 +loop2: + lda v3 + jeq v0, loop2_exit + ldarr.obj a0 + sta.obj v4 #bodyj + ldobj v4, Body.x + sta.64 v5 + ldobj v2, Body.x + fsub2.64 v5 + sta.64 v6 #dx + ldobj v4, Body.y + sta.64 v5 + ldobj v2, Body.y + fsub2.64 v5 + sta.64 v7 #dy + ldobj v4, Body.z + sta.64 v5 + ldobj v2, Body.z + fsub2.64 v5 + sta.64 v8 #dz + fmul2.64 v8 + sta.64 v9 + lda.64 v6 + fmul2.64 v6 + sta.64 v10 + lda.64 v7 + fmul2.64 v7 + fadd2.64 v9 + fadd2.64 v10 + sta.64 v9 + call.short Math.sqrt, v9 + sta.64 v9 #distance + fmul2.64 v9 + fmul2.64 v9 + sta.64 v10 + lda.64 a1 + fdiv2.64 v10 + sta.64 v10 #mag + ldobj v4, Body.mass + fmul2.64 v10 + sta.64 v11 + fmul2.64 v6 + sta.64 v12 + ldobj v2, Body.vx + fsub2.64 v12 + stobj v2, Body.vx + lda.64 v11 + fmul2.64 v7 + sta.64 v12 + ldobj v2, Body.vy + fsub2.64 v12 + stobj v2, Body.vy + lda.64 v11 + fmul2.64 v8 + sta.64 v12 + ldobj v2, Body.vz + fsub2.64 v12 + stobj v2, Body.vz + ldobj v2, Body.mass + fmul2.64 v10 + sta.64 v11 + fmul2.64 v6 + sta.64 v12 + ldobj v4, Body.vx + fadd2.64 v12 + stobj v4, Body.vx + lda.64 v11 + fmul2.64 v7 + sta.64 v12 + ldobj v4, Body.vy + fadd2.64 v12 + stobj v4, Body.vy + lda.64 v11 + fmul2.64 v8 + sta.64 v12 + ldobj v4, Body.vz + fadd2.64 v12 + stobj v4, Body.vz + inci v3, 1 + jmp loop2 +loop2_exit: + inci v1, 1 + jmp loop +loop_exit: + movi v1, 0 +loop3: + lda v1 + jeq v0, loop3_exit + ldarr.obj a0 + sta.obj v2 #body + ldobj v2, Body.vx + fmul2.64 a1 + sta.64 v12 + ldobj v2, Body.x + fadd2.64 v12 + stobj v2, Body.x + ldobj v2, Body.vy + fmul2.64 a1 + sta.64 v12 + ldobj v2, Body.y + fadd2.64 v12 + stobj v2, Body.y + ldobj v2, Body.vz + fmul2.64 a1 + sta.64 v12 + ldobj v2, Body.z + fadd2.64 v12 + stobj v2, Body.z + inci v1, 1 + jmp loop3 +loop3_exit: + return.void +} + +.function f64 energy(Body[] a0){ + lenarr a0 + sta v0 #size + fmovi.64 v1, 0.0 #e + movi v2, 0 +loop: + lda v2 + jeq v0, loop_exit + ldarr.obj a0 + sta.obj v3 #bodyi + ldobj v3, Body.vx + sta.64 v4 + fmul2.64 v4 + sta.64 v4 + ldobj 
v3, Body.vy + sta.64 v5 + fmul2.64 v5 + sta.64 v5 + ldobj v3, Body.vz + sta.64 v6 + fmul2.64 v6 + fadd2.64 v5 + fadd2.64 v4 + sta.64 v4 + fmovi.64 v5, 0.5 + ldobj v3, Body.mass + fmul2.64 v4 + fmul2.64 v5 + fadd2.64 v1 + sta.64 v1 + lda v2 + addi 1 + sta v7 +loop2: + lda v7 + jeq v0, loop2_exit + ldarr.obj a0 + sta.obj v8 #bodyj + ldobj v8, Body.x + sta.64 v4 + ldobj v3, Body.x + fsub2.64 v4 + sta.64 v5 #dx + fmul2.64 v5 + sta.64 v5 + ldobj v8, Body.y + sta.64 v4 + ldobj v3, Body.y + fsub2.64 v4 + sta.64 v6 #dy + fmul2.64 v6 + sta.64 v6 + ldobj v8, Body.z + sta.64 v4 + ldobj v3, Body.z + fsub2.64 v4 + sta.64 v10 #dz + fmul2.64 v10 + fadd2.64 v6 + fadd2.64 v5 + sta.64 v5 + call.short Math.sqrt, v5 + sta.64 v5 #distance + ldobj v3, Body.mass + sta.64 v6 + ldobj v8, Body.mass + fmul2.64 v6 + fdiv2.64 v5 + sta.64 v6 + lda.64 v1 + fsub2.64 v6 + sta.64 v1 + inci v7, 1 + jmp loop2 +loop2_exit: + inci v2, 1 + jmp loop +loop_exit: + lda.64 v1 + return.64 +} \ No newline at end of file diff --git a/runtime/tests/tooling/test_extractor.cpp b/runtime/tests/tooling/test_extractor.cpp index 25657b5d762130cb9262d59a9b87f5fd0b8e8561..d578799ef44b45172f2d907134efc892570a8113 100644 --- a/runtime/tests/tooling/test_extractor.cpp +++ b/runtime/tests/tooling/test_extractor.cpp @@ -45,12 +45,12 @@ std::pair TestExtractor::GetBreakpointAddress(const SourceLo std::vector methods = lang_extractor_->GetMethodIdList(); for (const auto &method : methods) { - auto srcName = PandaString(lang_extractor_->GetSourceFile(method)); - auto pos_sf = srcName.find_last_of("/\\"); + auto src_name = PandaString(lang_extractor_->GetSourceFile(method)); + auto pos_sf = src_name.find_last_of("/\\"); if (pos_sf != PandaString::npos) { - srcName = srcName.substr(pos_sf + 1); + src_name = src_name.substr(pos_sf + 1); } - if (srcName == name) { + if (src_name == name) { const panda_file::LineNumberTable &line_table = lang_extractor_->GetLineNumberTable(method); if (line_table.empty()) { continue; diff --git 
a/runtime/tests/tooling/test_extractor.h b/runtime/tests/tooling/test_extractor.h index 7ad5986b3b01b591da49613591a660776857235d..590ed91c1cde21e9ee6f7bac063bf94a7d935aeb 100644 --- a/runtime/tests/tooling/test_extractor.h +++ b/runtime/tests/tooling/test_extractor.h @@ -47,13 +47,13 @@ public: virtual ~TestExtractor() = default; - std::pair GetBreakpointAddress(const SourceLocation &sourceLocation); + std::pair GetBreakpointAddress(const SourceLocation &source_location); - PandaList GetStepRanges(EntityId methodId, uint32_t currentOffset); + PandaList GetStepRanges(EntityId method_id, uint32_t current_offset); - virtual std::vector GetLocalVariableInfo(EntityId methodId, size_t offset); + virtual std::vector GetLocalVariableInfo(EntityId method_id, size_t offset); - SourceLocation GetSourceLocation(EntityId methodId, uint32_t bytecodeOffset); + SourceLocation GetSourceLocation(EntityId method_id, uint32_t bytecode_offset); static std::optional GetLineNumberByTableOffset(const panda_file::LineNumberTable &table, uint32_t offset); static std::optional GetOffsetByTableLineNumber(const panda_file::LineNumberTable &table, size_t line); diff --git a/runtime/tests/tooling/test_util.cpp b/runtime/tests/tooling/test_util.cpp index d7cbad792c5d12713ad63c67085f1274039053b7..c4a264cb3761a2d8bba637823bd4a0250a4ca80e 100644 --- a/runtime/tests/tooling/test_util.cpp +++ b/runtime/tests/tooling/test_util.cpp @@ -34,9 +34,9 @@ TestExtractorFactory *TestUtil::extractor_factory_; std::vector TestUtil::GetVariables(Method *method, uint32_t offset) { - auto methodId = method->GetFileId(); + auto method_id = method->GetFileId(); auto pf = method->GetPandaFile(); - PtLocation location(pf->GetFilename().c_str(), methodId, offset); + PtLocation location(pf->GetFilename().c_str(), method_id, offset); return GetVariables(pf, location); } @@ -46,11 +46,11 @@ std::vector TestUtil::GetVariables(const panda_fi return extractor->GetLocalVariableInfo(location.GetMethodId(), 
location.GetBytecodeOffset()); } -int32_t TestUtil::GetValueRegister(Method *method, const char *varName, uint32_t offset) +int32_t TestUtil::GetValueRegister(Method *method, const char *var_name, uint32_t offset) { auto variables = TestUtil::GetVariables(method, offset); for (const auto &var : variables) { - if (var.name == varName) { + if (var.name == var_name) { return var.reg_number; } } diff --git a/runtime/tests/tooling/test_util.h b/runtime/tests/tooling/test_util.h index 1d43234b9ec50b3e23a4f350444d0a0264060b94..c97283ee69dd70d928d304bb1fdcd028c61e9489 100644 --- a/runtime/tests/tooling/test_util.h +++ b/runtime/tests/tooling/test_util.h @@ -33,7 +33,7 @@ using TestMap = std::unordered_map test) + static void RegisterTest(panda_file::SourceLang language, const char *test_name, std::unique_ptr test) { auto it = test_map_.find(language); if (it == test_map_.end()) { @@ -41,7 +41,7 @@ public: auto res = test_map_.emplace(language, std::move(entry)); it = res.first; } - it->second.insert({testName, std::move(test)}); + it->second.insert({test_name, std::move(test)}); } static void SetExtractorFactory(TestExtractorFactory *factory) @@ -52,11 +52,11 @@ public: static ApiTest *GetTest(const char *name) { for (auto iter = test_map_.begin(); iter != test_map_.end(); ++iter) { - auto &internalMap = iter->second; - auto internalIt = std::find_if(internalMap.begin(), internalMap.end(), - [name](auto &iterator) { return !::strcmp(iterator.first, name); }); - if (internalIt != internalMap.end()) { - return internalIt->second.get(); + auto &internal_map = iter->second; + auto internal_it = std::find_if(internal_map.begin(), internal_map.end(), + [name](auto &iterator) { return !::strcmp(iterator.first, name); }); + if (internal_it != internal_map.end()) { + return internal_it->second.get(); } } LOG(FATAL, DEBUGGER) << "Test " << name << " not found"; @@ -65,17 +65,17 @@ public: static PtThread WaitForBreakpoint(PtLocation location) { - PtThread 
stoppedThread(PtThread::NONE); + PtThread stopped_thread(PtThread::NONE); auto predicate = [&location]() REQUIRES(event_mutex_) { return last_event_location_ == location; }; - auto onSuccess = [&stoppedThread]() REQUIRES(event_mutex_) { - stoppedThread = last_event_thread_; + auto on_success = [&stopped_thread]() REQUIRES(event_mutex_) { + stopped_thread = last_event_thread_; // Need to reset location, because we might want to stop at the same point last_event_location_ = PtLocation("", EntityId(0), 0); }; - WaitForEvent(DebugEvent::BREAKPOINT, predicate, onSuccess); - return stoppedThread; + WaitForEvent(DebugEvent::BREAKPOINT, predicate, on_success); + return stopped_thread; } static bool WaitForExit() @@ -122,24 +122,24 @@ public: return last_event_ == DebugEvent::VM_DEATH; } - static PtLocation GetLocation(const char *sourceFile, uint32_t line, const char *pandaFile) + static PtLocation GetLocation(const char *source_file, uint32_t line, const char *panda_file) { - std::unique_ptr uFile = panda_file::File::Open(pandaFile); - const panda_file::File *pf = uFile.get(); + std::unique_ptr u_file = panda_file::File::Open(panda_file); + const panda_file::File *pf = u_file.get(); if (pf == nullptr) { return PtLocation("", EntityId(0), 0); } auto extractor = extractor_factory_->MakeTestExtractor(pf); - auto [id, offset] = extractor->GetBreakpointAddress({sourceFile, line}); - return PtLocation(pandaFile, id, offset); + auto [id, offset] = extractor->GetBreakpointAddress({source_file, line}); + return PtLocation(panda_file, id, offset); } static std::vector GetVariables(Method *method, uint32_t offset); static std::vector GetVariables(const panda_file::File *pf, PtLocation location); - static int32_t GetValueRegister(Method *method, const char *varName, uint32_t offset = 0); + static int32_t GetValueRegister(Method *method, const char *var_name, uint32_t offset = 0); static bool SuspendUntilContinue(DebugEvent reason, PtThread thread, PtLocation location) { @@ -170,10 
+170,10 @@ public: return true; } - static bool GetUserThreadList(DebugInterface *debugInterface, PandaVector *threadList) + static bool GetUserThreadList(DebugInterface *debug_interface, PandaVector *thread_list) { PandaVector threads; - debugInterface->GetThreadList(&threads); + debug_interface->GetThreadList(&threads); for (auto &thread : threads) { ManagedThread *managed_thread = thread.GetManagedThread(); @@ -183,7 +183,7 @@ public: continue; } } - threadList->push_back(thread); + thread_list->push_back(thread); } return true; } @@ -198,8 +198,8 @@ private: return false; } constexpr uint64_t TIMEOUT_MSEC = 100000U; - bool timeExceeded = event_cv_.TimedWait(&event_mutex_, TIMEOUT_MSEC); - if (timeExceeded) { + bool time_exceeded = event_cv_.TimedWait(&event_mutex_, TIMEOUT_MSEC); + if (time_exceeded) { LOG(FATAL, DEBUGGER) << "Time limit exceeded while waiting " << event; return false; } diff --git a/runtime/thread.cpp b/runtime/thread.cpp index 18e828dc1e2d5c0d91c43d539576067b5b322bb3..b558d574ab5d3bbc82c0928841a0b9a0c879efad 100644 --- a/runtime/thread.cpp +++ b/runtime/thread.cpp @@ -169,6 +169,9 @@ MTManagedThread *MTManagedThread::Create(Runtime *runtime, PandaVM *vm, panda::p // runtime is destroyed auto thread = new MTManagedThread(os::thread::GetCurrentThreadId(), allocator, vm, thread_lang); thread->ProcessCreatedThread(); + + runtime->GetNotificationManager()->ThreadStartEvent(thread); + return thread; } diff --git a/runtime/tooling/inspector/CMakeLists.txt b/runtime/tooling/inspector/CMakeLists.txt index 29a6c077cfc7183a6d5be10b5087d7bbbcf5a2b7..6a04592381fbdd5bfa9b8f748608201232f89eda 100644 --- a/runtime/tooling/inspector/CMakeLists.txt +++ b/runtime/tooling/inspector/CMakeLists.txt @@ -50,7 +50,7 @@ target_link_libraries(arkinspector panda_add_sanitizers(TARGET arkinspector SANITIZERS ${PANDA_SANITIZERS_LIST}) -if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 14.0.0) +if (CMAKE_CXX_COMPILER_ID 
STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 13.0.0) target_compile_options(arkinspector PUBLIC "-Wno-null-pointer-subtraction") endif() diff --git a/runtime/tooling/sampler/sample_info.h b/runtime/tooling/sampler/sample_info.h new file mode 100644 index 0000000000000000000000000000000000000000..124c1d16b1fdb1ad50ae2275dc3cb21b5f74bc05 --- /dev/null +++ b/runtime/tooling/sampler/sample_info.h @@ -0,0 +1,125 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLE_INFO_H_ +#define PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLE_INFO_H_ + +#include +#include +#include + +#include "libpandabase/macros.h" + +namespace panda::tooling::sampler { + +// Saving one sample info +struct SampleInfo { + struct ManagedStackFrameId { + uintptr_t file_id {0}; + uintptr_t panda_file_ptr {0}; + }; + static constexpr size_t MAX_STACK_DEPTH = 100; + + // We can't use dynamic memory 'cause of usage inside the signal handler + std::array managed_stack; + uintptr_t managed_stack_size {0}; +}; + +// Saving one module info (panda file, .so) +struct FileInfo { + uintptr_t ptr {0}; + std::string pathname; +}; + +bool operator==(const SampleInfo &lhs, const SampleInfo &rhs); +bool operator!=(const SampleInfo &lhs, const SampleInfo &rhs); +bool operator==(const FileInfo &lhs, const FileInfo &rhs); +bool operator!=(const FileInfo &lhs, const FileInfo &rhs); +bool operator==(const SampleInfo::ManagedStackFrameId &lhs, const SampleInfo::ManagedStackFrameId &rhs); +bool operator!=(const SampleInfo::ManagedStackFrameId &lhs, const SampleInfo::ManagedStackFrameId &rhs); + +inline uintptr_t ReadUintptrTBitMisaligned(const void *ptr) +{ + /* + * Pointer might be misaligned + * To avoid of UB we should read misaligned adresses with memcpy + */ + std::array buf = {}; + memcpy(buf.data(), ptr, sizeof(uintptr_t)); + uintptr_t value = *reinterpret_cast(buf.data()); + return value; +} + +inline bool operator==(const SampleInfo::ManagedStackFrameId &lhs, const SampleInfo::ManagedStackFrameId &rhs) +{ + return lhs.file_id == rhs.file_id && lhs.panda_file_ptr == rhs.panda_file_ptr; +} + +inline bool operator!=(const SampleInfo::ManagedStackFrameId &lhs, const SampleInfo::ManagedStackFrameId &rhs) +{ + return !(lhs == rhs); +} + +inline bool operator==(const FileInfo &lhs, const FileInfo &rhs) +{ + return lhs.ptr == rhs.ptr && lhs.pathname == rhs.pathname; +} + +inline bool operator!=(const FileInfo &lhs, const FileInfo &rhs) +{ 
+ return !(lhs == rhs); +} + +inline bool operator==(const SampleInfo &lhs, const SampleInfo &rhs) +{ + if (lhs.managed_stack_size != rhs.managed_stack_size) { + return false; + } + + for (uint32_t i = 0; i < lhs.managed_stack_size; ++i) { + if (lhs.managed_stack[i] != rhs.managed_stack[i]) { + return false; + } + } + + return true; +} + +inline bool operator!=(const SampleInfo &lhs, const SampleInfo &rhs) +{ + return !(lhs == rhs); +} + +} // namespace panda::tooling::sampler + +// Definind std::hash for SampleInfo to use it as an unordered_map key +namespace std { + +template <> +struct hash { + std::size_t operator()(const panda::tooling::sampler::SampleInfo &s) const + { + ASSERT(s.managed_stack_size <= panda::tooling::sampler::SampleInfo::MAX_STACK_DEPTH); + size_t summ = 0; + for (size_t i = 0; i < s.managed_stack_size; ++i) { + summ += s.managed_stack[i].panda_file_ptr ^ s.managed_stack[i].file_id; + } + return std::hash()(summ ^ s.managed_stack_size); + } +}; + +} // namespace std + +#endif // PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLE_INFO_H_ \ No newline at end of file diff --git a/runtime/tooling/sampler/sample_reader-inl.h b/runtime/tooling/sampler/sample_reader-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..67328eea24642384dea4668ed1d3e6e2b4cb33c5 --- /dev/null +++ b/runtime/tooling/sampler/sample_reader-inl.h @@ -0,0 +1,125 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "libpandabase/utils/logger.h" + +#include "runtime/tooling/sampler/sample_info.h" +#include "runtime/tooling/sampler/sample_reader.h" +#include "runtime/tooling/sampler/sample_writer.h" + +namespace panda::tooling::sampler { + +// ------------------------------------------------- +// ------------- Format reader --------------------- +// ------------------------------------------------- + +/* + * Example for 64 bit architecture + * + * Stack Size Stack frames + * Sample row |__________|_____________------___________| + * 64 bits (128 * ) bits + * + * 0xFF..FF pointer name size module path (ASCII str) + * Module row |__________|__________|__________|_____________------___________| + * 64 bits 64 bits 64 bits (8 * ) bits + */ +inline SampleReader::SampleReader(const char *filename) +{ + { + std::ifstream bin_file(filename, std::ios::binary | std::ios::ate); + if (!bin_file) { + LOG(FATAL, PROFILER) << "Unable to open file \"" << filename << "\""; + UNREACHABLE(); + } + std::streamsize buffer_size = bin_file.tellg(); + bin_file.seekg(0, std::ios::beg); + + buffer_.resize(buffer_size); + + if (!bin_file.read(buffer_.data(), buffer_size)) { + LOG(FATAL, PROFILER) << "Unable to read sampler trace file"; + UNREACHABLE(); + } + bin_file.close(); + } + + size_t buffer_counter = 0; + while (buffer_counter < buffer_.size()) { + if (ReadUintptrTBitMisaligned(&buffer_[buffer_counter]) == StreamWriter::MODULE_INDICATOR_VALUE) { + // This entry is panda file + module_row_ptrs_.push_back(&buffer_[buffer_counter]); + size_t pf_name_size = ReadUintptrTBitMisaligned(&buffer_[buffer_counter + PANDA_FILE_NAME_SIZE_OFFSET]); + buffer_counter += PANDA_FILE_NAME_OFFSET + pf_name_size * sizeof(char); + continue; + } + // This entry is a sample + sample_row_ptrs_.push_back(&buffer_[buffer_counter]); + size_t stack_size = 
ReadUintptrTBitMisaligned(&buffer_[buffer_counter]); + + if (stack_size > SampleInfo::MAX_STACK_DEPTH) { + LOG(FATAL, PROFILER) << "ark sampling profiler trace file is invalid"; + UNREACHABLE(); + } + buffer_counter += SAMPLE_STACK_OFFSET + stack_size * sizeof(SampleInfo::ManagedStackFrameId); + } + if (buffer_counter != buffer_.size()) { + LOG(FATAL, PROFILER) << "ark sampling profiler trace file is invalid"; + UNREACHABLE(); + } +} + +// NOLINTBEGIN(cppcoreguidelines-pro-bounds-pointer-arithmetic) +inline bool SampleReader::GetNextSample(SampleInfo *sample_out) +{ + if (sample_row_ptrs_.size() <= sample_row_counter_) { + return false; + } + const char *current_sample_ptr = sample_row_ptrs_[sample_row_counter_]; + sample_out->managed_stack_size = ReadUintptrTBitMisaligned(¤t_sample_ptr[SAMPLE_STACK_SIZE_OFFSET]); + + ASSERT(sample_out->managed_stack_size <= SampleInfo::MAX_STACK_DEPTH); + memcpy(sample_out->managed_stack.data(), current_sample_ptr + SAMPLE_STACK_OFFSET, + sample_out->managed_stack_size * sizeof(SampleInfo::ManagedStackFrameId)); + ++sample_row_counter_; + return true; +} + +inline bool SampleReader::GetNextModule(FileInfo *module_out) +{ + if (module_row_ptrs_.size() <= module_row_counter_) { + return false; + } + + module_out->pathname.clear(); + + const char *current_module_ptr = module_row_ptrs_[module_row_counter_]; + module_out->ptr = ReadUintptrTBitMisaligned(¤t_module_ptr[PANDA_FILE_POINTER_OFFSET]); + size_t str_size = ReadUintptrTBitMisaligned(¤t_module_ptr[PANDA_FILE_NAME_SIZE_OFFSET]); + const char *str_ptr = ¤t_module_ptr[PANDA_FILE_NAME_OFFSET]; + + for (size_t i = 0; i < str_size; ++i) { + module_out->pathname.push_back(str_ptr[i]); + } + + ++module_row_counter_; + return true; +} +// NOLINTEND(cppcoreguidelines-pro-bounds-pointer-arithmetic) + +} // namespace panda::tooling::sampler \ No newline at end of file diff --git a/runtime/tooling/sampler/sample_reader.h b/runtime/tooling/sampler/sample_reader.h new file mode 100644 index 
0000000000000000000000000000000000000000..688ea973536293177d10f4a89b3ca231ef11ec56 --- /dev/null +++ b/runtime/tooling/sampler/sample_reader.h @@ -0,0 +1,54 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLE_READER_H_ +#define PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLE_READER_H_ + +#include "libpandabase/macros.h" + +#include "runtime/tooling/sampler/sample_info.h" + +namespace panda::tooling::sampler { + +// Reader of .aspt format +class SampleReader final { +public: + static constexpr size_t SAMPLE_STACK_SIZE_OFFSET = 0 * sizeof(uintptr_t); + static constexpr size_t SAMPLE_STACK_OFFSET = 1 * sizeof(uintptr_t); + static constexpr size_t PANDA_FILE_POINTER_OFFSET = 1 * sizeof(uintptr_t); + static constexpr size_t PANDA_FILE_NAME_SIZE_OFFSET = 2 * sizeof(uintptr_t); + static constexpr size_t PANDA_FILE_NAME_OFFSET = 3 * sizeof(uintptr_t); + + inline explicit SampleReader(const char *filename); + ~SampleReader() = default; + + inline bool GetNextSample(SampleInfo *sample_out); + inline bool GetNextModule(FileInfo *module_out); + + NO_COPY_SEMANTIC(SampleReader); + NO_MOVE_SEMANTIC(SampleReader); + +private: + // Using std::vector instead of PandaVector 'cause it should be used in tool without runtime + std::vector buffer_; + std::vector sample_row_ptrs_; + std::vector module_row_ptrs_; + size_t sample_row_counter_ {0}; + size_t module_row_counter_ {0}; +}; + +} 
// namespace panda::tooling::sampler + +#endif // PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLE_READER_H_ \ No newline at end of file diff --git a/runtime/tooling/sampler/sample_writer.cpp b/runtime/tooling/sampler/sample_writer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..87c84c656ee4417bb0db8e1985040d48be4d5f07 --- /dev/null +++ b/runtime/tooling/sampler/sample_writer.cpp @@ -0,0 +1,56 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include "runtime/tooling/sampler/sample_info.h" +#include "runtime/tooling/sampler/sample_writer.h" + +namespace panda::tooling::sampler { + +void StreamWriter::WriteSample(const SampleInfo &sample) const +{ + ASSERT(write_stream_ptr_ != nullptr); + ASSERT(sample.managed_stack_size <= SampleInfo::MAX_STACK_DEPTH); + + static_assert(sizeof(sample.managed_stack_size) == sizeof(uintptr_t)); + + write_stream_ptr_->write(reinterpret_cast(&sample.managed_stack_size), + sizeof(sample.managed_stack_size)); + write_stream_ptr_->write(reinterpret_cast(sample.managed_stack.data()), + sample.managed_stack_size * sizeof(SampleInfo::ManagedStackFrameId)); +} + +void StreamWriter::WriteModule(const FileInfo &module_info) const +{ + ASSERT(write_stream_ptr_ != nullptr); + static_assert(sizeof(MODULE_INDICATOR_VALUE) == sizeof(uintptr_t)); + static_assert(sizeof(module_info.ptr) == sizeof(uintptr_t)); + static_assert(sizeof(module_info.pathname.length()) == sizeof(uintptr_t)); + + if (module_info.pathname.empty()) { + return; + } + size_t str_size = module_info.pathname.length(); + + write_stream_ptr_->write(reinterpret_cast(&MODULE_INDICATOR_VALUE), sizeof(MODULE_INDICATOR_VALUE)); + write_stream_ptr_->write(reinterpret_cast(&module_info.ptr), sizeof(module_info.ptr)); + write_stream_ptr_->write(reinterpret_cast(&str_size), sizeof(module_info.pathname.length())); + write_stream_ptr_->write(module_info.pathname.data(), module_info.pathname.length() * sizeof(char)); +} + +} // namespace panda::tooling::sampler \ No newline at end of file diff --git a/runtime/tooling/sampler/sample_writer.h b/runtime/tooling/sampler/sample_writer.h new file mode 100644 index 0000000000000000000000000000000000000000..d2bab73b57e24d394601bbaa3f01ec06d6c25ae8 --- /dev/null +++ b/runtime/tooling/sampler/sample_writer.h @@ -0,0 +1,97 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLE_WRITER_H_ +#define PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLE_WRITER_H_ + +#include +#include +#include + +#include "libpandabase/os/thread.h" + +#include "runtime/tooling/sampler/sample_info.h" + +namespace panda::tooling::sampler { + +/* + * ======================================================= + * ============= Sampler binary format ================== + * ======================================================= + * + * Writing with the fasters and more convenient format .aspt + * Then it should be converted to flamegraph + * + * .aspt - ark sampling profiler trace file, binary format + * + * .aspt consists of 2 type information: + * - module row (panda file and its pointer) + * - sample row (sample information) + * + * module row for 64-bits: + * first 8 byte is 0xFFFFFFFF (to recognize that it's not a sample row) + * next 8 byte is pointer module + * next 8 byte is size of panda file name + * next bytes is panda fila name in ASCII symbols + * + * sample row: + * first 8 bytes is stack size + * next bytes is stack frame + * one stack frame is panda file ptr and file id + * + * Example for 64-bit architecture: + * + * Stack Size Stack frames + * Sample row |__________|_____________------___________| + * 64 bits (128 * ) bits + * + * 0xFF..FF pointer name size module path (ASCII str) + * Module row |__________|__________|__________|_____________------___________| + 
* 64 bits 64 bits 64 bits (8 * ) bits + */ +class StreamWriter final { +public: + explicit StreamWriter(const char *filename) + { + /* + * This class instance should be used only from one thread + * It may lead to format invalidation + * This class wasn't made thread safe for performance reason + */ + write_stream_ptr_ = std::make_unique(filename, std::ios::binary); + ASSERT(write_stream_ptr_ != nullptr); + } + + ~StreamWriter() + { + write_stream_ptr_->flush(); + write_stream_ptr_->close(); + }; + + void WriteModule(const FileInfo &module_info) const; + void WriteSample(const SampleInfo &sample) const; + + NO_COPY_SEMANTIC(StreamWriter); + NO_MOVE_SEMANTIC(StreamWriter); + + static constexpr uintptr_t MODULE_INDICATOR_VALUE = 0xFFFFFFFF; + +private: + std::unique_ptr write_stream_ptr_; +}; + +} // namespace panda::tooling::sampler + +#endif // PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLE_WRITER_H_ \ No newline at end of file diff --git a/runtime/tooling/sampler/sampling_profiler.cpp b/runtime/tooling/sampler/sampling_profiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..78b521805d9f3d7052e1c35d5f7671d44efaba17 --- /dev/null +++ b/runtime/tooling/sampler/sampling_profiler.cpp @@ -0,0 +1,307 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "libpandabase/macros.h" +#include "os/thread.h" +#include "runtime/tooling/sampler/sampling_profiler.h" +#include "runtime/include/managed_thread.h" +#include "runtime/thread_manager.h" + +namespace panda::tooling::sampler { + +/* static */ +Sampler *Sampler::instance_ = nullptr; + +static size_t S_LOST_SAMPLES = 0; +static size_t S_TOTAL_SAMPLES = 0; + +/* static */ +Sampler *Sampler::Create() +{ + /* + * Sampler can be created only once and managed by one thread + * Runtime::Tools owns it ptr after it's created + */ + ASSERT(instance_ == nullptr); + instance_ = new Sampler; + + return Sampler::instance_; +} + +/* static */ +void Sampler::Destroy(Sampler *sampler) +{ + ASSERT(instance_ != nullptr); + ASSERT(instance_ == sampler); + ASSERT(instance_->agent_tid_ == os::thread::GetNativeHandle()); + ASSERT(!sampler->is_active_); + + LOG(INFO, PROFILER) << "Total samples: " << S_TOTAL_SAMPLES << "\nLost samples: " << S_LOST_SAMPLES; + + delete sampler; + instance_ = nullptr; +} + +Sampler::Sampler() : runtime_(Runtime::GetCurrent()), sample_interval_(DEFAULT_SAMPLE_INTERVAL_US) +{ + ASSERT_NATIVE_CODE(); + + // Sampler constructor should be called in agent thread + agent_tid_ = os::thread::GetNativeHandle(); +} + +void Sampler::AddThreadHandle(ManagedThread *thread) +{ + os::memory::LockHolder holder(managed_threads_lock_); + managed_threads_.insert(thread->GetId()); +} + +void Sampler::EraseThreadHandle(ManagedThread *thread) +{ + os::memory::LockHolder holder(managed_threads_lock_); + managed_threads_.erase(thread->GetId()); +} + +void Sampler::ThreadStart(ManagedThread *managed_thread) +{ + AddThreadHandle(managed_thread); +} + +void Sampler::ThreadEnd(ManagedThread *managed_thread) +{ + EraseThreadHandle(managed_thread); +} + +void Sampler::LoadModule(std::string_view name) +{ + auto callback = [this, name](const panda_file::File &pf) { + if (pf.GetFilename() == name) { + auto ptr_id = reinterpret_cast(&pf); + FileInfo pf_module; + 
pf_module.ptr = ptr_id; + pf_module.pathname = pf.GetFullFileName(); + os::memory::LockHolder holder(loaded_pfs_lock_); + this->loaded_pfs_.push_back(pf_module); + return false; + } + return true; + }; + runtime_->GetClassLinker()->EnumeratePandaFiles(callback, false); +} + +bool Sampler::Start(const char *filename) +{ + ASSERT(agent_tid_ == os::thread::GetNativeHandle()); + ASSERT(runtime_ != nullptr); + + if (is_active_) { + LOG(ERROR, PROFILER) << "Attemp to start sampling profiler while it's already started"; + return false; + } + + if (UNLIKELY(!communicator_.Init())) { + LOG(ERROR, PROFILER) << "Failed to create pipes for sampling listener. Profiler cannot be started"; + return false; + } + + runtime_->GetNotificationManager()->AddListener(this, RuntimeNotificationManager::Event::THREAD_EVENTS); + runtime_->GetNotificationManager()->AddListener(this, RuntimeNotificationManager::Event::LOAD_MODULE); + CollectThreads(); + + is_active_ = true; + // Creating std::string instead of sending pointer to avoid UB stack-use-after-scope + listener_thread_ = std::make_unique(&Sampler::ListenerThreadEntry, this, std::string(filename)); + listener_tid_ = listener_thread_->native_handle(); + + // All prepairing actions should be done before this thread is started + sampler_thread_ = std::make_unique(&Sampler::SamplerThreadEntry, this); + sampler_tid_ = sampler_thread_->native_handle(); + + return true; +} + +void Sampler::Stop() +{ + ASSERT(agent_tid_ == os::thread::GetNativeHandle()); + + if (!is_active_) { + LOG(ERROR, PROFILER) << "Attemp to stop sampling profiler, but it was not started"; + return; + } + if (!sampler_thread_->joinable()) { + LOG(FATAL, PROFILER) << "Sampling profiler thread unexpectedly disappeared"; + UNREACHABLE(); + } + if (!listener_thread_->joinable()) { + LOG(FATAL, PROFILER) << "Listener profiler thread unexpectedly disappeared"; + UNREACHABLE(); + } + + is_active_ = false; + sampler_thread_->join(); + listener_thread_->join(); + + // After 
threads are stopped we can clear all sampler info + sampler_thread_.reset(); + listener_thread_.reset(); + sampler_tid_ = 0; + listener_tid_ = 0; + + runtime_->GetNotificationManager()->RemoveListener(this, RuntimeNotificationManager::Event::THREAD_EVENTS); + runtime_->GetNotificationManager()->RemoveListener(this, RuntimeNotificationManager::Event::LOAD_MODULE); + + { + os::memory::LockHolder holder(managed_threads_lock_); + managed_threads_.clear(); + } +} + +void Sampler::WritePandaFiles(const StreamWriter *writer_ptr) +{ + auto callback = [writer_ptr](const panda_file::File &pf) { + auto ptr_id = reinterpret_cast(&pf); + FileInfo pf_module; + pf_module.ptr = ptr_id; + pf_module.pathname = pf.GetFullFileName(); + + writer_ptr->WriteModule(pf_module); + return true; + }; + runtime_->GetClassLinker()->EnumeratePandaFiles(callback, false); +} + +void Sampler::WriteLoadedPandaFiles(const StreamWriter *writer_ptr) +{ + os::memory::LockHolder holder(loaded_pfs_lock_); + for (const auto &module : loaded_pfs_) { + writer_ptr->WriteModule(module); + } + loaded_pfs_.clear(); +} + +void Sampler::CollectThreads() +{ + auto t_manager = runtime_->GetPandaVM()->GetThreadManager(); + if (UNLIKELY(t_manager == nullptr)) { + // TODO(m.strizhak): make it for languages without thread_manager + LOG(FATAL, PROFILER) << "Thread manager is nullptr"; + UNREACHABLE(); + } + + t_manager->EnumerateThreads( + [this](ManagedThread *thread) { + AddThreadHandle(thread); + return true; + }, + static_cast(EnumerationFlag::ALL), static_cast(EnumerationFlag::VM_THREAD)); +} + +void SigProfSamplingProfilerHandler([[maybe_unused]] int signum) +{ + auto mthread = ManagedThread::GetCurrent(); + ASSERT(mthread != nullptr); + + // Checking that code is being executed + auto frame_ptr = reinterpret_cast(mthread->GetCurrentFrame()); + if (frame_ptr == nullptr) { + return; + } + + S_TOTAL_SAMPLES++; + auto stack_walker = StackWalker::Create(mthread); + + SampleInfo sample; + size_t stack_counter = 0; + 
while (stack_walker.HasFrame()) { + auto method = stack_walker.GetMethod(); + if (method == nullptr) { + // sample is not valid + S_LOST_SAMPLES++; + return; + } + + auto pf_id = reinterpret_cast(method->GetPandaFile()); + sample.managed_stack[stack_counter].panda_file_ptr = pf_id; + sample.managed_stack[stack_counter].file_id = method->GetFileId().GetOffset(); + + ++stack_counter; + stack_walker.NextFrame(); + + if (stack_counter == SampleInfo::MAX_STACK_DEPTH) { + // According to the limitations we should drop all frames that is higher than MAX_STACK_DEPTH + break; + } + } + if (stack_counter == 0) { + return; + } + sample.managed_stack_size = stack_counter; + const ThreadCommunicator &communicator = Sampler::GetSampleCommunicator(); + communicator.SendSample(sample); +} + +void Sampler::SamplerThreadEntry() +{ + signal(SIGPROF, SigProfSamplingProfilerHandler); + auto pid = getpid(); + // Atomic with relaxed order reason: data race with is_active_ + while (is_active_.load(std::memory_order_relaxed)) { + { + os::memory::LockHolder holder(managed_threads_lock_); + for (const auto &thread_id : managed_threads_) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg) + if (syscall(SYS_tgkill, pid, thread_id, SIGPROF) != 0) { + LOG(ERROR, PROFILER) << "Can't send signal to thread"; + } + } + } + os::thread::NativeSleepUS(sample_interval_); + } + + // Sending last sample on finish to avoid of deadlock in listener + SampleInfo last_sample; + last_sample.managed_stack_size = 0; + communicator_.SendSample(last_sample); +} + +// Passing std:string copy instead of reference, 'cause another thread owns this object +// NOLINTNEXTLINE(performance-unnecessary-value-param) +void Sampler::ListenerThreadEntry(std::string output_file) +{ + auto writer_ptr = std::make_unique(output_file.c_str()); + // Writting panda files to .aspt in the same thread + WritePandaFiles(writer_ptr.get()); + + SampleInfo buffer_sample; + // Atomic with relaxed order reason: data race with is_active_ + 
while (is_active_.load(std::memory_order_relaxed)) { + communicator_.ReadSample(&buffer_sample); + if (LIKELY(buffer_sample.managed_stack_size != 0)) { + writer_ptr->WriteSample(buffer_sample); + } + } + // Writing all remaining samples + while (!communicator_.IsPipeEmpty()) { + communicator_.ReadSample(&buffer_sample); + if (LIKELY(buffer_sample.managed_stack_size != 0)) { + writer_ptr->WriteSample(buffer_sample); + } + } + WriteLoadedPandaFiles(writer_ptr.get()); +} + +} // namespace panda::tooling::sampler diff --git a/runtime/tooling/sampler/sampling_profiler.h b/runtime/tooling/sampler/sampling_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..ddc9d8d2330a605bfd9108765ab28e38aca9390e --- /dev/null +++ b/runtime/tooling/sampler/sampling_profiler.h @@ -0,0 +1,116 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLING_PROFILER_H_ +#define PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLING_PROFILER_H_ + +#include + +#include "libpandabase/macros.h" +#include "platforms/unix/libpandabase/pipe.h" +#include "runtime/include/panda_vm.h" +#include "runtime/include/runtime_notification.h" +#include "runtime/include/mem/panda_containers.h" + +#include "runtime/tooling/sampler/sample_info.h" +#include "runtime/tooling/sampler/sample_writer.h" +#include "runtime/tooling/sampler/thread_communicator.h" + +namespace panda::tooling::sampler { + +namespace test { +class SamplerTest; +} // namespace test + +// Panda sampling profiler +class Sampler final : public RuntimeListener { +public: + ~Sampler() override = default; + + static Sampler *Create(); + static void Destroy(Sampler *sampler); + + // Need to get comunicator inside the signal handler + static const ThreadCommunicator &GetSampleCommunicator() + { + ASSERT(instance_ != nullptr); + return instance_->GetCommunicator(); + } + + const ThreadCommunicator &GetCommunicator() const + { + return communicator_; + } + + void SetSampleInterval(uint32_t us) + { + ASSERT(is_active_ == false); + sample_interval_ = static_cast(us); + } + + bool Start(const char *filename); + void Stop(); + + // Events: Notify profiler that managed thread created or finished + void ThreadStart(ManagedThread *managed_thread) override; + void ThreadEnd(ManagedThread *managed_thread) override; + void LoadModule(std::string_view name) override; + + static constexpr uint32_t DEFAULT_SAMPLE_INTERVAL_US = 500; + +private: + Sampler(); + + void SamplerThreadEntry(); + void ListenerThreadEntry(std::string output_file); + + void AddThreadHandle(ManagedThread *thread); + void EraseThreadHandle(ManagedThread *thread); + + void CollectThreads(); + + void WritePandaFiles(const StreamWriter *writer_ptr); + void WriteLoadedPandaFiles(const StreamWriter *writer_ptr); + + static Sampler *instance_; + + Runtime *runtime_ {nullptr}; + // 
Remember agent thread id for security + os::thread::native_handle_type agent_tid_ {0}; + os::thread::native_handle_type listener_tid_ {0}; + os::thread::native_handle_type sampler_tid_ {0}; + std::unique_ptr sampler_thread_ {nullptr}; + std::unique_ptr listener_thread_ {nullptr}; + ThreadCommunicator communicator_; + + std::atomic is_active_ {false}; + + PandaSet managed_threads_ GUARDED_BY(managed_threads_lock_); + os::memory::Mutex managed_threads_lock_; + + PandaVector loaded_pfs_ GUARDED_BY(loaded_pfs_lock_); + os::memory::Mutex loaded_pfs_lock_; + + std::chrono::microseconds sample_interval_; + + friend class test::SamplerTest; + + NO_COPY_SEMANTIC(Sampler); + NO_MOVE_SEMANTIC(Sampler); +}; + +} // namespace panda::tooling::sampler + +#endif // PANDA_RUNTIME_TOOLING_SAMPLER_SAMPLING_PROFILER_H_ \ No newline at end of file diff --git a/runtime/tooling/sampler/thread_communicator.cpp b/runtime/tooling/sampler/thread_communicator.cpp new file mode 100644 index 0000000000000000000000000000000000000000..843e2098ae7e6d62ce94ac56a3db704811bf4ea2 --- /dev/null +++ b/runtime/tooling/sampler/thread_communicator.cpp @@ -0,0 +1,57 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "runtime/tooling/sampler/sample_info.h" +#include "runtime/tooling/sampler/thread_communicator.h" + +namespace panda::tooling::sampler { + +bool ThreadCommunicator::IsPipeEmpty() const +{ + ASSERT(listener_pipe_[PIPE_READ_ID] != 0 && listener_pipe_[PIPE_WRITE_ID] != 0); + + struct pollfd poll_fd = {listener_pipe_[PIPE_READ_ID], POLLIN, 0}; + return poll(&poll_fd, 1, 0) == 0; +} + +bool ThreadCommunicator::SendSample(const SampleInfo &sample) const +{ + ASSERT(listener_pipe_[PIPE_READ_ID] != 0 && listener_pipe_[PIPE_WRITE_ID] != 0); + + const void *buffer = reinterpret_cast(&sample); + ssize_t syscall_result = write(listener_pipe_[PIPE_WRITE_ID], buffer, sizeof(SampleInfo)); + if (syscall_result == -1) { + return false; + } + ASSERT(syscall_result == sizeof(SampleInfo)); + return true; +} + +bool ThreadCommunicator::ReadSample(SampleInfo *sample) const +{ + ASSERT(listener_pipe_[PIPE_READ_ID] != 0 && listener_pipe_[PIPE_WRITE_ID] != 0); + + void *buffer = reinterpret_cast(sample); + + // TODO(m.strizhak): optimize by reading several samples by one call + ssize_t syscall_result = read(listener_pipe_[PIPE_READ_ID], buffer, sizeof(SampleInfo)); + if (syscall_result == -1) { + return false; + } + ASSERT(syscall_result == sizeof(SampleInfo)); + return true; +} + +} // namespace panda::tooling::sampler \ No newline at end of file diff --git a/runtime/tooling/sampler/thread_communicator.h b/runtime/tooling/sampler/thread_communicator.h new file mode 100644 index 0000000000000000000000000000000000000000..daf4bcc8ef293441cc09254352d3a9fa0980f9db --- /dev/null +++ b/runtime/tooling/sampler/thread_communicator.h @@ -0,0 +1,74 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef PANDA_RUNTIME_TOOLING_SAMPLER_THREAD_COMMUNICATOR_H_ +#define PANDA_RUNTIME_TOOLING_SAMPLER_THREAD_COMMUNICATOR_H_ + +#include +#include +#include + +#include "libpandabase/macros.h" +#include "libpandabase/utils/logger.h" +#include "os/failure_retry.h" + +#include "runtime/tooling/sampler/sample_info.h" + +namespace panda::tooling::sampler { + +namespace test { +class SamplerTest; +} // namespace test + +class ThreadCommunicator final { +public: + static constexpr uint8_t PIPE_READ_ID {0}; + static constexpr uint8_t PIPE_WRITE_ID {1}; + + ThreadCommunicator() = default; + + ~ThreadCommunicator() + { + for (int fd : listener_pipe_) { + if (fd != -1) { + LOG_IF(PANDA_FAILURE_RETRY(::close(fd)) != 0, FATAL, PROFILER) << "Cannot close fd: " << fd; + } + } + } + + bool Init() + { + if (listener_pipe_[PIPE_READ_ID] != -1) { + return true; + } + return pipe2(listener_pipe_.data(), O_CLOEXEC) != -1; + } + + bool IsPipeEmpty() const; + bool SendSample(const SampleInfo &sample) const; + bool ReadSample(SampleInfo *sample) const; + + NO_COPY_SEMANTIC(ThreadCommunicator); + NO_MOVE_SEMANTIC(ThreadCommunicator); + +private: + std::array listener_pipe_ {-1, -1}; + + friend class test::SamplerTest; +}; + +} // namespace panda::tooling::sampler + +#endif // PANDA_RUNTIME_TOOLING_SAMPLER_THREAD_COMMUNICATOR_H_ \ No newline at end of file diff --git a/runtime/tooling/tools.cpp b/runtime/tooling/tools.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d32d85ff5b3c86d064a6b560611054a4a934db06 --- /dev/null +++ 
b/runtime/tooling/tools.cpp @@ -0,0 +1,56 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "runtime/tooling/tools.h" +#include "runtime/tooling/sampler/sampling_profiler.h" + +namespace panda::tooling { + +sampler::Sampler *Tools::GetSamplingProfiler() +{ + // Singleton instance + return sampler_; +} + +void Tools::CreateSamplingProfiler() +{ + ASSERT(sampler_ == nullptr); + sampler_ = sampler::Sampler::Create(); +} + +bool Tools::StartSamplingProfiler(const std::string &aspt_filename, uint32_t interval) +{ + ASSERT(sampler_ != nullptr); + sampler_->SetSampleInterval(interval); + if (aspt_filename.empty()) { + std::time_t current_time = std::time(nullptr); + std::tm *local_time = std::localtime(¤t_time); + std::string aspt_filename_time = std::to_string(local_time->tm_hour) + "-" + + std::to_string(local_time->tm_min) + "-" + std::to_string(local_time->tm_sec) + + ".aspt"; + return sampler_->Start(aspt_filename_time.c_str()); + } + return sampler_->Start(aspt_filename.c_str()); +} + +void Tools::StopSamplingProfiler() +{ + ASSERT(sampler_ != nullptr); + sampler_->Stop(); + sampler::Sampler::Destroy(sampler_); + sampler_ = nullptr; +} + +} // namespace panda::tooling diff --git a/verification/type/type_systems.h b/runtime/tooling/tools.h similarity index 45% rename from verification/type/type_systems.h rename to runtime/tooling/tools.h index 
e57ba1412ef89333dc9b7e2bc0e1fd5bcb64c19e..97f2ec71bc82008fea8006862f88a8a64e4f00d6 100644 --- a/verification/type/type_systems.h +++ b/runtime/tooling/tools.h @@ -13,35 +13,34 @@ * limitations under the License. */ -#ifndef PANDA_TYPE_SYSTEMS_H__ -#define PANDA_TYPE_SYSTEMS_H__ +#include "libpandabase/macros.h" -#include "type_sort.h" -#include "type_tags.h" +#ifndef PANDA_RUNTIME_TOOLING_TOOLS_H_ +#define PANDA_RUNTIME_TOOLING_TOOLS_H_ -#include "verification/value/variables.h" +namespace panda::tooling { -#include "macros.h" +namespace sampler { +class Sampler; +} // namespace sampler -namespace panda::verifier { -class TypeSystem; -class Type; -class TypeParams; - -class TypeSystems { +class Tools { public: - static const PandaString &ImageOfType(const Type &type); - static PandaString ImageOfTypeParams(const TypeParams &type_params); - static SortIdx GetSort(TypeSystemKind kind, ThreadNum threadnum, const PandaString &name); - static TypeSystem &Get(TypeSystemKind kind, ThreadNum threadnum); + Tools() = default; + ~Tools() = default; - static void Initialize(size_t numThreads); - static void Destroy(); + void CreateSamplingProfiler(); + sampler::Sampler *GetSamplingProfiler(); + bool StartSamplingProfiler(const std::string &aspt_filename, uint32_t interval); + void StopSamplingProfiler(); private: - struct Impl; - inline static Impl *impl {nullptr}; + NO_COPY_SEMANTIC(Tools); + NO_MOVE_SEMANTIC(Tools); + + sampler::Sampler *sampler_ {nullptr}; }; -} // namespace panda::verifier -#endif // !PANDA_TYPE_SYSTEMS_H__ +} // namespace panda::tooling + +#endif // PANDA_RUNTIME_TOOLING_TOOLS_H_ \ No newline at end of file diff --git a/templates/common.rb b/templates/common.rb index 5be8972c144af35ed7db1eb3bb4fb3ba6b3a08ee..5f1ec322ff93765425dc1133c720e78d16f84fda 100755 --- a/templates/common.rb +++ b/templates/common.rb @@ -150,7 +150,7 @@ class Event < SimpleDelegator def print_line qoute = '"' - line = 'events_file' + line = 'events_file_' delim = ' << ' fields.each 
do |field| line.concat(delim, qoute, field.name, qoute) diff --git a/templates/events/events.h.erb b/templates/events/events.h.erb index 9dca9e8dec7303d020217f7d0203d232d0586464..152ce546e21c638978a7dcf5205d296a313d0f37 100644 --- a/templates/events/events.h.erb +++ b/templates/events/events.h.erb @@ -35,21 +35,21 @@ public: } static void Init(const std::string& file_path) { - EventWriter::events_file.open(file_path, std::ios::out); + EventWriter::events_file_.open(file_path, std::ios::out); } % Common::events.each do |op| void <%= op.method_name %>(<%= op.args_list %>) { - events_file << class_name_.data() << "::" << method_name_.data() << ',' << "<%= op.name %>" << ','; + events_file_ << class_name_.data() << "::" << method_name_.data() << ',' << "<%= op.name %>" << ','; <%= op.print_line %>; - events_file << std::endl; + events_file_ << std::endl; } % end public: // TODO (a.popov) synchronize object in multithreaded mode // NOLINTNEXTLINE(cert-err58-cpp,fuchsia-statically-constructed-objects) - static inline std::ofstream events_file; + static inline std::ofstream events_file_; private: static constexpr size_t BUF_SIZE = 512; diff --git a/templates/options/options.h.erb b/templates/options/options.h.erb index d20423f68cd1920c2060b9b073c86d73fb8f123d..968ae337eb9843ede7e904cc75e103a293103569 100644 --- a/templates/options/options.h.erb +++ b/templates/options/options.h.erb @@ -42,7 +42,7 @@ public: explicit Options(const std::string &exe_path) : exe_dir_(GetExeDir(exe_path)) {} void AddOptions(PandArgParser *parser) { - parser->Add(&version); + parser->Add(&version_); % Common::options.each do |op| % next if op.sub_option? % next if op.deprecated? 
@@ -51,15 +51,15 @@ public: } bool IsVersion() const { - return version.GetValue(); + return version_.GetValue(); } void SetVersion(bool value) { - version.SetValue(value); + version_.SetValue(value); } bool WasSetVersion() { - return version.WasSet(); + return version_.WasSet(); } % Common::options.each do |op| @@ -165,7 +165,7 @@ private: % end std::string exe_dir_; - PandArg version{"version", false, R"(Ark version, file format version and minimum supported file format version)"}; + PandArg version_{"version", false, R"(Ark version, file format version and minimum supported file format version)"}; % Common::options.each do |op| % if defined? op.delimiter PandArg<<%= op.type %>> <%= op.field_name %>{"<%= op.name %>", <%= op.default_value %>, <%= op.full_description %>, "<%= op.delimiter %>"}; diff --git a/tests/cts-generator/CMakeLists.txt b/tests/cts-generator/CMakeLists.txt index 246196b3451632eb64e407dab4018ebb823d3879..cd960b31d0f10e2c55cbe95206390ea535c3918a 100644 --- a/tests/cts-generator/CMakeLists.txt +++ b/tests/cts-generator/CMakeLists.txt @@ -164,7 +164,6 @@ if(NOT CMAKE_CROSSCOMPILING) -p "${PROJECT_BINARY_DIR}/../../" -x "${CTS_TEMP_DIR}" -v 1 - -e irtoc_ignore -o "--interpreter-type=irtoc --gc-type=g1-gc" --with-quickener --global-timeout ${PANDA_CTS_TESTING_TIMEOUT} diff --git a/tests/cts-generator/cts-template/divu2.64.yaml b/tests/cts-generator/cts-template/divu2.64.yaml index 80ba90da5d4c89e13f4d29f0d408fcf5fe032752..7562d56b052e118b945cb236d0259b12931a3d6b 100755 --- a/tests/cts-generator/cts-template/divu2.64.yaml +++ b/tests/cts-generator/cts-template/divu2.64.yaml @@ -66,7 +66,7 @@ tests: acc: inout:u64 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai.64 1 @@ -160,7 +160,7 @@ tests: acc: inout:u64 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai.64 0x7FFFFFFFFFFFFFFF @@ -207,7 +207,7 @@ tests: acc: inout:u64 prefix: unsigned format: 
[pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai.64 -0x8000000000000000 diff --git a/tests/cts-generator/cts-template/divu2.yaml b/tests/cts-generator/cts-template/divu2.yaml index 66a4b64f7019705f5803bf51cf8e839ad09db795..9f918cc9cc811a8c904b625924f1f43c8e06f3aa 100755 --- a/tests/cts-generator/cts-template/divu2.yaml +++ b/tests/cts-generator/cts-template/divu2.yaml @@ -66,7 +66,7 @@ tests: acc: inout:u32 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai 1 @@ -160,7 +160,7 @@ tests: acc: inout:u32 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai 0x7FFFFFFF @@ -207,7 +207,7 @@ tests: acc: inout:u32 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai -0x80000000 diff --git a/tests/cts-generator/cts-template/initobj.range.yaml b/tests/cts-generator/cts-template/initobj.range.yaml index b0b6611ea8451cc8b68e3a1d8275e935255b71f6..2b85ff8bca4021fefc7e9a78e5aafb2f9a460bba 100755 --- a/tests/cts-generator/cts-template/initobj.range.yaml +++ b/tests/cts-generator/cts-template/initobj.range.yaml @@ -1158,7 +1158,7 @@ tests: default values (i.e. 0 for primitives and null for objects), call specified initializer and put a reference to the newly created object into accumulator. method_id should resolve to an initializer. description: Check 'initobj.range' with valid method id creates expected instance type. 
- tags: ['irtoc_ignore'] + tags: [] header-template: [] code-template: | .record panda.Object @@ -1167,27 +1167,27 @@ tests: .function void R.ctor1(R a0, i32 a1) { return.void } - .function R[] R.ctor1x(R a0, i32 a1) { + .function void R.ctor1x(R a0, i32 a1) { movi v0, 10 newarr v0, v0, R[] lda.obj v0 - return.obj + return.void } .function void R.ctor2(R a0, i16 a1, R a2) { return.void } - .function R R.ctor2x(R a0, i16 a1, R a2) { + .function void R.ctor2x(R a0, i16 a1, R a2) { lda.null - return.obj + return.void } .function void R.ctor3(R a0, i32[] a1, R[] a2, i8 a3) { return.void } - .function panda.Object[] R.ctor3x(R a0, i32[] a1, R[] a2, i8 a3) { + .function void R.ctor3x(R a0, i32[] a1, R[] a2, i8 a3) { movi v0, 10 newarr v0, v0, panda.Object[] lda.obj v0 - return.obj + return.void } .function void R.ctor4(R a0, R[][] a1, R[] a2, i32 a3, f64 a4) { return.void diff --git a/tests/cts-generator/cts-template/initobj.short.yaml b/tests/cts-generator/cts-template/initobj.short.yaml index 13c2e4ed5f6afd49b4f3caa2480cfce6a13d3341..4571452fe50e92b52759ac8b44eb27b039b3be4e 100755 --- a/tests/cts-generator/cts-template/initobj.short.yaml +++ b/tests/cts-generator/cts-template/initobj.short.yaml @@ -1044,7 +1044,7 @@ tests: default values (i.e. 0 for primitives and null for objects), call specified initializer and put a reference to the newly created object into accumulator. method_id should resolve to an initializer. description: Check 'initobj.short' with valid method id creates expected instance type. 
- tags: ['irtoc_ignore'] + tags: [] header-template: [] code-template: | .record panda.Object @@ -1053,25 +1053,25 @@ tests: .function void R.ctor0(R a0) { return.void } - .function R R.ctor0x(R a0) { + .function void R.ctor0x(R a0) { lda.null - return.obj + return.void } .function void R.ctor1(R a0, i32 a1) { return.void } - .function R[] R.ctor1x(R a0, i32 a1) { + .function void R.ctor1x(R a0, i32 a1) { movi v0, 10 newarr v0, v0, R[] lda.obj v0 - return.obj + return.void } .function void R.ctor2(R a0, R a1, i16 a2) { return.void } - .function R R.ctor2x(R a0, R a1, i16 a2) { + .function void R.ctor2x(R a0, R a1, i16 a2) { lda.null - return.obj + return.void } .record panda.NullPointerException .function void panda.NullPointerException.ctor(panda.NullPointerException a0, panda.String a1, panda.Object a2) diff --git a/tests/cts-generator/cts-template/initobj.yaml b/tests/cts-generator/cts-template/initobj.yaml index 566231e4c8c32cf90aa6e51fda5855eae777dcb1..f2474c37dba85d8e7c6eb08b9c374ce1a8c8bd4e 100755 --- a/tests/cts-generator/cts-template/initobj.yaml +++ b/tests/cts-generator/cts-template/initobj.yaml @@ -1070,7 +1070,7 @@ tests: default values (i.e. 0 for primitives and null for objects), call specified initializer and put a reference to the newly created object into accumulator. method_id should resolve to an initializer. description: Check 'initobj' with valid method id creates expected instance type. 
- tags: ['irtoc_ignore'] + tags: [] header-template: [] code-template: | .record panda.Object @@ -1079,34 +1079,34 @@ tests: .function void R.ctor0(R a0) { return.void } - .function R R.ctor0x(R a0) { + .function void R.ctor0x(R a0) { lda.null - return.obj + return.void } .function void R.ctor1(R a0, i32 a1) { return.void } - .function R[] R.ctor1x(R a0, i32 a1) { + .function void R.ctor1x(R a0, i32 a1) { movi v0, 10 newarr v0, v0, R[] lda.obj v0 - return.obj + return.void } .function void R.ctor2(R a0, R a1, i16 a2) { return.void } - .function R R.ctor2x(R a0, R a1, i16 a2) { + .function void R.ctor2x(R a0, R a1, i16 a2) { lda.null - return.obj + return.void } .function void R.ctor3(R a0, i32[] a1, R[] a2, i8 a3) { return.void } - .function panda.Object[] R.ctor3x(R a0, i32[] a1, R[] a2, i8 a3) { + .function void R.ctor3x(R a0, i32[] a1, R[] a2, i8 a3) { movi v0, 10 newarr v0, v0, panda.Object[] lda.obj v0 - return.obj + return.void } .function void R.ctor4(R a0, R[][] a1, i64 a2, panda.Object a3, f64 a4) { return.void diff --git a/tests/cts-generator/cts-template/lda.type.yaml b/tests/cts-generator/cts-template/lda.type.yaml index a03290dff5e2c63580301fdd9155fab0bdd7a095..99f7b6b3ee3aa7d69d52cdffb4191f0a481e5e67 100755 --- a/tests/cts-generator/cts-template/lda.type.yaml +++ b/tests/cts-generator/cts-template/lda.type.yaml @@ -24,7 +24,7 @@ tests: title: Load accumulator from type constant pool description: Load type specified by id into accumulator. 
verification: - - type_id_any_object + - type_id_class instructions: - sig: lda.type type_id acc: out:ref diff --git a/tests/cts-generator/cts-template/ldobj.v.yaml b/tests/cts-generator/cts-template/ldobj.v.yaml index 13aed45e92927c3e3bd87208a5c7d0743c87778a..48360ad0ad12959c7950cf3d2e78cd5fe6aaf060 100755 --- a/tests/cts-generator/cts-template/ldobj.v.yaml +++ b/tests/cts-generator/cts-template/ldobj.v.yaml @@ -344,7 +344,7 @@ tests: If field type is less than 32, then loaded value is sign or zero extended to i32 depending on field type. header-template: ['pandasm_header'] check-type: exit-positive - tags: ['tsan', 'irtoc_ignore'] + tags: ['tsan'] code-template: | .function i32 main() { diff --git a/tests/cts-generator/cts-template/ldobj.yaml b/tests/cts-generator/cts-template/ldobj.yaml index f1a45aa6f8d91ed7b2b71310c4bb5ccce05074e6..8bb7baec043b24db36c768dccd445e3c144975de 100755 --- a/tests/cts-generator/cts-template/ldobj.yaml +++ b/tests/cts-generator/cts-template/ldobj.yaml @@ -358,7 +358,7 @@ tests: header-template: ['pandasm_header'] runner-options: ['verifier-config'] check-type: exit-positive - tags: ['tsan', 'irtoc_ignore'] + tags: ['tsan'] code-template: | .function i32 main() { initobj.short R.ctor diff --git a/tests/cts-generator/cts-template/modu2.64.yaml b/tests/cts-generator/cts-template/modu2.64.yaml index 06f261f77787a6e1ec711f2241605e23c8131236..41ba2762648cd56fcc9f90a8b1ee68bc85a72c88 100755 --- a/tests/cts-generator/cts-template/modu2.64.yaml +++ b/tests/cts-generator/cts-template/modu2.64.yaml @@ -66,7 +66,7 @@ tests: acc: inout:u64 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai.64 1 @@ -160,7 +160,7 @@ tests: acc: inout:u64 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai.64 0x7FFFFFFFFFFFFFFF @@ -207,7 +207,7 @@ tests: acc: inout:u64 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai.64 
-0x8000000000000000 diff --git a/tests/cts-generator/cts-template/modu2.yaml b/tests/cts-generator/cts-template/modu2.yaml index 456c5f1efafe2c889ee7e149d181c61cf4e982dc..3f686e2c94549f52c558174a46b7aa1290bb1c0b 100755 --- a/tests/cts-generator/cts-template/modu2.yaml +++ b/tests/cts-generator/cts-template/modu2.yaml @@ -66,7 +66,7 @@ tests: acc: inout:u32 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai 1 @@ -160,7 +160,7 @@ tests: acc: inout:u32 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai 0x7FFFFFFF @@ -207,7 +207,7 @@ tests: acc: inout:u32 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] code-template: | # ldai -0x80000000 diff --git a/tests/cts-generator/cts-template/stobj.v.yaml b/tests/cts-generator/cts-template/stobj.v.yaml index 9ed5c17331d9d1d16a011e7fac740c7849ed4d41..cbf0ff293671e1e3fcbc0deb4ef7d8e56ebd7b04 100755 --- a/tests/cts-generator/cts-template/stobj.v.yaml +++ b/tests/cts-generator/cts-template/stobj.v.yaml @@ -429,7 +429,7 @@ tests: description: If field type size is less than 32, register content will be truncated to storage size before storing. header-template: ['pandasm_header'] check-type: exit-positive - tags: ['tsan', 'irtoc_ignore'] + tags: ['tsan'] code-template: | .function i32 main() { movi v7, 123456789 @@ -563,7 +563,7 @@ tests: description: If field type size is less than 32, register content will be truncated to storage size before storing. 
header-template: ['pandasm_header'] check-type: exit-positive - tags: ['tsan', 'irtoc_ignore'] + tags: ['tsan'] code-template: | .function i32 main() { movi v7, 123456789 diff --git a/tests/cts-generator/cts-template/stobj.yaml b/tests/cts-generator/cts-template/stobj.yaml index 46b56d6f228e2aff3cce3a74647eea4ebd73b478..3483fd12b1da4ebc56d4ea2703de6c0c32fd4871 100755 --- a/tests/cts-generator/cts-template/stobj.yaml +++ b/tests/cts-generator/cts-template/stobj.yaml @@ -423,7 +423,7 @@ tests: description: If field type size is less than 32, accumulator content will be truncated to storage size before storing. header-template: ['pandasm_header'] check-type: exit-positive - tags: ['tsan', 'irtoc_ignore'] + tags: ['tsan'] code-template: | .function i32 main() { initobj R.ctor @@ -492,7 +492,7 @@ tests: description: If field type size is less than 32, accumulator content will be truncated to storage size before storing. header-template: ['pandasm_header'] check-type: exit-positive - tags: ['tsan', 'irtoc_ignore'] + tags: ['tsan'] code-template: | .function i32 main() { initobj R.ctor diff --git a/tests/cts-generator/cts-template/ucmp.64.yaml b/tests/cts-generator/cts-template/ucmp.64.yaml index 824041358dff69b14ec12901ffac73048d0a70e4..af0665fb480952c33dbb78ebe153b7df7b52b196 100755 --- a/tests/cts-generator/cts-template/ucmp.64.yaml +++ b/tests/cts-generator/cts-template/ucmp.64.yaml @@ -58,7 +58,7 @@ tests: acc: inout:u64->i32 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] check-type: check-positive description: Check ucmp.64 with different values. code-template: | @@ -96,7 +96,7 @@ tests: format: [pref_op_v_8] check-type: check-positive description: Check ucmp.64 with different values. 
- tags: ['tsan', 'irtoc_ignore'] + tags: ['tsan'] code-template: | # test - check greater than ldai.64 %s diff --git a/tests/cts-generator/cts-template/ucmp.yaml b/tests/cts-generator/cts-template/ucmp.yaml index cf63d1589a1c45a8ba38274db4ecb3a60839a6ad..5beaa7b720d2f23ceb8314874851f5b888336df3 100755 --- a/tests/cts-generator/cts-template/ucmp.yaml +++ b/tests/cts-generator/cts-template/ucmp.yaml @@ -58,7 +58,7 @@ tests: acc: inout:u32->i32 prefix: unsigned format: [pref_op_v_8] - tags: ['irtoc_ignore'] + tags: [] check-type: check-positive code-template: | # test - check less than @@ -99,7 +99,7 @@ tests: ucmp v0 subi 1 description: Check ucmp with different values. - tags: ['tsan', 'irtoc_ignore'] + tags: ['tsan'] cases: - values: - '0x00000001' diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3f59838a9987da04c344c6b2143cecf0f36eab1 --- /dev/null +++ b/tools/CMakeLists.txt @@ -0,0 +1,18 @@ +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cmake_minimum_required(VERSION 3.14.1 FATAL_ERROR) + +# ----- Sampling profiler tools ----------------------------------------------- + +add_subdirectory(sampler) diff --git a/tools/sampler/CMakeLists.txt b/tools/sampler/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..916256eb540976fdfee497c0bbbace26acd9e463 --- /dev/null +++ b/tools/sampler/CMakeLists.txt @@ -0,0 +1,30 @@ +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.3.2 FATAL_ERROR) +project(aspt_converter CXX) + +panda_add_executable(aspt_converter aspt_convert.cpp) + +target_link_libraries(aspt_converter + arkfile + arkbase +) + +panda_add_sanitizers(TARGET aspt_converter SANITIZERS ${PANDA_SANITIZERS_LIST}) + +# Currently profiler is not supported for arm64 +# TODO(m.strizhak): support for device +if (NOT (PANDA_TARGET_ARM64 OR PANDA_TARGET_MOBILE OR PANDA_TARGET_OHOS OR PANDA_ENABLE_FUZZBENCH)) + add_subdirectory(tests) +endif() diff --git a/verification/type/type_params.h b/tools/sampler/aspt_convert.cpp similarity index 34% rename from verification/type/type_params.h rename to tools/sampler/aspt_convert.cpp index 96def38fc55c36886b8ab2bf83cc4f612de6243d..0363631a558727e27a14164f03d378b14f41673e 100644 --- a/verification/type/type_params.h +++ b/tools/sampler/aspt_convert.cpp @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2021-2022 Huawei Device Co., Ltd. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,50 +13,45 @@ * limitations under the License. */ -#ifndef _PANDA_TYPE_PARAMS_HPP__ -#define _PANDA_TYPE_PARAMS_HPP__ +#include "libpandabase/utils/logger.h" +#include "tools/sampler/aspt_converter.h" -#include "type_param.h" +namespace panda::tooling::sampler { -namespace panda::verifier { -class TypeSystem; - -class TypeParams : public TypeParamsIdx { - friend class Type; - friend class TypeParam; - friend class ParametricType; - -public: - TypeParams(TypeSystemKind kind, ThreadNum threadnum, const TypeParamsIdx ¶ms = {}) - : TypeParamsIdx {params}, kind_ {kind}, threadnum_ {threadnum} - { +int Main(int argc, const char **argv) +{ + panda::Span sp(argv, argc); + if (sp.Size() != 3) { + std::cerr << "Error: Wrong amount of arguments." << std::endl + << "Usage: " << sp[0] << " input.aspt output" << std::endl; + return 1; } - TypeParams() = default; - TypeParams(const TypeParams &) = default; - TypeParams(TypeParams &&) = default; - TypeParams &operator=(const TypeParams &) = default; - TypeParams &operator=(TypeParams &&) = default; - ~TypeParams() = default; + const char *filename = sp[1]; + const char *out_filename = sp[2]; - bool operator<=(const TypeParams &rhs) const; + if (!panda::os::IsFileExists(filename)) { + std::cerr << "Error: file \"" << filename << "\" not found." 
<< std::endl; + return 1; + } - TypeParams &operator>>(const TypeParam &p); + Logger::InitializeStdLogging(Logger::Level::INFO, Logger::ComponentMaskFromString("profiler")); - template - void ForEach(Handler &&handler) const - { - for (const auto &p : *this) { - handler(TypeParam {kind_, threadnum_, p}); - } + AsptConverter conv(filename); + if (conv.CollectTracesStats() == 0) { + LOG(ERROR, PROFILER) << "No samples found in file"; + return 1; } + if (!conv.CollectModules()) { + LOG(ERROR, PROFILER) << "No modules found in file, names would not be resolved"; + } + conv.DumpResolvedTracesAsCSV(out_filename); + return 0; +} - TypeSystem &GetTypeSystem() const; - -private: - TypeSystemKind kind_; - ThreadNum threadnum_; -}; -} // namespace panda::verifier +} // namespace panda::tooling::sampler -#endif // !_PANDA_TYPE_PARAMS_HPP__ +int main(int argc, const char **argv) +{ + return panda::tooling::sampler::Main(argc, argv); +} \ No newline at end of file diff --git a/tools/sampler/aspt_converter.h b/tools/sampler/aspt_converter.h new file mode 100644 index 0000000000000000000000000000000000000000..8d3d231c6e3f6d5286757f2c6af842c31c1ab4f6 --- /dev/null +++ b/tools/sampler/aspt_converter.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef PANDA_TOOLS_SAMPLER_CONVERTER_IMPL_H +#define PANDA_TOOLS_SAMPLER_CONVERTER_IMPL_H + +#include + +#include "libpandabase/os/filesystem.h" +#include "libpandabase/utils/utf.h" +#include "libpandafile/class_data_accessor-inl.h" +#include "libpandafile/file-inl.h" + +#include "runtime/tooling/sampler/sample_info.h" +#include "runtime/tooling/sampler/sample_reader-inl.h" + +namespace panda::tooling::sampler { + +class AsptConverter { +public: + using StackTraceMap = std::unordered_map; + using ModuleMap = std::unordered_map>; + + explicit AsptConverter(const char *filename) : reader_(filename) {} + ~AsptConverter() = default; + + const StackTraceMap &GetStackTraces() const + { + return stack_traces_; + } + + size_t CollectTracesStats() + { + stack_traces_.clear(); + + size_t sample_counter = 0; + SampleInfo sample; + while (reader_.GetNextSample(&sample)) { + ++sample_counter; + auto it = stack_traces_.find(sample); + if (it == stack_traces_.end()) { + stack_traces_.insert({sample, 1}); + continue; + } + ++it->second; + } + + return sample_counter; + } + + bool CollectModules() + { + // TODO(m.strizhak): get alternative paths to find panda files + FileInfo m_info; + while (reader_.GetNextModule(&m_info)) { + const char *filepath = m_info.pathname.c_str(); + if (!panda::os::IsFileExists(filepath)) { + LOG(ERROR, PROFILER) << "Module not found, path: " << filepath; + } + if (modules_map_.find(m_info.ptr) == modules_map_.end()) { + modules_map_.insert({m_info.ptr, panda_file::OpenPandaFileOrZip(filepath)}); + } + } + + return !modules_map_.empty(); + } + + bool DumpResolvedTracesAsCSV(const char *filename) + { + std::ofstream stream(filename); + for (auto &[sample, count] : stack_traces_) { + ASSERT(sample.managed_stack_size <= SampleInfo::MAX_STACK_DEPTH); + for (size_t i = sample.managed_stack_size; i-- > 0;) { + uintptr_t pf_id = sample.managed_stack[i].panda_file_ptr; + uint64_t file_id = sample.managed_stack[i].file_id; + const panda_file::File *pf = 
+ modules_map_.find(pf_id) != modules_map_.end() ? modules_map_[pf_id].get() : nullptr; + const std::string full_method_name = ResolveName(pf, file_id); + stream << full_method_name << "; "; + } + stream << count << "\n"; + } + + return true; + } + + std::string ResolveName(const panda_file::File *pf, uint64_t file_id) + { + if (pf == nullptr) { + return std::string("__unknown_module::" + std::to_string(file_id)); + } + // TODO(m.strizhak): make a hash map to avoid of O(N^2) + std::string result; + auto classes_span = pf->GetClasses(); + for (auto id : classes_span) { + if (pf->IsExternal(panda_file::File::EntityId(id))) { + continue; + } + panda_file::ClassDataAccessor cda(*pf, panda_file::File::EntityId(id)); + cda.EnumerateMethods([&](panda_file::MethodDataAccessor &mda) { + if (panda_file::File::EntityId(file_id) == mda.GetMethodId()) { + std::string method_name = utf::Mutf8AsCString(mda.GetName().data); + std::string class_name = utf::Mutf8AsCString(cda.GetDescriptor()); + if (class_name[class_name.length() - 1] == ';') { + class_name.pop_back(); + } + result = class_name + "::" + method_name; + } + }); + } + if (result.empty()) { + return std::string(pf->GetFilename() + "::__unknown_" + std::to_string(file_id)); + } + return result; + } + + NO_COPY_SEMANTIC(AsptConverter); + NO_MOVE_SEMANTIC(AsptConverter); + +private: + ModuleMap modules_map_; + SampleReader reader_; + StackTraceMap stack_traces_; +}; + +} // namespace panda::tooling::sampler + +#endif // PANDA_TOOLS_SAMPLER_CONVERTER_IMPL_H \ No newline at end of file diff --git a/tools/sampler/tests/CMakeLists.txt b/tools/sampler/tests/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e0d24afc0e205c9fdd4a58723033a06175d51684 --- /dev/null +++ b/tools/sampler/tests/CMakeLists.txt @@ -0,0 +1,23 @@ +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 3.3.2 FATAL_ERROR) + +add_custom_target(sampler_functional_test_run + COMMENT "Testing panda with sampler and its tool to resolve traces" + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/run_sampler_and_verify_test.sh + ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR} \"${PANDA_RUN_PREFIX}\" +) + +add_dependencies(sampler_functional_test_run ark pandasm aspt_converter) +add_dependencies(tests sampler_functional_test_run) \ No newline at end of file diff --git a/tools/sampler/tests/panda_app.pa b/tools/sampler/tests/panda_app.pa new file mode 100644 index 0000000000000000000000000000000000000000..355cb07eb7d4e35e1444c40c487de7100cb2b674 --- /dev/null +++ b/tools/sampler/tests/panda_app.pa @@ -0,0 +1,158 @@ +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +.record Math +.record Globals{ + i32 flag +} + +.function f64 Math.pow(f64 a0, f64 a1) +.function f64 Math.cos(f64 a0) +.function f64 Math.sin(f64 a0) +.function f64 Math.absF64(f64 a0) + +.function u1 main(){ + movi v0, 1024 + movi v1, 163840 + fmovi.64 v2, 104.46302508421098 + call test, v0, v1, v2, v0 + return +} + +.function u1 test(i32 a0, i32 a1, f64 a2){ + ldai 0 + ststatic Globals.flag + fmovi.64 v3, 0.0 #total + mov v5, a0 #loop counter +loop: + jgt a1, loop_exit + call.short partial, v5, v0 + call.short partial2, v5, v0 + call.short partial3, v5, v0 + call.short partial4, v5, v0 + fadd2.64 v3 + sta.64 v3 + lda v5 + muli 2 + sta v5 + jmp loop +loop_exit: + return +} + +.function f64 partial(i32 a0){ + sta.64 v20 #twothirds + fmovi.64 v21, -1.0 #alt + fmovi.64 v22, 0.0 #k2 + fmovi.64 v23, 0.0 #k3 + fmovi.64 v24, 0.0 #sk + fmovi.64 v25, 0.0 #ck + movi.64 v27, 0 #flag + movi v28, 1 #loop counter + mov v26, a0 + ldai 1 +loop: + jgt v26, loop_exit + i32tof64 + sta.64 v29 + fmul2.64 v29 + sta.64 v30 #k2 + fmul2.64 v29 + sta.64 v31 #k3 + mov.64 v0, v29 + call.short Math.sin, v0, v0 + sta.64 v32 #sk + call.short Math.cos, v0, v0 + sta.64 v33 #ck + lda.64 v21 + fneg.64 + sta.64 v21 + mov.64 v0, v20 + fldai.64 -1.0 + fadd2.64 v29 + sta.64 v1 + call.short Math.pow, v0, v1 + fadd2.64 v11 + sta.64 v11 + mov.64 v0, v29 + call.short Math.pow, v0, v10 + fadd2.64 v12 + sta.64 v12 + fldai.64 1.0 + fadd2.64 v29 + fmul2.64 v29 + sta.64 v34 + fldai.64 1.0 + fdiv2.64 v34 + fadd2.64 v13 + sta.64 v13 + lda.64 v31 + fmul2.64 v32 + fmul2.64 v32 + sta.64 v34 + fldai.64 1.0 + fdiv2.64 v34 + fadd2.64 v14 + sta.64 v14 + lda.64 v31 + fmul2.64 v33 + fmul2.64 v33 + sta.64 v34 + fldai.64 1.0 + fdiv2.64 v34 + fadd2.64 v15 + sta.64 v15 + fldai.64 1.0 + fdiv2.64 v29 + fadd2.64 v16 + sta.64 v16 + fldai.64 1.0 + fdiv2.64 v30 + fadd2.64 v17 + sta.64 v17 + lda.64 v21 + fdiv2.64 v29 + fadd2.64 v18 + sta.64 v18 + fldai.64 2.0 + fmul2.64 v29 + sta.64 v34 + fldai.64 -1.0 + fadd2.64 v34 + sta.64 
v34 + lda.64 v21 + fdiv2.64 v34 + fadd2.64 v19 + sta.64 v19 + lda v28 + addi 1 + sta v28 + jmp loop +loop_exit: + return.64 +} + +.function f64 partial2(i32 a0){ + call.short partial, a0, v0 + return.64 +} + +.function f64 partial3(i32 a0){ + call.short partial, a0, v0 + return.64 +} + +.function f64 partial4(i32 a0){ + call.short partial, a0, v0 + return.64 +} \ No newline at end of file diff --git a/tools/sampler/tests/run_sampler_and_verify_test.sh b/tools/sampler/tests/run_sampler_and_verify_test.sh new file mode 100755 index 0000000000000000000000000000000000000000..8dc957f22ebcc672f8bc1e8184761f67c461191a --- /dev/null +++ b/tools/sampler/tests/run_sampler_and_verify_test.sh @@ -0,0 +1,56 @@ +#!/bin/bash +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -e + +if [[ $# < 3 ]]; then + printf "Error: Invalid number of arguments\n" + printf "Usage: $0 \"\"\n" + exit 1 +fi + +BUILD_DIR=$1 +CURRENT_BUILD_DIR=$2 +CURRENT_SOURCE_DIR=$3 +PANDA_RUN_PREFIX=$4 + +cd ${CURRENT_BUILD_DIR} +${PANDA_RUN_PREFIX} ${BUILD_DIR}/bin/ark_asm ${CURRENT_SOURCE_DIR}/panda_app.pa ${CURRENT_BUILD_DIR}/sampling_app.abc + +${PANDA_RUN_PREFIX} ${BUILD_DIR}/bin/ark --compiler-enable-jit=false --sampling-profiler-enable --sampling-profiler-interval=200 \ + --sampling-profiler-output-file=outfile.aspt ${CURRENT_BUILD_DIR}/sampling_app.abc _GLOBAL::main +${PANDA_RUN_PREFIX} ${BUILD_DIR}/bin/aspt_converter outfile.aspt traceout.csv + +TRACES_ARR=("L_GLOBAL::main; L_GLOBAL::test; L_GLOBAL::partial;" + "L_GLOBAL::main; L_GLOBAL::test; L_GLOBAL::partial2;" + "L_GLOBAL::main; L_GLOBAL::test; L_GLOBAL::partial3;" + "L_GLOBAL::main; L_GLOBAL::test; L_GLOBAL::partial4;") + +# Verify that traces are correct +for ((i = 0; i < ${#TRACES_ARR[@]}; i++)) +do + if ! grep "${TRACES_ARR[$i]}" traceout.csv + then + printf "Failed: Required trace ${TRACES_ARR[$i]} not found\n" + exit 1 + fi +done + +if grep "__unknown" traceout.csv +then + printf "Failed: Unknown trace detected\n" + exit 1 +fi + +printf "All required traces found. 
Sampler test passed\n" \ No newline at end of file diff --git a/verification/Verification.cmake b/verification/Verification.cmake index 32a484c5ff69af8824fc49a1b0a0cc5c4c977b97..e015ab70b1d7cefc4129d11a03022be8e91b42c9 100644 --- a/verification/Verification.cmake +++ b/verification/Verification.cmake @@ -23,6 +23,8 @@ include(${VERIFICATION_SOURCES_DIR}/jobs/Jobs.cmake) include(${VERIFICATION_SOURCES_DIR}/cache/Cache.cmake) set(VERIFIER_SOURCES + ${VERIFICATION_SOURCES_DIR}/default_plugin.cpp + ${VERIFICATION_SOURCES_DIR}/plugins.cpp ${VERIFICATION_SOURCES_DIR}/verification_options.cpp ${VERIFICATION_SOURCES_DIR}/verifier_messages_data.cpp ${VERIFICATION_SOURCES_DIR}/public.cpp diff --git a/verification/VerifierPostPlugins.cmake b/verification/VerifierPostPlugins.cmake index 4c1d2af00f3d585f4eefd0e795f9b8eb52e01936..146a927fe65640f3fdff6b84da70d8831d458147 100644 --- a/verification/VerifierPostPlugins.cmake +++ b/verification/VerifierPostPlugins.cmake @@ -12,16 +12,16 @@ # limitations under the License. 
set(VERIFIER_INCLUDE_DIR ${PANDA_BINARY_ROOT}/verification/gen/include) -set(LANGSPEC_H ${VERIFIER_INCLUDE_DIR}/lang_specifics.h) +set(PLUGINS_GEN_INC ${VERIFIER_INCLUDE_DIR}/plugins_gen.inc) panda_gen_file( DATAFILE ${GEN_PLUGIN_OPTIONS_YAML} - TEMPLATE ${PANDA_ROOT}/verification/gen/templates/lang_specifics.h.erb - OUTPUTFILE ${LANGSPEC_H} + TEMPLATE ${PANDA_ROOT}/verification/gen/templates/plugins_gen.inc.erb + OUTPUTFILE ${PLUGINS_GEN_INC} REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb EXTRA_DEPENDENCIES plugin_options_merge ) -add_custom_target(verifier_plugin_gen DEPENDS ${LANGSPEC_H}) +add_custom_target(verifier_plugin_gen DEPENDS ${PLUGINS_GEN_INC}) add_dependencies(verifier verifier_plugin_gen) add_dependencies(arkruntime verifier_plugin_gen) diff --git a/verification/absint/abs_int_inl.cpp b/verification/absint/abs_int_inl.cpp index b4100d72cbabd9532894caa3d65a3f2d86836901..09cc804dcd82b71e2f9c39b4d7939b3d0f28c8de 100644 --- a/verification/absint/abs_int_inl.cpp +++ b/verification/absint/abs_int_inl.cpp @@ -55,7 +55,7 @@ PandaVector AbsIntInstructionHandler::SubtypesOf(const PandaVector & { PandaUnorderedSet set; for (const auto &type : types) { - type.ForAllSubtypes([&set](const auto &t) { + type.ForAllSubtypes(GetTypeSystem(), [&set](const auto &t) { set.insert(t); return true; }); @@ -67,7 +67,7 @@ PandaVector AbsIntInstructionHandler::SubtypesOf(std::initializer_list set; for (const auto &type : types) { - type.ForAllSubtypes([&set](const auto &t) { + type.ForAllSubtypes(GetTypeSystem(), [&set](const auto &t) { set.insert(t); return true; }); @@ -79,7 +79,7 @@ PandaVector AbsIntInstructionHandler::SupertypesOf(const PandaVector { PandaUnorderedSet set; for (const auto &type : types) { - type.ForAllSupertypes([&set](const auto &t) { + type.ForAllSupertypes(GetTypeSystem(), [&set](const auto &t) { set.insert(t); return true; }); @@ -91,7 +91,7 @@ PandaVector AbsIntInstructionHandler::SupertypesOf(std::initializer_list set; for (const auto &type : 
types) { - type.ForAllSupertypes([&set](const auto &t) { + type.ForAllSupertypes(GetTypeSystem(), [&set](const auto &t) { set.insert(t); return true; }); @@ -188,6 +188,16 @@ PandaTypes &AbsIntInstructionHandler::Types() return context_.Types(); } +TypeSystem const *AbsIntInstructionHandler::GetTypeSystem() +{ + return context_.GetTypeSystem(); +} + +Type AbsIntInstructionHandler::TypeOfClass(CachedClass const *klass) +{ + return context_.Types().TypeOf(*klass); +} + const Type &AbsIntInstructionHandler::ReturnType() { return context_.ReturnType(); diff --git a/verification/absint/abs_int_inl.h b/verification/absint/abs_int_inl.h index 5d50c910c559eb76398e16be13797525729654fd..eb7ba1530a553341c500be1bd2c3e27fd9170ee1 100644 --- a/verification/absint/abs_int_inl.h +++ b/verification/absint/abs_int_inl.h @@ -22,7 +22,6 @@ #include "include/mem/panda_containers.h" #include "include/method.h" #include "include/runtime.h" -#include "lang_specifics.h" #include "macros.h" #include "panda_types.h" #include "runtime/include/class.h" @@ -255,7 +254,7 @@ public: bool CheckTypes(const Type &type, const Container &tgt_types) { for (const auto &t : tgt_types) { - if (type <= t) { + if (IsSubtype(type, t, GetTypeSystem())) { return true; } } @@ -266,7 +265,7 @@ public: bool CheckTypes(const AbstractType &type, const Container &tgt_types) { for (const auto &t : tgt_types) { - if (type.ExistsType([&](auto type1) { return type1 <= t; })) { + if (type.ExistsType([&](auto type1) { return IsSubtype(type1, t, GetTypeSystem()); })) { return true; } } @@ -298,15 +297,15 @@ public: return false; } bool result = false; - auto handler = [&result, &type](const AbstractTypedValue &atv) { + auto handler = [&](const AbstractTypedValue &atv) { const auto &at = atv.GetAbstractType(); if (at.IsType()) { - if (at.GetType() <= type) { + if (IsSubtype(at.GetType(), type, GetTypeSystem())) { result = true; } } else if (at.IsTypeSet()) { at.GetTypeSet().ForAll([&](const Type &type_in_at) { - if 
(type_in_at <= type) { + if (IsSubtype(type_in_at, type, GetTypeSystem())) { result = true; } return !result; @@ -347,15 +346,12 @@ public: void SetAccAndOthersOfSameOrigin(const AbstractTypedValue &val); void SetAccAndOthersOfSameOrigin(const AbstractType &type); - template - AbstractTypedValue MkVal(T t) - { - return AbstractTypedValue {Types().TypeOf(t), context_.NewVar()}; - } - AbstractTypedValue MkVal(const AbstractType &t); PandaTypes &Types(); + TypeSystem const *GetTypeSystem(); + + Type TypeOfClass(CachedClass const *klass); const Type &ReturnType(); @@ -699,7 +695,7 @@ public: SET_STATUS_FOR_MSG(CannotResolveClassId, OK); return false; } - auto type = Types().TypeOf(*cached_class); + auto type = TypeOfClass(cached_class); SetAcc(type); MoveToNextInst(); return true; @@ -719,7 +715,7 @@ public: status_ = VerificationStatus::ERROR; return false; } - auto type = Types().TypeOf(*cached_class); + auto type = TypeOfClass(cached_class); SetReg(vd, type); MoveToNextInst(); return true; @@ -736,11 +732,15 @@ public: SET_STATUS_FOR_MSG(CannotResolveClassId, OK); return false; } - auto type = Types().TypeOf(*cached_class); + auto type = TypeOfClass(cached_class); if (!type.IsValid()) { LOG(DEBUG, VERIFIER) << "LDA_TYPE type of class is not valid."; return false; } + if (type != Types().Class(CurrentJob.JobCachedMethod().GetSourceLang())) { + LOG(ERROR, VERIFIER) << "LDA_TYPE type must be Class."; + return false; + } auto lang = CurrentJob.JobCachedMethod().GetSourceLang(); SetAcc(Types().Class(lang)); MoveToNextInst(); @@ -2210,9 +2210,9 @@ public: } auto ref_type = Types().RefType(); auto &&arr_elt_type = GetArrayEltType(reg_type); - TypeSet subtypes_of_ref_type_in_arr_elt_type(Types().GetKind(), Types().GetThreadNum()); + TypeSet subtypes_of_ref_type_in_arr_elt_type {}; arr_elt_type.ForAllTypes([&](Type arr_elt_type1) { - if (arr_elt_type1 <= ref_type) { + if (IsSubtype(arr_elt_type1, ref_type, GetTypeSystem())) { 
subtypes_of_ref_type_in_arr_elt_type.Insert(arr_elt_type1); } return true; @@ -2347,7 +2347,7 @@ public: SET_STATUS_FOR_MSG(CannotResolveClassId, OK); return false; } - auto type = Types().TypeOf(*cached_class); + auto type = TypeOfClass(cached_class); if (!type.IsValid()) { LOG(ERROR, VERIFIER) << "Verifier error: HandleNewarr type error"; status_ = VerificationStatus::ERROR; @@ -2356,7 +2356,7 @@ public: SHOW_MSG(DebugType) LOG_VERIFIER_DEBUG_TYPE(ImageOf(type)); END_SHOW_MSG(); - if (!(type <= Types().ArrayType())) { + if (!IsSubtype(type, Types().ArrayType(), GetTypeSystem())) { // TODO(vdyadov): implement StrictSubtypes function to not include ArrayType in output SHOW_MSG(ArrayOfNonArrayType) LOG_VERIFIER_ARRAY_OF_NON_ARRAY_TYPE(ImageOf(type), ImagesOf(SubtypesOf({Types().ArrayType()}))); @@ -2382,7 +2382,7 @@ public: status_ = VerificationStatus::ERROR; return false; } - auto type = Types().TypeOf(*cached_class); + auto type = TypeOfClass(cached_class); if (!type.IsValid()) { LOG(ERROR, VERIFIER) << "Verifier error: HandleNewobj type error"; status_ = VerificationStatus::ERROR; @@ -2391,7 +2391,7 @@ public: SHOW_MSG(DebugType) LOG_VERIFIER_DEBUG_TYPE(ImageOf(type)); END_SHOW_MSG(); - if (!(type <= Types().ObjectType())) { + if (!IsSubtype(type, Types().ObjectType(), GetTypeSystem())) { SHOW_MSG(ObjectOfNonObjectType) LOG_VERIFIER_OBJECT_OF_NON_OBJECT_TYPE(ImageOf(type), ImagesOf(SubtypesOf({type}))); END_SHOW_MSG(); @@ -2419,7 +2419,7 @@ public: template bool CheckCallCtor(const CachedMethod &ctor, RegsFetcher regs) { - Type obj_type = Types().TypeOf(ctor.klass); + Type obj_type = TypeOfClass(&ctor.klass); // TODO(vdyadov): put under NDEBUG? 
{ @@ -2539,7 +2539,7 @@ public: return {}; } - return Types().TypeOf(field->GetType()); + return TypeOfClass(&field->GetType()); } Type GetFieldObject() @@ -2550,7 +2550,7 @@ public: SET_STATUS_FOR_MSG(CannotResolveFieldId, OK); return {}; } - return Types().TypeOf(field->klass); + return TypeOfClass(&field->klass); } bool CheckFieldAccess(int reg_idx, Type expected_field_type, bool is_static) @@ -2592,7 +2592,8 @@ public: SET_STATUS_FOR_MSG(AlwaysNpe, OK); return false; } - if (!obj_type.ExistsType([&](Type obj_type1) { return obj_type1 <= field_obj_type; })) { + if (!obj_type.ExistsType( + [&](Type obj_type1) { return IsSubtype(obj_type1, field_obj_type, GetTypeSystem()); })) { SHOW_MSG(InconsistentRegisterAndFieldTypes) LOG_VERIFIER_INCONSISTENT_REGISTER_AND_FIELD_TYPES(field->GetName(), reg_idx, ImageOf(obj_type), ImageOf(field_obj_type), @@ -2602,7 +2603,7 @@ public: } } - if (!(field_type <= expected_field_type)) { + if (!IsSubtype(field_type, expected_field_type, GetTypeSystem())) { SHOW_MSG(UnexpectedFieldType) LOG_VERIFIER_UNEXPECTED_FIELD_TYPE(field->GetName(), ImageOf(field_type), ImageOf(expected_field_type), ImagesOf(SubtypesOf({expected_field_type}))); @@ -2611,7 +2612,9 @@ public: return false; } - auto result = CheckFieldAccessViolation(field, CurrentJob, Types()); + auto *plugin = CurrentJob.JobPlugin(); + auto &job_method = CurrentJob.JobCachedMethod(); + auto result = plugin->CheckFieldAccessViolation(field, &job_method, &Types()); if (!result.IsOk()) { const auto &verif_opts = Runtime::GetCurrent()->GetVerificationConfig()->opts_; if (verif_opts.Debug.Allow.FieldAccessViolation && result.IsError()) { @@ -2879,7 +2882,7 @@ public: const AbstractType &vs_type = GetRegType(vs); - if (vs_type.ForAllTypes([&](Type vs_type1) { return !(vs_type1 <= field_type); })) { + if (vs_type.ForAllTypes([&](Type vs_type1) { return !IsSubtype(vs_type1, field_type, GetTypeSystem()); })) { SHOW_MSG(BadAccumulatorType) 
LOG_VERIFIER_BAD_ACCUMULATOR_TYPE(ImageOf(vs_type), ImageOf(field_type), ImagesOf(SubtypesOf({field_type}))); @@ -3136,12 +3139,13 @@ public: if (cached_class == nullptr) { return false; } - auto type = Types().TypeOf(*cached_class); + auto type = TypeOfClass(cached_class); if (!type.IsValid()) { return false; } LOG_VERIFIER_DEBUG_TYPE(ImageOf(type)); - if (!(type <= Types().ObjectType()) && !(type <= Types().ArrayType())) { + if (!IsSubtype(type, Types().ObjectType(), GetTypeSystem()) && + !IsSubtype(type, Types().ArrayType(), GetTypeSystem())) { LOG_VERIFIER_CHECK_CAST_TO_NON_OBJECT_TYPE(ImageOf(type), ImagesOf(SubtypesOf({Types().ObjectType()}))); status_ = VerificationStatus::ERROR; return false; @@ -3152,15 +3156,18 @@ public: } auto acc_type = GetAccType(); // TODO(vdyadov): remove this check after #2365 - auto res = acc_type.ForAllTypes( - [&](Type acc_type1) { return !(acc_type1 <= Types().RefType() || acc_type1 <= Types().ArrayType()); }); + auto res = acc_type.ForAllTypes([&](Type acc_type1) { + return !IsSubtype(acc_type1, Types().RefType(), GetTypeSystem()) && + !IsSubtype(acc_type1, Types().ArrayType(), GetTypeSystem()); + }); if (res) { LOG_VERIFIER_NON_OBJECT_ACCUMULATOR_TYPE(); status_ = VerificationStatus::ERROR; return false; } - if (acc_type.ForAllTypes([&](Type acc_type1) { return acc_type1 <= Types().NullRefType(); })) { + if (acc_type.ForAllTypes( + [&](Type acc_type1) { return IsSubtype(acc_type1, Types().NullRefType(), GetTypeSystem()); })) { LOG_VERIFIER_ACCUMULATOR_ALWAYS_NULL(); status_ = VerificationStatus::WARNING; // Don't set types for "others of the same origin" when origin is null: n = null, a = n, b = n, a = @@ -3168,16 +3175,18 @@ public: SetAcc(type); MoveToNextInst(); return true; - } else if (acc_type.ForAllTypes([&](Type acc_type1) { return acc_type1 <= type; })) { + } else if (acc_type.ForAllTypes([&](Type acc_type1) { return IsSubtype(acc_type1, type, GetTypeSystem()); })) { 
LOG_VERIFIER_REDUNDANT_CHECK_CAST(ImageOf(acc_type), ImageOf(type)); status_ = VerificationStatus::WARNING; // Do not update register type to parent type as we loose details and can get errors on further flow MoveToNextInst(); return true; - } else if (type <= Types().ArrayType()) { + } else if (IsSubtype(type, Types().ArrayType(), GetTypeSystem())) { auto &&elt_type = GetArrayEltType(type); - res = acc_type.ForAllTypes( - [&](auto acc_type1) { return !(acc_type1 <= Types().ArrayType() || type <= acc_type1); }); + res = acc_type.ForAllTypes([&](auto acc_type1) { + return !IsSubtype(acc_type1, Types().ArrayType(), GetTypeSystem()) && + !IsSubtype(type, acc_type1, GetTypeSystem()); + }); if (res) { // TODO(vdyadov): add here accounting of array elt subtyping in possible types LOG_VERIFIER_IMPOSSIBLE_CHECK_CAST(ImageOf(acc_type), ImagesOf(SubSupTypesOf(type))); @@ -3186,7 +3195,8 @@ public: res = acc_type.ForAllTypes([&](Type acc_type1) { if (IsConcreteArrayType(acc_type1)) { auto &&acc_elt_type = GetArrayEltType(acc_type1); - return !(acc_elt_type <= elt_type || elt_type <= acc_elt_type); + return !IsSubtype(acc_elt_type, elt_type, GetTypeSystem()) && + !IsSubtype(elt_type, acc_elt_type, GetTypeSystem()); } else { return true; } @@ -3197,7 +3207,7 @@ public: status_ = VerificationStatus::WARNING; } } - } else if (acc_type.ForAllTypes([&](Type acc_type1) { return !(type <= acc_type1); })) { + } else if (acc_type.ForAllTypes([&](Type acc_type1) { return !IsSubtype(type, acc_type1, GetTypeSystem()); })) { // NB: accumulator may be checked several times via checkcast and interface types, // so incompatibility here should be just a warning // type in acc and given type should be on same line in type hierarchy @@ -3228,12 +3238,13 @@ public: if (cached_class == nullptr) { return false; } - auto type = Types().TypeOf(*cached_class); + auto type = TypeOfClass(cached_class); if (!type.IsValid()) { return false; } LOG_VERIFIER_DEBUG_TYPE(ImageOf(type)); - if (!(type <= 
Types().ObjectType()) && !(type <= Types().ArrayType())) { + if (!IsSubtype(type, Types().ObjectType(), GetTypeSystem()) && + !IsSubtype(type, Types().ArrayType(), GetTypeSystem())) { // !(type <= Types().ArrayType()) is redundant, because all arrays // are subtypes of either panda.Object <: ObjectType or java.lang.Object <: ObjectType // depending on selected language context @@ -3246,7 +3257,9 @@ public: return false; } - auto result = CheckClassAccessViolation(cached_class, CurrentJob, Types()); + auto *plugin = CurrentJob.JobPlugin(); + auto &job_method = CurrentJob.JobCachedMethod(); + auto result = plugin->CheckClassAccessViolation(cached_class, &job_method, &Types()); if (!result.IsOk()) { LogInnerMessage(CheckResult::protected_class); LOG_VERIFIER_DEBUG_CALL_FROM_TO(CurrentJob.JobCachedMethod().klass.GetName(), cached_class->GetName()); @@ -3256,27 +3269,31 @@ public: auto acc_type = GetAccType(); // TODO(vdyadov): remove this check after #2365 - auto res = acc_type.ForAllTypes( - [&](Type acc_type1) { return !(acc_type1 <= Types().RefType() || acc_type1 <= Types().ArrayType()); }); + auto res = acc_type.ForAllTypes([&](Type acc_type1) { + return !IsSubtype(acc_type1, Types().RefType(), GetTypeSystem()) && + !IsSubtype(acc_type1, Types().ArrayType(), GetTypeSystem()); + }); if (res) { LOG_VERIFIER_NON_OBJECT_ACCUMULATOR_TYPE(); status_ = VerificationStatus::ERROR; return false; } - if (acc_type.ForAllTypes([&](Type acc_type1) { return acc_type1 <= Types().NullRefType(); })) { + if (acc_type.ForAllTypes( + [&](Type acc_type1) { return IsSubtype(acc_type1, Types().NullRefType(), GetTypeSystem()); })) { LOG_VERIFIER_ACCUMULATOR_ALWAYS_NULL(); status_ = VerificationStatus::WARNING; - } else if (acc_type.ForAllTypes([&](Type acc_type1) { return acc_type1 <= type; })) { + } else if (acc_type.ForAllTypes([&](Type acc_type1) { return IsSubtype(acc_type1, type, GetTypeSystem()); })) { LOG_VERIFIER_REDUNDANT_IS_INSTANCE(ImageOf(acc_type), ImageOf(type)); status_ = 
VerificationStatus::WARNING; - } else if (type <= Types().ArrayType()) { + } else if (IsSubtype(type, Types().ArrayType(), GetTypeSystem())) { auto &&elt_type = GetArrayEltType(type); auto &&acc_elt_type = GetArrayEltType(acc_type); bool acc_elt_type_is_empty = true; res = acc_elt_type.ForAllTypes([&](Type acc_elt_type1) { acc_elt_type_is_empty = false; - return !(acc_elt_type1 <= elt_type || elt_type <= acc_elt_type1); + return !IsSubtype(acc_elt_type1, elt_type, GetTypeSystem()) && + !IsSubtype(elt_type, acc_elt_type1, GetTypeSystem()); }); if (res) { if (acc_elt_type_is_empty) { @@ -3287,7 +3304,7 @@ public: } status_ = VerificationStatus::WARNING; } - } else if (acc_type.ForAllTypes([&](Type acc_type1) { return !(type <= acc_type1); })) { + } else if (acc_type.ForAllTypes([&](Type acc_type1) { return !IsSubtype(type, acc_type1, GetTypeSystem()); })) { // type in acc and given type should be on same line in type hierarchy // ACC may be a supertype of given type, because of impresicion of absint, // real type in ACC during execution may be a subtype of ACC type during absint @@ -3324,12 +3341,12 @@ public: SET_STATUS_FOR_MSG(UndefinedRegister, WARNING); return result = false; } - Type formal_type {Types().TypeOf(formal_param)}; - Type norm_type {Types().TypeOf(norm_param)}; + Type formal_type {formal_param.GetType()}; + Type norm_type {norm_param.GetType()}; ASSERT(formal_param.Variance() == norm_param.Variance()); const AbstractType &actual_type = reg_num == INVALID_REG ? 
std::get<0x1>(reg_and_type) : GetRegType(reg_num); - TypeSet norm_actual_type(Types().GetKind(), Types().GetThreadNum()); + TypeSet norm_actual_type; actual_type.ForAllTypes([&](Type actual_type1) { norm_actual_type.Insert(Types().NormalizedTypeOf(actual_type1)); return true; @@ -3337,9 +3354,12 @@ public: // arg: NormalizedTypeOf(actual_type) <= norm_type // check of physical compatibility bool incompatible_types = false; - auto res = actual_type.ExistsType( - [&](auto actual_type1) { return actual_type1 <= Types().RefType() && !actual_type1.IsBot(); }); - if (reg_num != INVALID_REG && formal_type <= Types().RefType() && !formal_type.IsBot() && res) { + auto res = actual_type.ExistsType([&](auto actual_type1) { + return IsSubtype(actual_type1, Types().RefType(), GetTypeSystem()) && + !GetTypeSystem()->IsBot(actual_type1); + }); + if (reg_num != INVALID_REG && IsSubtype(formal_type, Types().RefType(), GetTypeSystem()) && + !GetTypeSystem()->IsBot(formal_type) && res) { if (CheckRegTypesTakingIntoAccountTypecasts(reg_num, formal_type)) { break; } @@ -3348,9 +3368,10 @@ public: ->opts_.Debug.Allow.WrongSubclassingInMethodArgs) { incompatible_types = true; } - } else if (!formal_type.IsBot() && !formal_type.IsTop() && - !norm_actual_type.Exists( - [&](auto norm_actual_type1) { return norm_actual_type1 <= norm_type; })) { + } else if (!GetTypeSystem()->IsBot(formal_type) && !GetTypeSystem()->IsTop(formal_type) && + !norm_actual_type.Exists([&](auto norm_actual_type1) { + return IsSubtype(norm_actual_type1, norm_type, GetTypeSystem()); + })) { incompatible_types = true; } if (incompatible_types) { @@ -3363,8 +3384,9 @@ public: SET_STATUS_FOR_MSG(BadCallIncompatibleParameter, WARNING); return result = false; } - if (formal_type.IsBot()) { - if (actual_type.ExistsType([](auto actual_type1) { return actual_type1.IsBot(); })) { + if (GetTypeSystem()->IsBot(formal_type)) { + if (actual_type.ExistsType( + [&](auto actual_type1) { return GetTypeSystem()->IsBot(actual_type1); 
})) { LOG_VERIFIER_CALL_FORMAL_ACTUAL_BOTH_BOT_OR_TOP("Bot"); break; } else { @@ -3374,8 +3396,9 @@ public: SET_STATUS_FOR_MSG(BadCallFormalIsBot, WARNING); return result = false; } - } else if (formal_type.IsTop()) { - if (actual_type.ExistsType([](auto actual_type1) { return actual_type1.IsTop(); })) { + } else if (GetTypeSystem()->IsTop(formal_type)) { + if (actual_type.ExistsType( + [&](auto actual_type1) { return GetTypeSystem()->IsTop(actual_type1); })) { LOG_VERIFIER_CALL_FORMAL_ACTUAL_BOTH_BOT_OR_TOP("Top"); break; } else { @@ -3384,7 +3407,7 @@ public: END_SHOW_MSG(); break; } - } else if (formal_type <= Types().PrimitiveType()) { + } else if (IsSubtype(formal_type, Types().PrimitiveType(), GetTypeSystem())) { // check implicit conversion of primitive types TypeId formal_id = Types().TypeIdOf(formal_type); const Type &integral32_type = Types().Integral32Type(); @@ -3395,7 +3418,7 @@ public: bool need_to_break = false; PandaVector results; actual_type.ForAllTypes([&](Type actual_type1) { - if (!(actual_type1 <= primitive_type)) { + if (!IsSubtype(actual_type1, primitive_type, GetTypeSystem())) { return true; } actual_type_has_primitives = true; @@ -3406,9 +3429,12 @@ public: // special case, where type after contexts LUB operation is inexact one, like // Integral32Type() - if ((formal_type <= integral32_type && actual_type1 <= integral32_type) || - (formal_type <= integral64_type && actual_type1 <= integral64_type) || - (formal_type <= float64_type && actual_type1 <= float64_type)) { + if ((IsSubtype(formal_type, integral32_type, GetTypeSystem()) && + IsSubtype(actual_type1, integral32_type, GetTypeSystem())) || + (IsSubtype(formal_type, integral64_type, GetTypeSystem()) && + IsSubtype(actual_type1, integral64_type, GetTypeSystem())) || + (IsSubtype(formal_type, float64_type, GetTypeSystem()) && + IsSubtype(actual_type1, float64_type, GetTypeSystem()))) { SHOW_MSG(CallFormalActualDifferent) LOG_VERIFIER_CALL_FORMAL_ACTUAL_DIFFERENT(ImageOf(formal_type), 
ImageOf(actual_type1), ImagesOf(SubtypesOf({actual_type1}))); @@ -3444,9 +3470,10 @@ public: } else { return result = false; } - } else if (formal_type <= Types().MethodType()) { - auto r = - norm_actual_type.Exists([&](auto norm_actual_type1) { return norm_actual_type1 <= norm_type; }); + } else if (IsSubtype(formal_type, Types().MethodType(), GetTypeSystem())) { + auto r = norm_actual_type.Exists([&](auto norm_actual_type1) { + return IsSubtype(norm_actual_type1, norm_type, GetTypeSystem()); + }); if (!r) { SHOW_MSG(BadCallIncompatibleLambdaType) LOG_VERIFIER_BAD_CALL_INCOMPATIBLE_LAMBDA_TYPE( @@ -3504,7 +3531,9 @@ public: return false; } - auto result = CheckMethodAccessViolation(method, CurrentJob, Types()); + auto *plugin = CurrentJob.JobPlugin(); + auto &job_method = CurrentJob.JobCachedMethod(); + auto result = plugin->CheckMethodAccessViolation(method, &job_method, &Types()); if (!result.IsOk()) { const auto &verif_opts = Runtime::GetCurrent()->GetVerificationConfig()->opts_; if (verif_opts.Debug.Allow.MethodAccessViolation && result.IsError()) { @@ -3520,7 +3549,7 @@ public: const auto &method_sig = Types().MethodSignature(*method); auto method_name_getter = [&method]() { return method->GetName(); }; - Type result_type {Types().TypeOf(method_sig.back())}; + Type result_type {method_sig.back().GetType()}; if (!debug::SkipVerificationOfCall(method->id) && !CheckMethodArgs(method_name_getter, *method, @@ -4130,7 +4159,8 @@ private: auto &&arr_elt_type = GetArrayEltType(reg_type); - if (arr_elt_type.ForAllTypes([&](Type arr_elt_type1) { return !(arr_elt_type1 <= expected_elt_type); })) { + if (arr_elt_type.ForAllTypes( + [&](Type arr_elt_type1) { return !IsSubtype(arr_elt_type1, expected_elt_type, GetTypeSystem()); })) { SHOW_MSG(BadArrayElementType2) LOG_VERIFIER_BAD_ARRAY_ELEMENT_TYPE2(ImageOf(arr_elt_type), ImageOf(expected_elt_type)); END_SHOW_MSG(); @@ -4144,7 +4174,8 @@ private: // since there is no problems with storage (all refs are of the same size) 
// and no problems with information losses, it seems fine at first sight. auto res = acc_type.ForAllTypes([&](Type acc_type1) { - return arr_elt_type.ForAllTypes([&](Type arr_elt_type1) { return !(acc_type1 <= arr_elt_type1); }); + return arr_elt_type.ForAllTypes( + [&](Type arr_elt_type1) { return !IsSubtype(acc_type1, arr_elt_type1, GetTypeSystem()); }); }); if (res) { PandaVector arr_elt_type_members; @@ -4208,7 +4239,8 @@ private: auto &&acc_type = GetAccType(); - if (acc_type.ForAllTypes([&](Type acc_type1) { return !(acc_type1 <= acc_supertype); })) { + if (acc_type.ForAllTypes( + [&](Type acc_type1) { return !IsSubtype(acc_type1, acc_supertype, GetTypeSystem()); })) { LOG_VERIFIER_BAD_ACCUMULATOR_TYPE2(ImageOf(acc_type), ImagesOf(SubtypesOf({acc_supertype}))); SET_STATUS_FOR_MSG(BadArrayElementType, WARNING); return false; @@ -4346,15 +4378,15 @@ private: bool IsConcreteArrayType(Type type) { - return type <= Types().ArrayType() && type.ParamsSize() == 1; + return IsSubtype(type, Types().ArrayType(), GetTypeSystem()) && type.TypeArgsSize(GetTypeSystem()) == 1; } Type GetArrayEltType(Type arr_type) { - if (arr_type <= Types().ArrayType()) { - auto &&type_params = arr_type.Params(); + if (IsSubtype(arr_type, Types().ArrayType(), GetTypeSystem())) { + auto &&type_params = arr_type.GetTypeArgs(GetTypeSystem()); ASSERT(type_params.size() == 1); - return Types().TypeOf(type_params[0]); + return type_params[0].GetType(); } else { return Types().Top(); } @@ -4365,7 +4397,7 @@ private: if (arr_type.IsType()) { return GetArrayEltType(arr_type.GetType()); } else if (arr_type.IsTypeSet()) { - TypeSet result(Types().GetKind(), Types().GetThreadNum()); + TypeSet result {}; arr_type.GetTypeSet().ForAll([&](Type type1) { if (IsConcreteArrayType(type1)) { result.Insert(GetArrayEltType(type1)); @@ -4492,7 +4524,7 @@ private: template bool CheckArrayCtor(const CachedMethod &ctor, Fetcher reg_nums) { - Type klass = Types().TypeOf(ctor.klass); + Type klass = 
TypeOfClass(&ctor.klass); if (!klass.IsValid()) { return false; } @@ -4536,15 +4568,16 @@ private: PandaVector SubSupTypesOf(Type type) { + auto type_system = GetTypeSystem(); PandaVector result; - auto callback = [&result](auto t) { - if (!t.IsBot() && !t.IsTop()) { + auto callback = [&](auto t) { + if (!type_system->IsBot(t) && !type_system->IsTop(t)) { result.push_back(t); } return true; }; - type.ForAllSubtypes(callback); - type.ForAllSupertypes(callback); + type.ForAllSubtypes(type_system, callback); + type.ForAllSupertypes(type_system, callback); return result; } diff --git a/verification/absint/absint.cpp b/verification/absint/absint.cpp index fcb70bf7b0d556280060cc316e7e5116752c38a5..81d558a414323944cfff664ec5e3fe79cf86f438 100644 --- a/verification/absint/absint.cpp +++ b/verification/absint/absint.cpp @@ -29,6 +29,7 @@ #include "macros.h" +#include #include #include "abs_int_inl.h" @@ -75,13 +76,13 @@ VerificationContext PrepareVerificationContext(PandaTypes &pandaTypes, const Job const auto &signature = verifCtx.Types().MethodSignature(cached_method); for (size_t idx = 0; idx < signature.size() - 1; ++idx) { - const Type &t = pandaTypes.TypeOf(signature[idx]); + const Type &t = Type {signature[idx]}; reg_ctx[num_vregs++] = AbstractTypedValue {t, verifCtx.NewVar(), AbstractTypedValue::Start {}, idx}; } LOG_VERIFIER_DEBUG_REGISTERS("registers =", reg_ctx.DumpRegs([&pandaTypes](const auto &t) { return pandaTypes.ImageOf(t); })); - verifCtx.SetReturnType(pandaTypes.TypeOf(signature.back())); + verifCtx.SetReturnType(Type {signature.back()}); LOG_VERIFIER_DEBUG_RESULT(pandaTypes.ImageOf(verifCtx.ReturnType())); @@ -149,6 +150,7 @@ bool ComputeRegContext(CflowExcHandlerInfo const &exc_handler, VerificationConte { auto &cflow_info = verifCtx.CflowInfo(); auto &exec_ctx = verifCtx.ExecCtx(); + auto type_system = verifCtx.GetTypeSystem(); #ifndef NDEBUG const void *codeStart = cflow_info.InstMap().AddrStart(); @@ -166,27 +168,21 @@ bool 
ComputeRegContext(CflowExcHandlerInfo const &exc_handler, VerificationConte #endif bool first = true; - exec_ctx.ForContextsOnCheckPointsInRange( + exec_ctx.ForContextsOnCheckPointsInRange(exc_handler.TryBlock.Start, exc_handler.TryBlock.End, + [&](const uint8_t *pc, const RegContext &ctx) { + if (cflow_info.ExcSrcMap().IsExceptionSource(pc)) { #ifndef NDEBUG - exc_handler.TryBlock.Start, exc_handler.TryBlock.End, - [&cflow_info, ®_context, &image_of, &first](const uint8_t *pc, const RegContext &ctx) { -#else - exc_handler.TryBlock.Start, exc_handler.TryBlock.End, - [&cflow_info, ®_context, &first](const uint8_t *pc, const RegContext &ctx) { + LOG_VERIFIER_DEBUG_REGISTERS("+", ctx.DumpRegs(image_of)); #endif - if (cflow_info.ExcSrcMap().IsExceptionSource(pc)) { -#ifndef NDEBUG - LOG_VERIFIER_DEBUG_REGISTERS("+", ctx.DumpRegs(image_of)); -#endif - if (first) { - first = false; - *reg_context = ctx; - } else { - *reg_context &= ctx; - } - } - return true; - }); + if (first) { + first = false; + *reg_context = ctx; + } else { + reg_context->UnionWith(&ctx, verifCtx.GetTypeSystem()); + } + } + return true; + }); #ifndef NDEBUG LOG_VERIFIER_DEBUG_REGISTERS("=", reg_context->DumpRegs(image_of)); #endif @@ -196,12 +192,12 @@ bool ComputeRegContext(CflowExcHandlerInfo const &exc_handler, VerificationConte return false; } - reg_context->RemoveInconsistentRegs(); + reg_context->RemoveInconsistentRegs(type_system); #ifndef NDEBUG - if (reg_context->HasInconsistentRegs()) { + if (reg_context->HasInconsistentRegs(type_system)) { LOG_VERIFIER_COMMON_CONTEXT_INCONSISTENT_REGISTER_HEADER(); - for (int reg_num : reg_context->InconsistentRegsNums()) { + for (int reg_num : reg_context->InconsistentRegsNums(type_system)) { LOG(DEBUG, VERIFIER) << AbsIntInstructionHandler::RegisterName(reg_num); } } @@ -230,7 +226,7 @@ VerificationStatus VerifyExcHandler(CflowExcHandlerInfo const &exc_handler, Veri << "0x" << takeAddress(exc_handler.TryBlock.End) << " ]"; #endif - Type exception_type; 
+ Type exception_type {}; if (exception.HasRef()) { exception_type = verifCtx.Types().TypeOf(exception.Get()); } else { diff --git a/verification/absint/exec_context.h b/verification/absint/exec_context.h index d274441de42780872c6d350fbc49d597bef50ecb..dccabbf81e72d60d141155de2503966b4f3f4178 100644 --- a/verification/absint/exec_context.h +++ b/verification/absint/exec_context.h @@ -60,20 +60,20 @@ public: void StoreCurrentRegContextForAddrIfHasContext(const uint8_t *addr, Reporter reporter) { RegContext &ctx = RegContextOnCheckPoint_[addr]; - auto lub = ctx & CurrentRegContext_; + auto lub = RcUnion(&ctx, &CurrentRegContext_, type_system_); - if (lub.HasInconsistentRegs()) { - for (int reg_idx : lub.InconsistentRegsNums()) { + if (lub.HasInconsistentRegs(type_system_)) { + for (int reg_idx : lub.InconsistentRegsNums(type_system_)) { if (!reporter(reg_idx, CurrentRegContext_[reg_idx], ctx[reg_idx])) { break; } } } - ctx &= CurrentRegContext_; + ctx.UnionWith(&CurrentRegContext_, type_system_); - if (ctx.HasInconsistentRegs()) { - ctx.RemoveInconsistentRegs(); + if (ctx.HasInconsistentRegs(type_system_)) { + ctx.RemoveInconsistentRegs(type_system_); } } @@ -81,8 +81,8 @@ public: { if (HasContext(addr)) { RegContext &ctx = RegContextOnCheckPoint_[addr]; - ctx &= CurrentRegContext_; - ctx.RemoveInconsistentRegs(); + ctx.UnionWith(&CurrentRegContext_, type_system_); + ctx.RemoveInconsistentRegs(type_system_); } else if (IsCheckPoint(addr)) { RegContextOnCheckPoint_[addr] = CurrentRegContext_; } @@ -195,10 +195,11 @@ public: }); } - ExecContext(const uint8_t *pcStartPtr, const uint8_t *pcEndPtr) + ExecContext(const uint8_t *pcStartPtr, const uint8_t *pcEndPtr, TypeSystem const *type_system) : CheckPoint_ {pcStartPtr, pcEndPtr}, ProcessedJumps_ {pcStartPtr, pcEndPtr}, - TypecastPoint_ {pcStartPtr, pcEndPtr} + TypecastPoint_ {pcStartPtr, pcEndPtr}, + type_system_ {type_system} { } @@ -212,6 +213,7 @@ private: AddrMap TypecastPoint_; PandaUnorderedSet> EntryPoint_; 
PandaUnorderedMap RegContextOnCheckPoint_; + TypeSystem const *type_system_; RegContext CurrentRegContext_; }; } // namespace panda::verifier diff --git a/verification/absint/panda_types.cpp b/verification/absint/panda_types.cpp index b78a33b0db391d621c20ff3167fe3484d5b94211..1cb9d2cf00188ae87bacce5131c53069906b2eb6 100644 --- a/verification/absint/panda_types.cpp +++ b/verification/absint/panda_types.cpp @@ -22,11 +22,13 @@ #include "runtime/include/runtime.h" #include "utils/span.h" -#include "verification/type/type_params.h" -#include "verification/type/type_system.h" -#include "verification/type/type_sort.h" +#include "verification/type/type_arg.h" #include "verification/type/type_image.h" +#include "verification/type/type_sort.h" +#include "verification/type/type_system.h" +#include "verification/type/type_type.h" #include "verification/jobs/cache.h" +#include "verification/plugins.h" #include "verifier_messages.h" @@ -42,45 +44,29 @@ namespace panda::verifier { Type PandaTypes::NormalizedTypeOf(Type type) { ASSERT(type.IsValid()); - if (type.IsBot() || type.IsTop()) { + if (type_system_.IsBot(type) || type_system_.IsTop(type)) { return type; } auto t = NormalizedTypeOf_.find(type); if (t != NormalizedTypeOf_.cend()) { return t->second; } - Type result = type; - if (type <= Integral32Type()) { - result = Normalize()(~Integral32Type()); - NormalizedTypeOf_[type] = result; - } else if (type <= Integral64Type()) { - result = Normalize()(~Integral64Type()); - NormalizedTypeOf_[type] = result; - // NOLINTNEXTLINE(bugprone-branch-clone) - } else if (type <= F32()) { - result = Normalize()(~F32()); - NormalizedTypeOf_[type] = result; - } else if (type <= F64()) { - result = Normalize()(~F64()); - NormalizedTypeOf_[type] = result; - } else if (type <= MethodType()) { - result = NormalizedMethod()(NormalizeMethodSignature(type.Params())); - } + Type result = plugin_->NormalizeType(type, this); NormalizedTypeOf_[type] = result; return result; } -TypeParams 
PandaTypes::NormalizeMethodSignature(const TypeParams &sig) +TypeArgs PandaTypes::NormalizeMethodSignature(const TypeArgs &sig) { - TypeParams result {TypeSystemKind::JAVA, threadnum_}; - sig.ForEach([&result, this](const auto ¶m) { - const Type &type = param; - result >> (NormalizedTypeOf(type) * param.Variance()); - }); + TypeArgs result; + for (auto const &type_arg : sig) { + const Type &type = Type(TypeArg {type_arg}); + result.push_back(NormalizedTypeOf(type) * type_arg.Variance()); + }; return result; } -const TypeParams &PandaTypes::NormalizedMethodSignature(const CachedMethod &method) +const TypeArgs &PandaTypes::NormalizedMethodSignature(const CachedMethod &method) { auto &&method_id = method.id; auto it = NormalizedSigOfMethod_.find(method_id); @@ -93,15 +79,15 @@ const TypeParams &PandaTypes::NormalizedMethodSignature(const CachedMethod &meth return NormalizedSigOfMethod_[method_id]; } -const TypeParams &PandaTypes::MethodSignature(const CachedMethod &method) +const TypeArgs &PandaTypes::MethodSignature(const CachedMethod &method) { auto &&method_id = method.id; auto it = SigOfMethod_.find(method_id); if (it != SigOfMethod_.end()) { return it->second; } - TypeParams params {TypeSystemKind::JAVA, threadnum_}; - Type return_type; + TypeArgs type_args; + Type return_type {}; bool return_is_processed = false; for (const auto &arg : method.signature) { Type t = LibCache::Visit( @@ -118,14 +104,14 @@ const TypeParams &PandaTypes::MethodSignature(const CachedMethod &method) LOG_VERIFIER_JAVA_TYPES_METHOD_ARG_CANNOT_BE_CONVERTED_TO_TYPE(method.GetName()); } if (return_is_processed) { - params >> -t; + type_args.push_back(-t); } else { return_type = t; return_is_processed = true; } } - params >> +return_type; - SigOfMethod_[method_id] = params; + type_args.push_back(+return_type); + SigOfMethod_[method_id] = type_args; return SigOfMethod_[method_id]; } @@ -164,7 +150,7 @@ PandaTypes::TypeId PandaTypes::TypeIdOf(const Type &type) const if (type == F64()) { return 
TypeId::F64; } - if (type.IsTop()) { + if (type_system_.IsTop(type)) { return TypeId::VOID; } @@ -179,63 +165,12 @@ Type PandaTypes::TypeOf(const CachedMethod &method) return k->second; } ASSERT(!DoNotCalculateMethodType_); - auto &&sig = MethodSignature(method); - auto &&norm_sig = NormalizedMethodSignature(method); - Type type; - type = Method()(sig); - type << MethodType(); - TypeOfMethod_[id] = type; + Type result = plugin_->TypeOfMethod(&method, this); + TypeOfMethod_[id] = result; // Normalize(Method) <: NormalizedMethod(NormalizedMethodSig) - auto norm_type = Normalize()(~type); - auto norm_method = NormalizedMethod()(norm_sig); - norm_type << norm_method; - NormalizedTypeOf_[type] = norm_method; + NormalizedTypeOf_[result] = plugin_->NormalizeType(result, this); MethodNameOfId_[id] = method.GetName(); - return type; -} - -void PandaTypes::SetArraySubtyping(const Type &t) -{ - PandaVector to_process; - PandaVector just_subtype; - t.ForAllSupertypes([&]([[maybe_unused]] const Type &st) { - if (!Array()[+st]) { - to_process.emplace_back(st); - } else { - just_subtype.emplace_back(st); - } - return true; - }); - auto array_type = Array()(+t); - for (const auto &st : just_subtype) { - array_type << Array()(+st); - } - for (const auto &st : to_process) { - array_type << Array()(+st); - SetArraySubtyping(st); - } -} - -Type PandaTypes::TypeOfArray(const CachedClass &klass) -{ - ASSERT(klass.flags[CachedClass::Flag::ARRAY_CLASS]); - - Type type; - auto component = klass.GetArrayComponent(); - if (component.HasRef()) { - auto component_type = TypeOf(component.Get()); - type = Array()(+component_type); - SetArraySubtyping(component_type); - } else { - type = Array()(+Top()); - LOG_VERIFIER_JAVA_TYPES_ARRAY_COMPONENT_TYPE_IS_UNDEFINED(); - } - type << ArrayType(); - if (klass.flags[CachedClass::Flag::OBJECT_ARRAY_CLASS]) { - type << ObjectArrayType(); - } - - return type; + return result; } Type PandaTypes::TypeOf(const CachedClass &klass) @@ -246,101 +181,26 @@ Type 
PandaTypes::TypeOf(const CachedClass &klass) return k->second; } - PandaVector supertypes; - for (const auto &ancestor : klass.ancestors) { - // ancestor here cannot be unresolved descriptor - ASSERT(LibCache::IsRef(ancestor)); - supertypes.emplace_back(TypeOf(LibCache::GetRef(ancestor))); - } - - Type type; - bool is_primitive = klass.flags[CachedClass::Flag::PRIMITIVE]; + Type type = plugin_->TypeOfClass(&klass, this); auto class_name = klass.GetName(); - if (klass.flags[CachedClass::Flag::ARRAY_CLASS]) { - type = TypeOfArray(klass); - } else if (is_primitive) { - type = TypeOf(klass.type_id); - } else { - type = TypeForName(class_name); - } - - if (!is_primitive) { - bool is_string = klass.flags[CachedClass::Flag::STRING_CLASS]; - if (is_string) { - type << StringType(); - } else { - type << ObjectType(); - } - NullRefType() << type << RefType(); - TypeClass()(~type) << TypeClassType() << RefType(); - } - if (klass.flags[CachedClass::Flag::ABSTRACT]) { - Abstract()(~type) << AbstractType(); - } - for (auto &super : supertypes) { - type << super; - } ClassNameOfId_[id] = class_name; TypeOfClass_[id] = type; NormalizedTypeOf(type); return type; } -Type PandaTypes::TypeOf(PandaTypes::TypeId id) const +Type PandaTypes::TypeOf(PandaTypes::TypeId id) { - Type type; - switch (id) { - case TypeId::VOID: - type = Top(); - break; - case TypeId::U1: - type = U1(); - break; - case TypeId::I8: - type = I8(); - break; - case TypeId::U8: - type = U8(); - break; - case TypeId::I16: - type = I16(); - break; - case TypeId::U16: - type = U16(); - break; - case TypeId::I32: - type = I32(); - break; - case TypeId::U32: - type = U32(); - break; - case TypeId::I64: - type = I64(); - break; - case TypeId::U64: - type = U64(); - break; - case TypeId::F32: - type = F32(); - break; - case TypeId::F64: - type = F64(); - break; - case TypeId::REFERENCE: - type = RefType(); - break; - default: - LOG_VERIFIER_JAVA_TYPES_CANNOT_CONVERT_TYPE_ID_TO_TYPE(id); - type = Top(); - } - return type; + 
return plugin_->TypeOfTypeId(id, this); } -PandaTypes::PandaTypes(ThreadNum threadnum) - : threadnum_ {threadnum}, - TypeSystem_ {TypeSystems::Get(TypeSystemKind::JAVA, threadnum_)}, +PandaTypes::PandaTypes(panda_file::SourceLang lang) + : plugin_ {plugin::GetLanguagePlugin(lang)}, + sort_names_ {"Bot", "Top"}, + type_system_ {sort_names_["Bot"], sort_names_["Top"]}, + type_image_ {&sort_names_, &type_system_}, Array_ {ParametricTypeForName("Array")}, Method_ {ParametricTypeForName("Method")}, NormalizedMethod_ {ParametricTypeForName("NormalizedMethod")}, @@ -385,66 +245,89 @@ PandaTypes::PandaTypes(ThreadNum threadnum) Float32Type_ {TypeForName("Float32Bits")}, Float64Type_ {TypeForName("Float64Bits")} { - for (panda::panda_file::SourceLang lang : panda::panda_file::LANG_ITERATOR) { - LanguageContextBase *ctx = panda::plugins::GetLanguageContextBase(lang); - - auto emplace = [&](auto &types, const char *name) { - if (name != nullptr) { - types.emplace(lang, TypeForName(name)); - } - }; - - emplace(LangContextTypesClass_, ctx->GetVerificationTypeClass()); - emplace(LangContextTypesObjects_, ctx->GetVerificationTypeObject()); - emplace(LangContextTypesThrowables_, ctx->GetVerificationTypeThrowable()); - } + LanguageContextBase *ctx = panda::plugins::GetLanguageContextBase(lang); + + auto emplace = [&](auto &types, const char *name) { + if (name != nullptr) { + types.emplace(lang, TypeForName(name)); + } + }; - TypeSystem_.SetIncrementalRelationClosureMode(false); + emplace(LangContextTypesClass_, ctx->GetVerificationTypeClass()); + emplace(LangContextTypesObjects_, ctx->GetVerificationTypeObject()); + emplace(LangContextTypesThrowables_, ctx->GetVerificationTypeThrowable()); + + type_system_.SetIncrementalRelationClosureMode(false); // base subtyping of primitive types - I8() << I16() << I32(); - U1() << U8() << U16() << U32(); + MakeSubtype(I8(), I16(), &type_system_); + MakeSubtype(I16(), I32(), &type_system_); + MakeSubtype(U1(), U8(), &type_system_); + 
MakeSubtype(U8(), U16(), &type_system_); + MakeSubtype(U16(), U32(), &type_system_); + // integral - TypeSet(U1(), I8(), U8()) << Integral8Type(); - TypeSet(Integral8Type(), I16(), U16()) << Integral16Type(); - TypeSet(Integral16Type(), I32(), U32()) << Integral32Type(); - TypeSet(I64(), U64()) << Integral64Type(); - // sizes - F32() << Float32Type(); - F64() << Float64Type(); - TypeSet(Integral32Type(), Float32Type()) << Bits32Type(); - TypeSet(Integral64Type(), Float64Type()) << Bits64Type(); - TypeSet(Bits32Type(), Bits64Type()) << PrimitiveType(); - - TypeClassType() << RefType(); - - TypeSet object_or_ref(ObjectType(), RefType()); - for (const auto &[lang, type] : LangContextTypesClass_) { - NullRefType() << type << object_or_ref; - (void)lang; + for (auto ii : {U1(), I8(), U8()}) { + MakeSubtype(ii, Integral8Type(), &type_system_); } - - for (const auto &[lang, type] : LangContextTypesObjects_) { - NullRefType() << type << object_or_ref; - (void)lang; + for (auto ii : {Integral8Type(), I16(), U16()}) { + MakeSubtype(ii, Integral16Type(), &type_system_); + } + for (auto ii : {Integral16Type(), I32(), U32()}) { + MakeSubtype(ii, Integral32Type(), &type_system_); + } + for (auto ii : {I64(), U64()}) { + MakeSubtype(ii, Integral64Type(), &type_system_); } - for (const auto &[lang, type] : LangContextTypesThrowables_) { - NullRefType() << type << object_or_ref; - (void)lang; + // sizes + MakeSubtype(F32(), Float32Type(), &type_system_); + MakeSubtype(F64(), Float64Type(), &type_system_); + for (auto tt : {Integral32Type(), Float32Type()}) { + MakeSubtype(tt, Bits32Type(), &type_system_); + } + for (auto tt : {Integral64Type(), Float64Type()}) { + MakeSubtype(tt, Bits64Type(), &type_system_); + } + for (auto tt : {Bits32Type(), Bits64Type()}) { + MakeSubtype(tt, PrimitiveType(), &type_system_); } - NullRefType() << (ArrayType() | ObjectArrayType()) << RefType(); + MakeSubtype(TypeClassType(), RefType(), &type_system_); - for (const auto &[lang, type] : 
LangContextTypesObjects_) { - TypeClass()(~Object(lang)) << TypeClassType(); - (void)type; + auto object_or_ref = {ObjectType(), RefType()}; + if (LangContextTypesClass_.count(lang) > 0) { + auto types = LangContextTypesClass_.at(lang); + MakeSubtype(NullRefType(), types, &type_system_); + for (auto oo : object_or_ref) { + MakeSubtype(types, oo, &type_system_); + } } + if (LangContextTypesObjects_.count(lang) > 0) { + auto objects = LangContextTypesObjects_.at(lang); + MakeSubtype(NullRefType(), objects, &type_system_); + for (auto oo : object_or_ref) { + MakeSubtype(objects, oo, &type_system_); + } + } + if (LangContextTypesThrowables_.count(lang) > 0) { + auto throwables = LangContextTypesThrowables_.at(lang); + MakeSubtype(NullRefType(), throwables, &type_system_); + for (auto oo : object_or_ref) { + MakeSubtype(throwables, oo, &type_system_); + } + } + for (auto ao : {ArrayType(), ObjectArrayType()}) { + MakeSubtype(NullRefType(), ao, &type_system_); + MakeSubtype(ao, RefType(), &type_system_); + } + + type_system_.CloseSubtypingRelation(); - TypeSystem_.CloseSubtypingRelation(); + type_system_.SetIncrementalRelationClosureMode(false); + type_system_.SetDeferIncrementalRelationClosure(false); - TypeSystem_.SetIncrementalRelationClosureMode(false); - TypeSystem_.SetDeferIncrementalRelationClosure(false); + plugin_->TypeSystemSetup(this); } } // namespace panda::verifier diff --git a/verification/absint/panda_types.h b/verification/absint/panda_types.h index f2a3c9a7aae352e259ea6236a3a905c7bbf77077..391e4b9b731ee88f1ee5a2c55a6e01efbb53cb8a 100644 --- a/verification/absint/panda_types.h +++ b/verification/absint/panda_types.h @@ -19,7 +19,9 @@ #include "runtime/include/method.h" #include "runtime/include/class.h" -#include "verification/type/type_systems.h" +#include "verification/jobs/cache.h" + +#include "verification/type/type_image.h" #include "verification/type/type_system.h" #include "verification/type/type_sort.h" #include "verification/type/type_tags.h" @@ 
-27,13 +29,16 @@ #include "verification/util/synchronized.h" #include "verification/util/callable.h" -#include "verification/jobs/cache.h" + +#include "verification/value/variables.h" #include "runtime/include/mem/panda_containers.h" #include "runtime/include/mem/panda_string.h" #include "libpandabase/os/mutex.h" -#include "plugins.h" +namespace panda::verifier::plugin { +class Plugin; +} namespace panda::verifier { class PandaTypes { @@ -45,6 +50,11 @@ public: // TODO(vdyadov): solve problem with cycles // (todo: mutual recursive types) + PandaTypes(panda_file::SourceLang lang); + NO_COPY_SEMANTIC(PandaTypes); + + ~PandaTypes() = default; + const PandaString &ClassNameOfId(Id id) { return ClassNameOfId_[id]; @@ -56,53 +66,39 @@ public: } Type NormalizedTypeOf(Type type); - TypeParams NormalizeMethodSignature(const TypeParams &sig); + TypeArgs NormalizeMethodSignature(const TypeArgs &sig); - const TypeParams &MethodSignature(const CachedMethod &method); - const TypeParams &NormalizedMethodSignature(const CachedMethod &method); + const TypeArgs &MethodSignature(const CachedMethod &method); + const TypeArgs &NormalizedMethodSignature(const CachedMethod &method); TypeId TypeIdOf(const Type &type) const; Type TypeOf(const CachedMethod &method); Type TypeOf(const CachedClass &klass); - Type TypeOf(TypeId id) const; - - Type TypeOf(const TypeParamIdx &idx) const - { - return {TypeSystemKind::JAVA, threadnum_, idx}; - } + Type TypeOf(TypeId id); void CloseAccumulatedSubtypingRelation() { - TypeSystem_.CloseAccumulatedSubtypingRelation(); + type_system_.CloseAccumulatedSubtypingRelation(); }; - SortIdx GetSort(const PandaString &name) const - { - return TypeSystems::GetSort(TypeSystemKind::JAVA, threadnum_, name); - } - - TypeSystemKind GetKind() const + SortIdx GetSort(const PandaString &name) { - return TypeSystemKind::JAVA; + return sort_names_[name]; } - ThreadNum GetThreadNum() const + TypeSystem &GetTypeSystem() { - return threadnum_; + return type_system_; } - 
explicit PandaTypes(ThreadNum threadnum); - NO_COPY_SEMANTIC(PandaTypes); - - ~PandaTypes() = default; Type Bot() const { - return TypeSystem_.Bot(); + return type_system_.Bot(); } Type Top() const { - return TypeSystem_.Top(); + return type_system_.Top(); } const ParametricType &Array() { @@ -283,37 +279,37 @@ public: return LangContextTypesThrowables_.at(lang); } - const PandaString &ImageOf(const Type &type) + const PandaString &ImageOf(Type type) { - return TypeSystems::ImageOfType(type); + return type_image_.ImageOfType(type); } - PandaString ImageOf(const TypeParams ¶ms) + PandaString ImageOf(const TypeArgs &type_args) { - return TypeSystems::ImageOfTypeParams(params); + return type_image_.ImageOfTypeArgs(type_args); } template - void ForSubtypesOf(const Type &type, Handler &&handler) const + void ForSubtypesOf(Type type, Handler &&handler) const { - type.ForAllSubtypes(std::forward(handler)); + type.ForAllSubtypes(&type_system_, std::forward(handler)); } template - void ForSupertypesOf(const Type &type, Handler &&handler) const + void ForSupertypesOf(Type type, Handler &&handler) const { - type.ForAllSupertypes(std::forward(handler)); + type.ForAllSupertypes(&type_system_, std::forward(handler)); } - PandaVector SubtypesOf(const Type &type) const + PandaVector SubtypesOf(Type type) const { PandaVector result; - type.ForAllSubtypes([&result](const auto &t) { + type.ForAllSubtypes(&type_system_, [&result](const auto &t) { result.push_back(t); return true; }); return result; } - PandaVector SupertypesOf(const Type &type) const + PandaVector SupertypesOf(Type type) const { PandaVector result; - type.ForAllSupertypes([&result](const auto &t) { + type.ForAllSupertypes(&type_system_, [&result](const auto &t) { result.push_back(t); return true; }); @@ -342,8 +338,8 @@ public: template void DisplaySubtyping(Handler handler) { - TypeSystem_.ForAllTypes([this, &handler](const Type &type) { - type.ForAllSupertypes([this, &handler, &type](const Type &supertype) { + 
type_system_.ForAllTypes([this, &handler](const Type &type) { + type.ForAllSupertypes(&type_system_, [this, &handler, &type](const Type &supertype) { handler(ImageOf(type), ImageOf(supertype)); return true; }); @@ -371,16 +367,23 @@ public: return Variables_.NewVar(); } + Type TypeForName(const PandaString &name) + { + return ParametricTypeForName(name).WithTypeArgs({}, &type_system_); + } + private: - ThreadNum threadnum_; + plugin::Plugin const *plugin_; PandaUnorderedMap TypeOfClass_; PandaUnorderedMap TypeOfMethod_; - PandaUnorderedMap SigOfMethod_; - PandaUnorderedMap NormalizedSigOfMethod_; + PandaUnorderedMap SigOfMethod_; + PandaUnorderedMap NormalizedSigOfMethod_; PandaUnorderedMap ClassNameOfId_; PandaUnorderedMap MethodNameOfId_; PandaUnorderedMap NormalizedTypeOf_; - TypeSystem &TypeSystem_; + SortNames sort_names_; + TypeSystem type_system_; + TypeImage type_image_; Variables Variables_; // base sorts @@ -434,18 +437,9 @@ private: bool DoNotCalculateMethodType_ {true}; - void SetArraySubtyping(const Type &t); - - Type TypeOfArray(const CachedClass &klass); - ParametricType ParametricTypeForName(const PandaString &name) { - return TypeSystem_.Parametric(GetSort(name)); - } - - Type TypeForName(const PandaString &name) - { - return ParametricTypeForName(name)(); + return type_system_.Parametric(GetSort(name)); } }; } // namespace panda::verifier diff --git a/verification/absint/reg_context.h b/verification/absint/reg_context.h index bcfd653ec35bff0373cc10eec8f032aec20ae8e2..4d7ac72660ae826f4ff37795bedfbe549b22ce86 100644 --- a/verification/absint/reg_context.h +++ b/verification/absint/reg_context.h @@ -38,36 +38,20 @@ Design decisions: class RegContext { public: - RegContext() = default; - explicit RegContext(size_t size) : Regs_(size) {} + explicit RegContext() = default; + RegContext(size_t size) : Regs_(size) {} ~RegContext() = default; DEFAULT_MOVE_SEMANTIC(RegContext); DEFAULT_COPY_SEMANTIC(RegContext); - RegContext operator&(const RegContext &rhs) 
const + RegContext &UnionWith(RegContext const *rhs, TypeSystem const *tsys) { - RegContext result(std::max(Regs_.size(), rhs.Regs_.size())); - auto result_it = result.Regs_.begin(); auto lhs_it = Regs_.begin(); - auto rhs_it = rhs.Regs_.begin(); - while (lhs_it != Regs_.end() && rhs_it != rhs.Regs_.end()) { + auto rhs_it = rhs->Regs_.begin(); + while (lhs_it != Regs_.end() && rhs_it != rhs->Regs_.end()) { if (!(*lhs_it).IsNone() && !(*rhs_it).IsNone()) { - *result_it = *lhs_it & *rhs_it; - } - ++lhs_it; - ++rhs_it; - ++result_it; - } - return result; - } - RegContext &operator&=(const RegContext &rhs) - { - auto lhs_it = Regs_.begin(); - auto rhs_it = rhs.Regs_.begin(); - while (lhs_it != Regs_.end() && rhs_it != rhs.Regs_.end()) { - if (!(*lhs_it).IsNone() && !(*rhs_it).IsNone()) { - *lhs_it = *lhs_it & *rhs_it; + *lhs_it = AtvJoin(&*lhs_it, &*rhs_it, tsys); } else { *lhs_it = AbstractTypedValue {}; } @@ -152,11 +136,11 @@ public: } } } - bool HasInconsistentRegs() const + bool HasInconsistentRegs(TypeSystem const *tsys) const { bool result = false; - EnumerateAllRegs([&result](int, const auto &av) { - if (!av.IsConsistent()) { + EnumerateAllRegs([&](int, const auto &av) { + if (!av.IsConsistent(tsys)) { result = true; return false; } @@ -164,11 +148,11 @@ public: }); return result; } - auto InconsistentRegsNums() const + auto InconsistentRegsNums(TypeSystem const *tsys) const { PandaVector result; - EnumerateAllRegs([&result](int num, const auto &av) { - if (!av.IsConsistent()) { + EnumerateAllRegs([&](int num, const auto &av) { + if (!av.IsConsistent(tsys)) { result.push_back(num); } return true; @@ -188,10 +172,10 @@ public: Regs_.clear(); ConflictingRegs_.clear(); } - void RemoveInconsistentRegs() + void RemoveInconsistentRegs(TypeSystem const *tsys) { - EnumerateAllRegs([this](int num, auto &atv) { - if (!atv.IsConsistent()) { + EnumerateAllRegs([this, tsys](int num, auto &atv) { + if (!atv.IsConsistent(tsys)) { ConflictingRegs_.insert(num); atv = 
AbstractTypedValue {}; } else { @@ -224,7 +208,27 @@ private: // TODO(vdyadov): After introducing sparse bit-vectors, change ConflictingRegs_ type. PandaUnorderedSet ConflictingRegs_; + + friend RegContext RcUnion(RegContext const *lhs, RegContext const *rhs, TypeSystem const *); }; + +inline RegContext RcUnion(RegContext const *lhs, RegContext const *rhs, TypeSystem const *tsys) +{ + RegContext result(std::max(lhs->Regs_.size(), rhs->Regs_.size())); + auto result_it = result.Regs_.begin(); + auto lhs_it = lhs->Regs_.begin(); + auto rhs_it = rhs->Regs_.begin(); + while (lhs_it != lhs->Regs_.end() && rhs_it != rhs->Regs_.end()) { + if (!(*lhs_it).IsNone() && !(*rhs_it).IsNone()) { + *result_it = AtvJoin(&*lhs_it, &*rhs_it, tsys); + } + ++lhs_it; + ++rhs_it; + ++result_it; + } + return result; +} + } // namespace panda::verifier #endif // !PANDA_VERIFIER_ABSINT_REG_CONTEXT_HPP_ diff --git a/verification/absint/tests/exec_context_test.cpp b/verification/absint/tests/exec_context_test.cpp index 71c36cc47ab1f1ecc1b6923c7c49dcda49f204c1..5365145635fedaeef23023f008f5f2be76b4c836 100644 --- a/verification/absint/tests/exec_context_test.cpp +++ b/verification/absint/tests/exec_context_test.cpp @@ -37,19 +37,23 @@ TEST_F(VerifierTest, AbsIntExecContext) TypeSystem type_system {sort["Bot"], sort["Top"]}; Variables variables; - auto i8 = type_system.Parametric(sort["i8"])(); - auto i16 = type_system.Parametric(sort["i16"])(); - auto i32 = type_system.Parametric(sort["i32"])(); - auto i64 = type_system.Parametric(sort["i64"])(); + auto i8 = type_system.Parametric(sort["i8"]).WithTypeArgs({}, &type_system); + auto i16 = type_system.Parametric(sort["i16"]).WithTypeArgs({}, &type_system); + auto i32 = type_system.Parametric(sort["i32"]).WithTypeArgs({}, &type_system); + auto i64 = type_system.Parametric(sort["i64"]).WithTypeArgs({}, &type_system); - i8 << i16 << i32 << i64; + MakeSubtype(i8, i16, &type_system); + MakeSubtype(i16, i32, &type_system); + MakeSubtype(i32, i64, 
&type_system); - auto u8 = type_system.Parametric(sort["u8"])(); - auto u16 = type_system.Parametric(sort["u16"])(); - auto u32 = type_system.Parametric(sort["u32"])(); - auto u64 = type_system.Parametric(sort["u64"])(); + auto u8 = type_system.Parametric(sort["u8"]).WithTypeArgs({}, &type_system); + auto u16 = type_system.Parametric(sort["u16"]).WithTypeArgs({}, &type_system); + auto u32 = type_system.Parametric(sort["u32"]).WithTypeArgs({}, &type_system); + auto u64 = type_system.Parametric(sort["u64"]).WithTypeArgs({}, &type_system); - u8 << u16 << u32 << u64; + MakeSubtype(u8, u16, &type_system); + MakeSubtype(u16, u32, &type_system); + MakeSubtype(u32, u64, &type_system); auto nv = [&variables] { return variables.NewVar(); }; @@ -59,7 +63,7 @@ TEST_F(VerifierTest, AbsIntExecContext) uint8_t instructions[128]; - ExecContext exec_ctx {&instructions[0], &instructions[127]}; + ExecContext exec_ctx {&instructions[0], &instructions[127], &type_system}; std::array cp = {&instructions[8], &instructions[17], &instructions[23], &instructions[49], &instructions[73], &instructions[103]}; diff --git a/verification/absint/tests/reg_context_test.cpp b/verification/absint/tests/reg_context_test.cpp index 18b0285b100f1132e84421cdedebee4c3eda6115..56006e7b60abf3578f19b1ef4c3d6089a2cd9ec7 100644 --- a/verification/absint/tests/reg_context_test.cpp +++ b/verification/absint/tests/reg_context_test.cpp @@ -34,19 +34,23 @@ TEST_F(VerifierTest, AbsIntRegContext) TypeSystem type_system {sort["Bot"], sort["Top"]}; Variables variables; - auto i8 = type_system.Parametric(sort["i8"])(); - auto i16 = type_system.Parametric(sort["i16"])(); - auto i32 = type_system.Parametric(sort["i32"])(); - auto i64 = type_system.Parametric(sort["i64"])(); + auto i8 = type_system.Parametric(sort["i8"]).WithTypeArgs({}, &type_system); + auto i16 = type_system.Parametric(sort["i16"]).WithTypeArgs({}, &type_system); + auto i32 = type_system.Parametric(sort["i32"]).WithTypeArgs({}, &type_system); + auto i64 = 
type_system.Parametric(sort["i64"]).WithTypeArgs({}, &type_system); - i8 << i16 << i32 << i64; + MakeSubtype(i8, i16, &type_system); + MakeSubtype(i16, i32, &type_system); + MakeSubtype(i32, i64, &type_system); - auto u8 = type_system.Parametric(sort["u8"])(); - auto u16 = type_system.Parametric(sort["u16"])(); - auto u32 = type_system.Parametric(sort["u32"])(); - auto u64 = type_system.Parametric(sort["u64"])(); + auto u8 = type_system.Parametric(sort["u8"]).WithTypeArgs({}, &type_system); + auto u16 = type_system.Parametric(sort["u16"]).WithTypeArgs({}, &type_system); + auto u32 = type_system.Parametric(sort["u32"]).WithTypeArgs({}, &type_system); + auto u64 = type_system.Parametric(sort["u64"]).WithTypeArgs({}, &type_system); - u8 << u16 << u32 << u64; + MakeSubtype(u8, u16, &type_system); + MakeSubtype(u16, u32, &type_system); + MakeSubtype(u32, u64, &type_system); auto nv = [&variables] { return variables.NewVar(); }; @@ -59,16 +63,16 @@ TEST_F(VerifierTest, AbsIntRegContext) ctx1[-1] = av1; ctx2[0] = av2; - auto ctx3 = ctx1 & ctx2; + auto ctx3 = RcUnion(&ctx1, &ctx2, &type_system); - ctx3.RemoveInconsistentRegs(); + ctx3.RemoveInconsistentRegs(&type_system); EXPECT_EQ(ctx3.Size(), 0); ctx1[0] = av1; - ctx3 = ctx1 & ctx2; + ctx3 = RcUnion(&ctx1, &ctx2, &type_system); - ctx3.RemoveInconsistentRegs(); + ctx3.RemoveInconsistentRegs(&type_system); EXPECT_EQ(ctx3.Size(), 1); EXPECT_EQ(ctx3[0].GetAbstractType().GetType(), i32); diff --git a/verification/absint/verification_context.h b/verification/absint/verification_context.h index 64bb7a19e064808602ce7a96f33378c9c6f1fbcb..2662dbf46590f9ee2590ddc18d5676866a1e8975 100644 --- a/verification/absint/verification_context.h +++ b/verification/absint/verification_context.h @@ -24,7 +24,7 @@ #include "verification/cflow/cflow_info.h" #include "verification/jobs/job.h" #include "verification/jobs/cache.h" -#include "verification/type/type_systems.h" +#include "verification/plugins.h" #include "verification/util/lazy.h" 
#include "verification/util/callable.h" #include "verification/value/variables.h" @@ -43,7 +43,8 @@ public: Job_ {job}, MethodClass_ {method_class_type}, ExecCtx_ {CflowInfo().InstMap().AddrStart(), - CflowInfo().InstMap().AddrEnd()} + CflowInfo().InstMap().AddrEnd(), &panda_types.GetTypeSystem()}, + Plugin_ {plugin::GetLanguagePlugin(job.JobCachedMethod().GetSourceLang())} { // set checkpoints for reg_context storage // start of method is checkpoint too @@ -96,6 +97,11 @@ public: return Types_; } + TypeSystem const *GetTypeSystem() + { + return &Types_.GetTypeSystem(); + } + Var NewVar() { return Types_.NewVar(); @@ -111,12 +117,18 @@ public: ReturnType_ = type; } + plugin::Plugin const *GetPlugin() + { + return Plugin_; + } + private: PandaTypes &Types_; const Job &Job_; Type ReturnType_; Type MethodClass_; ExecContext ExecCtx_; + plugin::Plugin const *Plugin_; }; } // namespace panda::verifier diff --git a/verification/default_plugin.cpp b/verification/default_plugin.cpp new file mode 100644 index 0000000000000000000000000000000000000000..60970be37bf8342b63e0efd6da3c4af4e935926b --- /dev/null +++ b/verification/default_plugin.cpp @@ -0,0 +1,198 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "default_plugin.h" +#include "abs_int_inl_compat_checks.h" +#include "source_lang_enum.h" +#include "verification/jobs/cache.h" +#include "verification/type/type_type.h" +#include "verifier_messages.h" + +namespace panda::verifier::plugin { + +// Dummy implementation +// TODO(gogabr): make it more sensible +void DefaultPlugin::TypeSystemSetup([[maybe_unused]] PandaTypes *types) const {} + +static void SetArraySubtyping(Type t, PandaTypes *types) +{ + auto type_system = &types->GetTypeSystem(); + PandaVector to_process; + PandaVector just_subtype; + t.ForAllSupertypes(type_system, [&](Type st) { + if (!types->Array().ExistsWithTypeArgs({+st}, type_system)) { + to_process.emplace_back(st); + } else { + just_subtype.emplace_back(st); + } + return true; + }); + auto array_type = types->Array().WithTypeArgs({+t}, type_system); + for (const auto &st : just_subtype) { + MakeSubtype(array_type, types->Array().WithTypeArgs({+st}, type_system), type_system); + } + for (const auto &st : to_process) { + MakeSubtype(array_type, types->Array().WithTypeArgs({+st}, type_system), type_system); + SetArraySubtyping(st, types); + } +} + +Type TypeOfArray(CachedClass const *klass, PandaTypes *types) +{ + ASSERT(klass->flags[CachedClass::Flag::ARRAY_CLASS]); + + Type type {}; + auto type_system = &types->GetTypeSystem(); + auto component = klass->GetArrayComponent(); + if (component.HasRef()) { + auto component_type = types->TypeOf(component.Get()); + type = types->Array().WithTypeArgs({+component_type}, type_system); + SetArraySubtyping(component_type, types); + } else { + type = types->Array().WithTypeArgs({+types->Top()}, type_system); + LOG_VERIFIER_JAVA_TYPES_ARRAY_COMPONENT_TYPE_IS_UNDEFINED(); + } + MakeSubtype(type, types->ArrayType(), type_system); + if (klass->flags[CachedClass::Flag::OBJECT_ARRAY_CLASS]) { + MakeSubtype(type, types->ObjectArrayType(), type_system); + } + + return type; +} + +Type DefaultPlugin::TypeOfClass(CachedClass const *klass, PandaTypes 
*types) const +{ + auto type_system = &types->GetTypeSystem(); + PandaVector supertypes; + for (const auto &ancestor : klass->ancestors) { + // ancestor here cannot be unresolved descriptor + ASSERT(LibCache::IsRef(ancestor)); + supertypes.emplace_back(types->TypeOf(LibCache::GetRef(ancestor))); + } + + bool is_primitive = klass->flags[CachedClass::Flag::PRIMITIVE]; + auto class_name = klass->GetName(); + Type type {}; + if (klass->flags[CachedClass::Flag::ARRAY_CLASS]) { + type = TypeOfArray(klass, types); + } else if (is_primitive) { + type = types->TypeOf(klass->type_id); + } else { + type = types->TypeForName(class_name); + } + + if (!is_primitive) { + bool is_string = klass->flags[CachedClass::Flag::STRING_CLASS]; + if (is_string) { + MakeSubtype(type, types->StringType(), type_system); + } else { + MakeSubtype(type, types->ObjectType(), type_system); + } + MakeSubtype(types->NullRefType(), type, type_system); + MakeSubtype(type, types->RefType(), type_system); + MakeSubtype(types->TypeClass().WithTypeArgs({~type}, type_system), types->TypeClassType(), type_system); + MakeSubtype(types->TypeClassType(), types->RefType(), type_system); + } + if (klass->flags[CachedClass::Flag::ABSTRACT]) { + MakeSubtype(types->Abstract().WithTypeArgs({~type}, type_system), types->AbstractType(), type_system); + } + for (auto &super : supertypes) { + MakeSubtype(type, super, type_system); + } + + return type; +} + +Type DefaultPlugin::TypeOfMethod(CachedMethod const *method, PandaTypes *types) const +{ + auto type_system = &types->GetTypeSystem(); + auto &&sig = types->MethodSignature(*method); + Type type = types->Method().WithTypeArgs(sig, type_system); + MakeSubtype(type, types->MethodType(), type_system); + return type; +} + +Type DefaultPlugin::TypeOfTypeId(panda_file::Type::TypeId id, [[maybe_unused]] PandaTypes *types) const +{ + using TypeId = panda_file::Type::TypeId; + Type type {}; + switch (id) { + case TypeId::VOID: + type = types->Top(); + break; + case TypeId::U1: 
+ type = types->U1(); + break; + case TypeId::I8: + type = types->I8(); + break; + case TypeId::U8: + type = types->U8(); + break; + case TypeId::I16: + type = types->I16(); + break; + case TypeId::U16: + type = types->U16(); + break; + case TypeId::I32: + type = types->I32(); + break; + case TypeId::U32: + type = types->U32(); + break; + case TypeId::I64: + type = types->I64(); + break; + case TypeId::U64: + type = types->U64(); + break; + case TypeId::F32: + type = types->F32(); + break; + case TypeId::F64: + type = types->F64(); + break; + case TypeId::REFERENCE: + type = types->RefType(); + break; + default: + LOG_VERIFIER_JAVA_TYPES_CANNOT_CONVERT_TYPE_ID_TO_TYPE(id); + type = types->Top(); + } + return type; +} + +Type DefaultPlugin::NormalizeType(Type type, [[maybe_unused]] PandaTypes *types) const +{ + auto type_system = &types->GetTypeSystem(); + Type result = type; + if (IsSubtype(type, types->Integral32Type(), type_system)) { + result = types->Normalize().WithTypeArgs({~types->Integral32Type()}, type_system); + } else if (IsSubtype(type, types->Integral64Type(), type_system)) { + result = types->Normalize().WithTypeArgs({~types->Integral64Type()}, type_system); + // NOLINTNEXTLINE(bugprone-branch-clone) + } else if (IsSubtype(type, types->F32(), type_system)) { + result = types->Normalize().WithTypeArgs({~types->F32()}, type_system); + } else if (IsSubtype(type, types->F64(), type_system)) { + result = types->Normalize().WithTypeArgs({~types->F64()}, type_system); + } else if (IsSubtype(type, types->MethodType(), type_system)) { + result = types->NormalizedMethod().WithTypeArgs(types->NormalizeMethodSignature(type.GetTypeArgs(type_system)), + type_system); + } + return result; +} + +} // namespace panda::verifier::plugin diff --git a/verification/default_plugin.h b/verification/default_plugin.h new file mode 100644 index 0000000000000000000000000000000000000000..c9e6d4f3bcc0307029fb9d603928da3d2eb92ca1 --- /dev/null +++ b/verification/default_plugin.h @@ 
-0,0 +1,59 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef PANDA_VERIFICATION_DEFAULT_PLUGIN_H__ +#define PANDA_VERIFICATION_DEFAULT_PLUGIN_H__ + +#include "abs_int_inl_compat_checks.h" +#include "verification/jobs/cache.h" +#include "verification/plugins.h" + +namespace panda::verifier::plugin { + +class DefaultPlugin final : public Plugin { +public: + void TypeSystemSetup(PandaTypes *types) const override; + + CheckResult const &CheckFieldAccessViolation([[maybe_unused]] CachedField const *field, + [[maybe_unused]] CachedMethod const *from, + [[maybe_unused]] PandaTypes *types) const override + { + return CheckResult::ok; + } + + CheckResult const &CheckMethodAccessViolation([[maybe_unused]] CachedMethod const *method, + [[maybe_unused]] CachedMethod const *from, + [[maybe_unused]] PandaTypes *types) const override + { + return CheckResult::ok; + } + + CheckResult const &CheckClassAccessViolation([[maybe_unused]] CachedClass const *method, + [[maybe_unused]] CachedMethod const *from, + [[maybe_unused]] PandaTypes *types) const override + { + return CheckResult::ok; + } + + Type TypeOfClass(CachedClass const *klass, PandaTypes *types) const override; + Type TypeOfMethod(CachedMethod const *method, PandaTypes *types) const override; + Type TypeOfTypeId(panda_file::Type::TypeId id, PandaTypes *types) const override; + + Type NormalizeType(Type type, PandaTypes *types) const override; +}; 
+ +} // namespace panda::verifier::plugin + +#endif diff --git a/verification/gen/BUILD.gn b/verification/gen/BUILD.gn index 89304c4b3b75852cf95e789b67ff97f199dfea45..7425ef5d65c6e6fa68dfd4caa522a2f3926f3fb0 100644 --- a/verification/gen/BUILD.gn +++ b/verification/gen/BUILD.gn @@ -50,12 +50,12 @@ ark_gen_file("verification_verifier_messages_h") { output_file = "$target_gen_dir/include/verifier_messages.h" } -ark_gen_file("lang_specifics_h") { +ark_gen_file("verification_gen_plugins_gen_inc") { extra_dependencies = [ "$ark_root:concat_plugins_yamls" ] - template_file = "$ark_root/verification/gen/templates/lang_specifics.h.erb" + template_file = "$ark_root/verification/gen/templates/plugins_gen.inc.erb" data_file = "$target_gen_dir/../../plugin_options.yaml" requires = [ "$ark_root/templates/plugin_options.rb" ] - output_file = "$target_gen_dir/include/lang_specifics.h" + output_file = "$target_gen_dir/include/plugins_gen.inc" } ark_gen_file("verification_verifier_messages_data_cpp") { diff --git a/verification/gen/templates/abs_int_inl_compat_checks.h.erb b/verification/gen/templates/abs_int_inl_compat_checks.h.erb index fe9bc4f4ec4eede709bb7a23dfe3b862c6564045..d1a4009a51537bd9f8550e53be9503067f8a51a0 100644 --- a/verification/gen/templates/abs_int_inl_compat_checks.h.erb +++ b/verification/gen/templates/abs_int_inl_compat_checks.h.erb @@ -16,9 +16,9 @@ #ifndef PANDA_VERIFIER_COMPAT_CHECKS_HPP_ #define PANDA_VERIFIER_COMPAT_CHECKS_HPP_ -#include "absint/verification_status.h" -#include "util/str.h" #include "verification/absint/panda_types.h" +#include "verification/absint/verification_status.h" +#include "verification/util/str.h" % checks = Verification.compatibility_checks namespace panda::verifier { diff --git a/verification/gen/templates/job_fill_gen.h.erb b/verification/gen/templates/job_fill_gen.h.erb index dd0ee9621556565da65a9b760d6f25797474af89..49fe150add0ae1df94985f8216dada5ca9da43b9 100644 --- a/verification/gen/templates/job_fill_gen.h.erb +++ 
b/verification/gen/templates/job_fill_gen.h.erb @@ -86,7 +86,11 @@ bool Job::ResolveIdentifiers(LibCache &cache) { % combination_flags += "FieldId_" % end % if i.properties.include?('type_id') -% combination_flags += "TypeId_" +% if i.verification.include?('type_id_class') +% combination_flags += "ClassId_" +% else +% combination_flags += "TypeId_" +% end % end % if i.properties.include?('string_id') % combination_flags += "StringId_" @@ -102,7 +106,7 @@ bool Job::ResolveIdentifiers(LibCache &cache) { % body_gen_parts = { % "Prop_" => %( % LOG_INST(); -% auto id = inst.GetId(); +% [[maybe_unused]] auto id = inst.GetId(); % ), % "CacheApi_" => %( % auto cache_api = cache.FastAPI(); @@ -113,7 +117,7 @@ bool Job::ResolveIdentifiers(LibCache &cache) { % const auto& pf = *cached_method_.file; % panda_file::LiteralTag tag; % panda_file::LiteralDataAccessor::LiteralValue value; -% if (!Runtime::GetLiteralTagAndValue(pf, id.AsFileId().GetOffset(), &tag, &value)) { +% if (!Runtime::GetLiteralTagAndValue(pf, pf.GetLiteralArrays()[id.AsIndex()], &tag, &value)) { % LOG(DEBUG, VERIFIER) << "JOBFILL: Cannot get literal tag with id=" << std::hex << id << " for offset 0x" << std::hex << inst.GetOffset(); % } else { % OptionalConstRef cached_class; @@ -205,6 +209,15 @@ bool Job::ResolveIdentifiers(LibCache &cache) { % LOG(DEBUG, VERIFIER) << "JOBFILL: Cannot resolve string class in method " << cached_method_.GetName(); % } % ), +% "ClassId_" => %( +% // We are in lda.type bytecode handler. 
acc type is going to be java.lang.Class +% auto cached_class_class = cache.FastAPI().GetClassClass(cached_method_); +% if (cached_class_class.HasRef()) { +% AddClass(inst.GetOffset(), cached_class_class.Get()); +% } else { +% LOG(ERROR, VERIFIER) << "JOBFILL: Cannot resolve Class class in method " << cached_method_.GetName(); +% } +% ), % "GetNext_" => %( % if (inst.IsLast()) { % return true; diff --git a/verification/gen/templates/lang_specifics.h.erb b/verification/gen/templates/lang_specifics.h.erb deleted file mode 100644 index bf7bea7851e0d76e54ff513358533f9f0054f934..0000000000000000000000000000000000000000 --- a/verification/gen/templates/lang_specifics.h.erb +++ /dev/null @@ -1,124 +0,0 @@ - -/** -* Copyright (c) 2021 Huawei Device Co., Ltd. -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -// Autogenerated file -- DO NOT EDIT! 
- -#include "source_languages.h" -#include "verification/absint/panda_types.h" -#include "verification/jobs/job.h" - -#ifndef VERIFIER_LANG_SPECIFICS_H -#define VERIFIER_LANG_SPECIFICS_H - -namespace panda::verifier { - -% Common::plugins.each do |plugin_lang, plugin_opts| -% if plugin_lang == "JAVA" - - inline TypeRelationship GetRelationship(const Type &type1, const Type &type2) - { - if (type1 == type2) { - return TypeRelationship::SAME; - } else if (type1 <= type2) { - return TypeRelationship::DESCENDANT; - } else { - return TypeRelationship::OTHER; - } - } - - inline TypeRelationship GetRelationship(const Job &job, PandaTypes &types, - const CachedClass &class1, - const CachedClass &class2) - { - auto lang = job.JobCachedMethod().GetSourceLang(); - if (lang == panda::panda_file::SourceLang::JAVA_8) { - TypeRelationship relationship = GetRelationship(types.TypeOf(class1), types.TypeOf(class2)); - if (relationship != TypeRelationship::SAME && class1.IsSamePackage(class2)) { - // Member access levels strength: - // SAME (same class) << NEIGHBOUR (same package) << DESCENDANT (subclass) << OTHER - return TypeRelationship::NEIGHBOUR; - } - return relationship; - } - // For non-java language context return TypeRelationship::SAME. - // Though it is not semantically correct, the TypeRelationship is used in access - // checks only, and SAME helps to pass the access checks for non-java context. 
- return TypeRelationship::SAME; - } - - // works for both fields and methods - template - AccessModifier GetAccessMode(const T *x) - { - if (x->flags[T::Flag::PRIVATE]) { - return AccessModifier::PRIVATE; - } else if (x->flags[T::Flag::PROTECTED]) { - return AccessModifier::PROTECTED; - } else if (x->flags[T::Flag::PUBLIC]) { - return AccessModifier::PUBLIC; - } else { - return AccessModifier::PACKAGE; - } - } - -% end -% end - - inline const CheckResult& CheckFieldAccessViolation([[maybe_unused]] const CachedField *field, - [[maybe_unused]] const Job ¤tJob, - [[maybe_unused]] PandaTypes &types) { -% Common::plugins.each do |plugin_lang, plugin_opts| -% if plugin_lang == "JAVA" - auto relation = GetRelationship(currentJob, types, currentJob.JobCachedMethod().klass, field->klass); - AccessModifier access_mode = GetAccessMode(field); - return panda::verifier::CheckFieldAccess(relation, access_mode); -% end -% end - return CheckResult::ok; - } - - inline const CheckResult& CheckMethodAccessViolation([[maybe_unused]] const CachedMethod *method, - [[maybe_unused]] const Job ¤tJob, - [[maybe_unused]] PandaTypes &types) { -% Common::plugins.each do |plugin_lang, plugin_opts| -% if plugin_lang == "JAVA" - auto relation = GetRelationship(currentJob, types, currentJob.JobCachedMethod().klass, method->klass); - auto access_mode = GetAccessMode(method); - return panda::verifier::CheckCall(relation, access_mode); -% end -% end - return CheckResult::ok; - } - - inline const CheckResult& CheckClassAccessViolation([[maybe_unused]] const CachedClass *cached_class, - [[maybe_unused]] const Job ¤tJob, - [[maybe_unused]] PandaTypes &types) { -% Common::plugins.each do |plugin_lang, plugin_opts| -% if plugin_lang == "JAVA" - auto relation = GetRelationship(currentJob, types, currentJob.JobCachedMethod().klass, *cached_class); - if (relation == TypeRelationship::OTHER) { - auto is_public = cached_class->flags[CachedClass::Flag::PUBLIC]; - if (!is_public) { - return 
CheckResult::protected_class; - } - } -% end -% end - return CheckResult::ok; - } -} - -#endif // VERIFIER_LANG_SPECIFICS_H diff --git a/verification/gen/templates/plugins_gen.inc.erb b/verification/gen/templates/plugins_gen.inc.erb new file mode 100644 index 0000000000000000000000000000000000000000..59e68872d1c996fdb33d0e27456b716393b0ae6d --- /dev/null +++ b/verification/gen/templates/plugins_gen.inc.erb @@ -0,0 +1,63 @@ + +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Autogenerated file -- DO NOT EDIT! + +#include +#include +#include +#include "macros.h" +#include "source_lang_enum.h" +#include "verification/plugins.h" +#include "verification/default_plugin.h" +% Common::plugins.each do |plugin_lang, plugin_opts| +% next unless plugin_opts["Verification"] +% if plugin_opts["Verification"]["header"] +#include "<%= plugin_opts["Verification"]["header"] %>" +% end +% end + +namespace panda::verifier::plugin { + +% Common::plugins.each do |plugin_lang, plugin_opts| +% next unless plugin_opts["Verification"] +% if plugin_opts["Verification"]["header"] +static <%= plugin_opts["Verification"]["plugin_class"] %> <%= plugin_lang %>_plugin; +% end +% end +static DefaultPlugin dflt; + +static std::unordered_map const plugins_map { +% Common::plugins.each do |plugin_lang, plugin_opts| +% short_plugin_lang = plugin_lang == "JAVA" ? 
"JAVA_8" : plugin_lang +% next unless plugin_opts["Verification"] +% if plugin_opts["Verification"]["plugin_class"] + { panda_file::SourceLang::<%= short_plugin_lang %>, + &<%= plugin_lang %>_plugin }, +% end +% end + { panda_file::SourceLang::PANDA_ASSEMBLY, &dflt} +}; + +Plugin const *GetLanguagePlugin(panda_file::SourceLang lang) +{ + if (plugins_map.count(lang) > 0) { + return plugins_map.at(lang); + } + return &dflt; +} + +} \ No newline at end of file diff --git a/verification/jobs/cache.cpp b/verification/jobs/cache.cpp index 664712c0ae6b0b0a0a0c62bfa8ed3ec2e344cac6..ed7224394c055f26e5f9f65040d49fb127468818 100644 --- a/verification/jobs/cache.cpp +++ b/verification/jobs/cache.cpp @@ -288,6 +288,7 @@ void FastAPIClassRW::InitializeRootClasses(panda_file::SourceLang lang) data.object_descr = obj_descriptor; data.string_descr = ctx->GetStringClassDescriptor(); + data.class_descr = ctx->GetClassClassDescriptor(); data.string_array_descr = ctx->GetStringArrayClassDescriptor(); VerificationInitAPI v_api = ctx->GetVerificationInitAPI(); @@ -1067,6 +1068,14 @@ OptionalConstRef FastAPIClassRW::GetStringClass(const Lib return ResolveAndLink(src_lang, descriptor); } +template <> +OptionalConstRef FastAPIClassRW::GetClassClass(const LibCache::CachedMethod &cachedMethod) +{ + panda_file::SourceLang src_lang = cachedMethod.GetSourceLang(); + auto descriptor = GetContext(src_lang).class_descr; + return ResolveAndLink(src_lang, descriptor); +} + template <> OptionalConstRef FastAPIClassRW::GetStringArrayClass(panda_file::SourceLang src_lang) { diff --git a/verification/jobs/cache.h b/verification/jobs/cache.h index 7fd921ac7747c4e19cf7415cb382c02b0780ce5c..bf4d3999131e6608614ce1fc95f5d71ae50775c3 100644 --- a/verification/jobs/cache.h +++ b/verification/jobs/cache.h @@ -424,6 +424,7 @@ public: FileCache file_cache; PandaUnorderedMap indexes_cache; DescriptorString string_descr; + DescriptorString class_descr; DescriptorString object_descr; DescriptorString 
string_array_descr; @@ -482,6 +483,7 @@ public: // signatures for convenience, see job_fill_gen.h.erb OptionalConstRef GetStringClass(const CachedMethod &cachedMethod); OptionalConstRef GetStringArrayClass(panda_file::SourceLang src_lang); + OptionalConstRef GetClassClass(const CachedMethod &cachedMethod); void ProcessFile(const panda_file::File &pf); diff --git a/verification/jobs/job.cpp b/verification/jobs/job.cpp index d4b41f06ca485f164e488b39f1d5e2843a5d0013..f6535f9e32c869ca13a0984a94ca2f84d6de46d5 100644 --- a/verification/jobs/job.cpp +++ b/verification/jobs/job.cpp @@ -31,7 +31,7 @@ bool Job::UpdateTypes(PandaTypes &types) const return is_valid; }; ForAllCachedClasses([&](const LibCache::CachedClass &klass) { result = result && has_type(klass); }); - ForAllCachedMethods([&types](const LibCache::CachedMethod &method) { types.NormalizedMethodSignature(method); }); + ForAllCachedMethods([&](const LibCache::CachedMethod &method) { types.NormalizedMethodSignature(method); }); ForAllCachedFields([&](const LibCache::CachedField &field) { result = result && has_type(field.klass) && has_type(LibCache::GetRef(field.type)); }); diff --git a/verification/jobs/job.h b/verification/jobs/job.h index 1553246e297aa0f6052e280966fe6470a4637ea1..f606a1f9e2556dea7ae2292dd06d1e4500a0cb24 100644 --- a/verification/jobs/job.h +++ b/verification/jobs/job.h @@ -19,6 +19,7 @@ #include "verification/jobs/cache.h" #include "verification/cflow/cflow_info.h" #include "verification/verification_options.h" +#include "verification/plugins.h" #include "runtime/include/method.h" @@ -30,7 +31,10 @@ namespace panda::verifier { class Job { public: Job(Method &method, const CachedMethod &cached_method, const MethodOptions &options) - : method_ {method}, cached_method_ {cached_method}, options_ {options} + : method_ {method}, + cached_method_ {cached_method}, + options_ {options}, + plugin_ {plugin::GetLanguagePlugin(cached_method.GetSourceLang())} { } @@ -78,6 +82,11 @@ public: return 
*cflow_info_; } + const plugin::Plugin *JobPlugin() const + { + return plugin_; + } + template void ForAllCachedClasses(Handler &&handler) const { @@ -115,6 +124,8 @@ private: const MethodOptions &options_; PandaUniquePtr cflow_info_; + plugin::Plugin const *const plugin_; + // TODO(vdyadov): store file_id for double check during verification // offset -> cache item PandaUnorderedMap> fields_; diff --git a/verification/jobs/thread_pool.cpp b/verification/jobs/thread_pool.cpp index 39181ecee02d44368378b9adf033ae9b1320cf2b..dbff52a06821625d5797910b23f3e7fbf86a79fe 100644 --- a/verification/jobs/thread_pool.cpp +++ b/verification/jobs/thread_pool.cpp @@ -22,7 +22,7 @@ namespace panda::verifier { bool Processor::Init() { PandaString thread_name {"verifier#"}; - thread_name += NumToStr(types_.GetThreadNum()); + thread_name += NumToStr(thread_num_); panda::os::thread::SetThreadName(panda::os::thread::GetNativeHandle(), thread_name.c_str()); LOG(DEBUG, VERIFIER) << "Thread ID " << panda::os::thread::GetCurrentThreadId() << " is named " << thread_name; return true; @@ -51,7 +51,8 @@ bool Processor::Process(Task task) return true; } auto &cache = opt_cache.Get(); - auto opt_cached_method = cache.FastAPI().GetMethod(method.GetClass()->GetSourceLang(), method.GetUniqId(), true); + auto source_lang = method.GetClass()->GetSourceLang(); + auto opt_cached_method = cache.FastAPI().GetMethod(source_lang, method.GetUniqId(), true); if (!opt_cached_method.HasRef()) { LOG(ERROR, VERIFIER) << "Method '" << method.GetFullName(true) << "' not found in verifier cache, cannot create a job."; @@ -61,11 +62,11 @@ bool Processor::Process(Task task) const auto &method_options = panda::Runtime::GetCurrent()->GetVerificationConfig()->opts_.Debug.GetMethodOptions(); const auto &verif_method_options = method_options[method_name]; LOG(DEBUG, VERIFIER) << "Verification config for '" << method_name << "': " << verif_method_options.GetName(); - LOG(INFO, VERIFIER) << "Verifier thread #" << 
types_.GetThreadNum() << " started verification of method '" + LOG(INFO, VERIFIER) << "Verifier thread #" << thread_num_ << " started verification of method '" << method.GetFullName(true) << "'"; Job job {method, opt_cached_method.Get(), verif_method_options}; - bool result = job.DoChecks(cache, types_); + bool result = job.DoChecks(cache, *types_.at(source_lang)); method.SetVerified(result); LOG(INFO, VERIFIER) << "Verification result for method " << method.GetFullName(true) << (result ? ": OK" : ": FAIL"); @@ -78,8 +79,11 @@ bool Processor::Destroy() bool show_subtyping = panda::Runtime::GetCurrentSync( [](auto &instance) { return instance.GetVerificationConfig()->opts_.Debug.Show.TypeSystem; }); if (show_subtyping) { - LOG(DEBUG, VERIFIER) << "Typesystem of verifier thread #" << types_.GetThreadNum(); - types_.DisplayTypeSystem([](const auto &str) { LOG(DEBUG, VERIFIER) << str; }); + LOG(DEBUG, VERIFIER) << "Typesystems of verifier thread #" << thread_num_; + for (auto lang : panda_file::LANG_ITERATOR) { + LOG(DEBUG, VERIFIER) << lang; + types_.at(lang)->DisplayTypeSystem([](const auto &str) { LOG(DEBUG, VERIFIER) << str; }); + } } return true; } diff --git a/verification/jobs/thread_pool.h b/verification/jobs/thread_pool.h index 5be8e82df9d4b906ce4eecc104c8235f4f0f3909..f14e9dfaddf35ff9e7d419149cf03b8c11e78ae7 100644 --- a/verification/jobs/thread_pool.h +++ b/verification/jobs/thread_pool.h @@ -101,10 +101,13 @@ class Processor final : public ProcessorInterface { public: explicit Processor(std::monostate dummy) // Atomic with acq_rel order reason: could be relaxed, but we need to allow reinitialization for tests - : types_ {next_thread_num_.fetch_add(1, std::memory_order_acq_rel)} + : thread_num_ {next_thread_num_.fetch_add(1, std::memory_order_acq_rel)} { // GCC 8 doesn't accept [[maybe_unused]] in this particular case (void)dummy; + for (auto lang : panda_file::LANG_ITERATOR) { + types_.emplace(lang, std::make_unique(lang)); + } } ~Processor() = default; 
NO_COPY_SEMANTIC(Processor); @@ -115,7 +118,8 @@ public: bool Destroy() override; private: - PandaTypes types_; + ThreadNum thread_num_; + std::unordered_map> types_; inline static std::atomic next_thread_num_ {0}; friend class ThreadPool; }; diff --git a/verification/plugins.cpp b/verification/plugins.cpp new file mode 100644 index 0000000000000000000000000000000000000000..39b5fe7681aa5c83283cecaf56243464f10deb08 --- /dev/null +++ b/verification/plugins.cpp @@ -0,0 +1 @@ +#include "plugins_gen.inc" diff --git a/verification/plugins.h b/verification/plugins.h new file mode 100644 index 0000000000000000000000000000000000000000..537960628759cd521d3328ed4133bea6e01f8953 --- /dev/null +++ b/verification/plugins.h @@ -0,0 +1,50 @@ +/** + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef PANDA_VERIFICATION_PLUGINS_H__ +#define PANDA_VERIFICATION_PLUGINS_H__ + +#include "abs_int_inl_compat_checks.h" +#include "source_lang_enum.h" +#include "verification/jobs/cache.h" +#include "verification/type/type_type.h" + +namespace panda::verifier::plugin { + +class Plugin { +public: + virtual void TypeSystemSetup(PandaTypes *types) const = 0; + + virtual CheckResult const &CheckFieldAccessViolation(CachedField const *field, CachedMethod const *from, + PandaTypes *types) const = 0; + + virtual CheckResult const &CheckMethodAccessViolation(CachedMethod const *method, CachedMethod const *from, + PandaTypes *types) const = 0; + + virtual CheckResult const &CheckClassAccessViolation(CachedClass const *klass, CachedMethod const *from, + PandaTypes *types) const = 0; + + virtual Type TypeOfClass(CachedClass const *klass, PandaTypes *types) const = 0; + virtual Type TypeOfMethod(CachedMethod const *method, PandaTypes *types) const = 0; + virtual Type TypeOfTypeId(panda_file::Type::TypeId id, PandaTypes *types) const = 0; + + virtual Type NormalizeType(Type type, PandaTypes *types) const = 0; +}; + +Plugin const *GetLanguagePlugin(panda_file::SourceLang lang); + +} // namespace panda::verifier::plugin + +#endif diff --git a/verification/public.cpp b/verification/public.cpp index ecd64a78d32246b7030be9d328a21ae587717e20..ed4641c6746209090a1beb02cc32d395cd9f1569 100644 --- a/verification/public.cpp +++ b/verification/public.cpp @@ -19,7 +19,6 @@ #include "verification/config/context/context.h" #include "verification/cache/results_cache.h" #include "verification/jobs/thread_pool.h" -#include "verification/type/type_systems.h" namespace panda::verifier { @@ -72,7 +71,6 @@ struct Service { Service *CreateService(Config const *config, int num_threads, panda::mem::InternalAllocatorPtr allocator, std::string const &cache_file_name) { - TypeSystems::Initialize(num_threads); ThreadPool::Initialize(allocator, num_threads); if (!cache_file_name.empty()) { 
VerificationResultCache::Initialize(cache_file_name); @@ -93,7 +91,6 @@ void DestroyService(Service *service, bool update_cache_file) return; } ThreadPool::Destroy(); - TypeSystems::Destroy(); VerificationResultCache::Destroy(update_cache_file); delete service; } diff --git a/verification/type/Type.cmake b/verification/type/Type.cmake index 60040fef2f97215ce370d70f0437a5e2e1be0632..6931c51b406fbdd84ba5494fb9d1b55cc8c39da5 100644 --- a/verification/type/Type.cmake +++ b/verification/type/Type.cmake @@ -12,15 +12,12 @@ # limitations under the License. set(TYPE_SOURCES + ${VERIFICATION_SOURCES_DIR}/type/type_arg.cpp ${VERIFICATION_SOURCES_DIR}/type/type_type.cpp - ${VERIFICATION_SOURCES_DIR}/type/type_param.cpp - ${VERIFICATION_SOURCES_DIR}/type/type_params.cpp ${VERIFICATION_SOURCES_DIR}/type/type_set.cpp ${VERIFICATION_SOURCES_DIR}/type/type_parametric.cpp - ${VERIFICATION_SOURCES_DIR}/type/type_systems.cpp ) set(TYPE_TESTS_SOURCES ${VERIFICATION_SOURCES_DIR}/type/tests/type_system_test.cpp ) - diff --git a/verification/type/subtyping_closure.h b/verification/type/subtyping_closure.h index 1b976e9ea418190c84121304c0f9a952c14b5bd3..908df2ca00dd7a4c8172a38eb1bbd2bd2868fb50 100644 --- a/verification/type/subtyping_closure.h +++ b/verification/type/subtyping_closure.h @@ -16,9 +16,9 @@ #ifndef _PANDA_SUBTYPING_CLOSURE_HPP #define _PANDA_SUBTYPING_CLOSURE_HPP -#include "type_sort.h" -#include "type_index.h" +#include "type_arg.h" #include "type_info.h" +#include "type_sort.h" #include "runtime/include/mem/panda_containers.h" diff --git a/verification/type/tests/type_system_test.cpp b/verification/type/tests/type_system_test.cpp index ffba0cb10f5b2fcc75b0854c764c9bb848d5b544..191a3f5b48146a6dd6dea5ae73edd80d9a059914 100644 --- a/verification/type/tests/type_system_test.cpp +++ b/verification/type/tests/type_system_test.cpp @@ -20,7 +20,6 @@ #include "type/type_system.h" #include "type/type_sort.h" #include "type/type_image.h" -#include "type/type_systems.h" #include 
"include/runtime.h" @@ -32,148 +31,151 @@ namespace panda::verifier::test { TEST_F(VerifierTest, TypeSystemIncrementalClosure) { - TypeSystems::Destroy(); - TypeSystems::Initialize(2); - auto &&typesystem = TypeSystems::Get(TypeSystemKind::PANDA, static_cast(1)); - auto paramType = [&typesystem](const auto &name) { - return typesystem.Parametric(TypeSystems::GetSort(TypeSystemKind::PANDA, static_cast(1), name)); - }; + SortNames sort_names {"Bot", "Top"}; + TypeSystem type_system {sort_names["Bot"], sort_names["Top"]}; + auto paramType = [&type_system, &sort_names](const auto &name) { return type_system.Parametric(sort_names[name]); }; - typesystem.SetIncrementalRelationClosureMode(true); - typesystem.SetDeferIncrementalRelationClosure(false); + type_system.SetIncrementalRelationClosureMode(true); + type_system.SetDeferIncrementalRelationClosure(false); - auto bot = typesystem.Bot(); - auto top = typesystem.Top(); + auto bot = type_system.Bot(); + auto top = type_system.Top(); - auto i8 = paramType("i8")(); - auto i16 = paramType("i16")(); - auto i32 = paramType("i32")(); - auto i64 = paramType("i64")(); + auto i8 = paramType("i8").WithTypeArgs({}, &type_system); + auto i16 = paramType("i16").WithTypeArgs({}, &type_system); + auto i32 = paramType("i32").WithTypeArgs({}, &type_system); + auto i64 = paramType("i64").WithTypeArgs({}, &type_system); - auto u8 = paramType("u8")(); - auto u16 = paramType("u16")(); - auto u32 = paramType("u32")(); - auto u64 = paramType("u64")(); + auto u8 = paramType("u8").WithTypeArgs({}, &type_system); + auto u16 = paramType("u16").WithTypeArgs({}, &type_system); + auto u32 = paramType("u32").WithTypeArgs({}, &type_system); + auto u64 = paramType("u64").WithTypeArgs({}, &type_system); auto method = paramType("method"); - auto top_method_of3args = method(-bot >> -bot >> +top); - auto bot_method_of3args = method(-top >> -top >> +bot); + auto top_method_of3args = method.WithTypeArgs({-bot, -bot, +top}, &type_system); + auto 
bot_method_of3args = method.WithTypeArgs({-top, -top, +bot}, &type_system); - auto method1 = method(-i8 >> -i8 >> +i64); - auto method2 = method(-i32 >> -i16 >> +i32); + auto method1 = method.WithTypeArgs({-i8, -i8, +i64}, &type_system); + auto method2 = method.WithTypeArgs({-i32, -i16, +i32}, &type_system); // method2 <: method1 - auto method3 = method(-i16 >> -method2 >> +method1); - auto method4 = method(-i64 >> -method1 >> +method2); + auto method3 = method.WithTypeArgs({-i16, -method2, +method1}, &type_system); + auto method4 = method.WithTypeArgs({-i64, -method1, +method2}, &type_system); // method4 <: method3 - EXPECT_TRUE(bot <= i8); - EXPECT_TRUE(bot <= u64); + EXPECT_TRUE(IsSubtype(bot, i8, &type_system)); + EXPECT_TRUE(IsSubtype(bot, u64, &type_system)); - EXPECT_TRUE(i8 <= top); - EXPECT_TRUE(u64 <= top); + EXPECT_TRUE(IsSubtype(i8, top, &type_system)); + EXPECT_TRUE(IsSubtype(u64, top, &type_system)); - i8 << (i16 | i32) << i64; - (u8 | u16) << (u32 | u64); + for (auto ii : {i16, i32}) { + MakeSubtype(i8, ii, &type_system); + MakeSubtype(ii, i64, &type_system); + } + for (auto ul : {u8, u16}) { + for (auto uh : {u32, u64}) { + MakeSubtype(ul, uh, &type_system); + } + } - EXPECT_TRUE(i8 <= i64); - EXPECT_TRUE(i16 <= i64); - EXPECT_TRUE(i32 <= i64); - EXPECT_FALSE(i16 <= i32); + EXPECT_TRUE(IsSubtype(i8, i64, &type_system)); + EXPECT_TRUE(IsSubtype(i16, i64, &type_system)); + EXPECT_TRUE(IsSubtype(i32, i64, &type_system)); + EXPECT_FALSE(IsSubtype(i16, i32, &type_system)); - EXPECT_TRUE(u8 <= u64); - EXPECT_TRUE(u16 <= u64); - EXPECT_FALSE(u8 <= u16); - EXPECT_FALSE(u32 <= u64); + EXPECT_TRUE(IsSubtype(u8, u64, &type_system)); + EXPECT_TRUE(IsSubtype(u16, u64, &type_system)); + EXPECT_FALSE(IsSubtype(u8, u16, &type_system)); + EXPECT_FALSE(IsSubtype(u32, u64, &type_system)); - EXPECT_TRUE(method2 <= method1); - EXPECT_FALSE(method1 <= method2); + EXPECT_TRUE(IsSubtype(method2, method1, &type_system)); + EXPECT_FALSE(IsSubtype(method1, method2, 
&type_system)); - EXPECT_TRUE(method4 <= method3); - EXPECT_FALSE(method3 <= method4); + EXPECT_TRUE(IsSubtype(method4, method3, &type_system)); + EXPECT_FALSE(IsSubtype(method3, method4, &type_system)); - EXPECT_TRUE(bot_method_of3args <= method1); - EXPECT_TRUE(bot_method_of3args <= method4); + EXPECT_TRUE(IsSubtype(bot_method_of3args, method1, &type_system)); + EXPECT_TRUE(IsSubtype(bot_method_of3args, method4, &type_system)); - EXPECT_TRUE(method1 <= top_method_of3args); - EXPECT_TRUE(method4 <= top_method_of3args); - TypeSystems::Destroy(); + EXPECT_TRUE(IsSubtype(method1, top_method_of3args, &type_system)); + EXPECT_TRUE(IsSubtype(method4, top_method_of3args, &type_system)); } TEST_F(VerifierTest, TypeSystemClosureAtTheEnd) { - TypeSystems::Destroy(); - TypeSystems::Initialize(1); - auto &&typesystem = TypeSystems::Get(TypeSystemKind::PANDA, static_cast(0)); - auto paramType = [&typesystem](const auto &name) { - return typesystem.Parametric(TypeSystems::GetSort(TypeSystemKind::PANDA, static_cast(0), name)); - }; + SortNames sort_names {"Bot", "Top"}; + TypeSystem type_system {sort_names["Bot"], sort_names["Top"]}; + auto paramType = [&type_system, &sort_names](const auto &name) { return type_system.Parametric(sort_names[name]); }; - typesystem.SetIncrementalRelationClosureMode(false); - typesystem.SetDeferIncrementalRelationClosure(false); + type_system.SetIncrementalRelationClosureMode(false); + type_system.SetDeferIncrementalRelationClosure(false); - auto bot = typesystem.Bot(); - auto top = typesystem.Top(); + auto bot = type_system.Bot(); + auto top = type_system.Top(); - auto i8 = paramType("i8")(); - auto i16 = paramType("i16")(); - auto i32 = paramType("i32")(); - auto i64 = paramType("i64")(); + auto i8 = paramType("i8").WithTypeArgs({}, &type_system); + auto i16 = paramType("i16").WithTypeArgs({}, &type_system); + auto i32 = paramType("i32").WithTypeArgs({}, &type_system); + auto i64 = paramType("i64").WithTypeArgs({}, &type_system); - auto u8 = 
paramType("u8")(); - auto u16 = paramType("u16")(); - auto u32 = paramType("u32")(); - auto u64 = paramType("u64")(); + auto u8 = paramType("u8").WithTypeArgs({}, &type_system); + auto u16 = paramType("u16").WithTypeArgs({}, &type_system); + auto u32 = paramType("u32").WithTypeArgs({}, &type_system); + auto u64 = paramType("u64").WithTypeArgs({}, &type_system); auto method = paramType("method"); - auto top_method_of3args = method(-bot >> -bot >> +top); - auto bot_method_of3args = method(-top >> -top >> +bot); + auto top_method_of3args = method.WithTypeArgs({-bot, -bot, +top}, &type_system); + auto bot_method_of3args = method.WithTypeArgs({-top, -top, +bot}, &type_system); - auto method1 = method(-i8 >> -i8 >> +i64); - auto method2 = method(-i32 >> -i16 >> +i32); + auto method1 = method.WithTypeArgs({-i8, -i8, +i64}, &type_system); + auto method2 = method.WithTypeArgs({-i32, -i16, +i32}, &type_system); // method2 <: method1 - auto method3 = method(-i16 >> -method2 >> +method1); - auto method4 = method(-i64 >> -method1 >> +method2); + auto method3 = method.WithTypeArgs({-i16, -method2, +method1}, &type_system); + auto method4 = method.WithTypeArgs({-i64, -method1, +method2}, &type_system); // method4 <: method3 - i8 << (i16 | i32) << i64; - (u8 | u16) << (u32 | u64); + for (auto ii : {i16, i32}) { + MakeSubtype(i8, ii, &type_system); + MakeSubtype(ii, i64, &type_system); + } + for (auto ul : {u8, u16}) { + for (auto uh : {u32, u64}) { + MakeSubtype(ul, uh, &type_system); + } + } // before closure all methods are unrelated - EXPECT_FALSE(method2 <= method1); - EXPECT_FALSE(method1 <= method2); + EXPECT_FALSE(IsSubtype(method2, method1, &type_system)); + EXPECT_FALSE(IsSubtype(method1, method2, &type_system)); - EXPECT_FALSE(method4 <= method3); - EXPECT_FALSE(method3 <= method4); + EXPECT_FALSE(IsSubtype(method4, method3, &type_system)); + EXPECT_FALSE(IsSubtype(method3, method4, &type_system)); - EXPECT_FALSE(bot_method_of3args <= method1); - 
EXPECT_FALSE(bot_method_of3args <= method4); + EXPECT_FALSE(IsSubtype(bot_method_of3args, method1, &type_system)); + EXPECT_FALSE(IsSubtype(bot_method_of3args, method4, &type_system)); - EXPECT_FALSE(method1 <= top_method_of3args); - EXPECT_FALSE(method4 <= top_method_of3args); + EXPECT_FALSE(IsSubtype(method1, top_method_of3args, &type_system)); + EXPECT_FALSE(IsSubtype(method4, top_method_of3args, &type_system)); - typesystem.CloseSubtypingRelation(); + type_system.CloseSubtypingRelation(); - // after closure all realations are correct - EXPECT_TRUE(method2 <= method1); + // after closure all relations hold + EXPECT_TRUE(IsSubtype(method2, method1, &type_system)); - EXPECT_TRUE(method4 <= method3); - EXPECT_TRUE(bot_method_of3args <= method1); - EXPECT_TRUE(method4 <= top_method_of3args); - TypeSystems::Destroy(); + EXPECT_TRUE(IsSubtype(method4, method3, &type_system)); + EXPECT_TRUE(IsSubtype(bot_method_of3args, method1, &type_system)); + EXPECT_TRUE(IsSubtype(method4, top_method_of3args, &type_system)); } TEST_F(VerifierTest, TypeSystemLeastUpperBound) { - TypeSystems::Destroy(); - TypeSystems::Initialize(2); - auto &&typesystem = TypeSystems::Get(TypeSystemKind::PANDA, static_cast(1)); - auto paramType = [&typesystem](const auto &name) { - return typesystem.Parametric(TypeSystems::GetSort(TypeSystemKind::PANDA, static_cast(1), name)); - }; + SortNames sort_names {"Bot", "Top"}; + TypeSystem type_system {sort_names["Bot"], sort_names["Top"]}; + auto paramType = [&type_system, &sort_names](const auto &name) { return type_system.Parametric(sort_names[name]); }; /* G<-- @@ -203,39 +205,57 @@ TEST_F(VerifierTest, TypeSystemLeastUpperBound) to do not mislead other developers. 
*/ - auto top = typesystem.Top(); + auto top = type_system.Top(); - auto a = paramType("A")(); - auto b = paramType("B")(); - auto c = paramType("C")(); - auto d = paramType("D")(); - auto e = paramType("E")(); - auto f = paramType("F")(); - auto g = paramType("G")(); + auto a = paramType("A").WithTypeArgs({}, &type_system); + auto b = paramType("B").WithTypeArgs({}, &type_system); + auto c = paramType("C").WithTypeArgs({}, &type_system); + auto d = paramType("D").WithTypeArgs({}, &type_system); + auto e = paramType("E").WithTypeArgs({}, &type_system); + auto f = paramType("F").WithTypeArgs({}, &type_system); + auto g = paramType("G").WithTypeArgs({}, &type_system); - a << d << g; - b << e << g; - b << f; - c << e; - c << f; + MakeSubtype(a, d, &type_system); + MakeSubtype(d, g, &type_system); - auto r = a & b; + MakeSubtype(b, e, &type_system); + MakeSubtype(e, g, &type_system); + + MakeSubtype(b, f, &type_system); + MakeSubtype(c, e, &type_system); + MakeSubtype(c, f, &type_system); + + auto a_supers = a.AllSupertypes(&type_system); + auto b_supers = b.AllSupertypes(&type_system); + auto c_supers = c.AllSupertypes(&type_system); + auto d_supers = d.AllSupertypes(&type_system); + auto e_supers = e.AllSupertypes(&type_system); + auto f_supers = f.AllSupertypes(&type_system); + auto g_supers = g.AllSupertypes(&type_system); + + auto r = TsIntersection(&a_supers, &b_supers, &type_system); EXPECT_EQ(r, (TypeSet {g, top})); - r = e & f; + r = TsIntersection(&e_supers, &f_supers, &type_system); EXPECT_EQ(r, TypeSet {top}); - r = c & d; + r = TsIntersection(&c_supers, &d_supers, &type_system); EXPECT_EQ(r, (TypeSet {g, top})); - r = a & b & c; + { + auto ab_supers = TsIntersection(&a_supers, &b_supers, &type_system); + r = TsIntersection(&ab_supers, &c_supers, &type_system); + } EXPECT_EQ(r, (TypeSet {g, top})); - r = a & b & c & f; + { + auto ab_supers = TsIntersection(&a_supers, &b_supers, &type_system); + auto abc_supers = TsIntersection(&ab_supers, &c_supers, 
&type_system); + r = TsIntersection(&abc_supers, &f_supers, &type_system); + } EXPECT_EQ(r, TypeSet {top}); - EXPECT_TRUE(r.TheOnlyType().IsTop()); - TypeSystems::Destroy(); + EXPECT_TRUE(type_system.IsTop(r.TheOnlyType())); } } // namespace panda::verifier::test diff --git a/verification/type/type_params.cpp b/verification/type/type_arg.cpp similarity index 54% rename from verification/type/type_params.cpp rename to verification/type/type_arg.cpp index c06d3c618da7dd62c293718c7e627a8a61e4b5d1..6cd07decf1a6ddd1846921048295b9d2248c35c0 100644 --- a/verification/type/type_params.cpp +++ b/verification/type/type_arg.cpp @@ -13,34 +13,15 @@ * limitations under the License. */ -#include "type_params.h" +#include "type_arg.h" #include "type_system.h" -#include "type_systems.h" -#include "type_tags.h" +#include "type_type.h" namespace panda::verifier { -TypeSystem &TypeParams::GetTypeSystem() const +Type TypeArg::GetType() const { - return TypeSystems::Get(kind_, threadnum_); -} - -bool TypeParams::operator<=(const TypeParams &rhs) const -{ - ASSERT(kind_ == rhs.kind_); - ASSERT(threadnum_ == rhs.threadnum_); - if (empty()) { - return true; - } - return GetTypeSystem().CheckIfLhsParamsSubtypeOfRhs(*this, rhs); -} - -TypeParams &TypeParams::operator>>(const TypeParam &p) -{ - ASSERT(kind_ == p.kind_); - ASSERT(threadnum_ == p.threadnum_); - push_back(p); - return *this; + return Type {GetInt()}; } } // namespace panda::verifier diff --git a/verification/type/type_index.h b/verification/type/type_arg.h similarity index 74% rename from verification/type/type_index.h rename to verification/type/type_arg.h index 9acea0c22d225ad939a4184ea2fce62960e44f87..003f0220329ba741aefdcbe7148bc853e9a42a43 100644 --- a/verification/type/type_index.h +++ b/verification/type/type_arg.h @@ -13,8 +13,8 @@ * limitations under the License. 
*/ -#ifndef _PANDA_TYPE_INDEX_HPP -#define _PANDA_TYPE_INDEX_HPP +#ifndef _PANDA_TYPE_ARG_H_ +#define _PANDA_TYPE_ARG_H_ #include "type_tags.h" @@ -25,27 +25,29 @@ #include "runtime/include/mem/panda_containers.h" namespace panda::verifier { -class TypeParamIdx : public TaggedIndex { +class Type; + +class TypeArg : public TaggedIndex { using Base = TaggedIndex; public: - TypeParamIdx(TypeVariance variance, TypeNum num) + TypeArg(TypeVariance variance, TypeNum num) { Base::SetTag<0>(variance); Base::SetInt(num); } - ~TypeParamIdx() = default; - TypeParamIdx &operator+() + ~TypeArg() = default; + TypeArg &operator+() { Base::SetTag<0>(TypeVariance::COVARIANT); return *this; } - TypeParamIdx &operator-() + TypeArg &operator-() { - Base::SetTag<0>(TypeVariance::CONTRVARIANT); + Base::SetTag<0>(TypeVariance::CONTRAVARIANT); return *this; } - TypeParamIdx &operator~() + TypeArg &operator~() { Base::SetTag<0>(TypeVariance::INVARIANT); return *this; @@ -54,9 +56,11 @@ public: { return Base::GetTag<0>(); } + + Type GetType() const; }; -using TypeParamsIdx = PandaVector; +using TypeArgs = PandaVector; } // namespace panda::verifier -#endif // !_PANDA_TYPE_INDEX_HPP +#endif // !_PANDA_TYPE_ARG_H_ diff --git a/verification/type/type_image.h b/verification/type/type_image.h index a5443d482041f49f690bd2a67b67fe7b1425a681..6d70c0af2f5fffda88b6a1ede2ab4c18dc884955 100644 --- a/verification/type/type_image.h +++ b/verification/type/type_image.h @@ -17,7 +17,6 @@ #define _PANDA_TYPE_IMAGE_HPP #include "type_type.h" -#include "type_params.h" #include "runtime/include/mem/panda_containers.h" #include "runtime/include/mem/panda_string.h" @@ -25,10 +24,7 @@ namespace panda::verifier { class TypeImage { public: - const SortNames &SNames_; - PandaUnorderedMap CachedImages_; - - TypeImage(const SortNames &names) : SNames_ {names} {} + TypeImage(SortNames const *names, TypeSystem const *tsys) : sort_names_ {names}, type_system_ {tsys} {} ~TypeImage() = default; PandaString 
ImageOfVariance(TypeVariance var) const @@ -36,7 +32,7 @@ public: switch (var) { case TypeVariance::COVARIANT: return "+"; - case TypeVariance::CONTRVARIANT: + case TypeVariance::CONTRAVARIANT: return "-"; case TypeVariance::INVARIANT: return "~"; @@ -46,50 +42,55 @@ public: return "?"; } - PandaString ImageOfTypeParam(const TypeParam &type_param) + PandaString ImageOfTypeArg(const TypeArg &type_arg) { - return ImageOfVariance(type_param.Variance()) + ImageOfType(type_param); + return ImageOfVariance(type_arg.Variance()) + ImageOfType(type_arg.GetType()); } - PandaString ImageOfTypeParams(const TypeParams ¶ms) + PandaString ImageOfTypeArgs(const TypeArgs &type_args) { - PandaString params_image {""}; - - if (params.size() != 0) { - params.ForEach([this, ¶ms_image](const auto &p) { - params_image += PandaString {params_image.empty() ? "( " : ", "}; - params_image += ImageOfTypeParam(p); - }); - params_image += " )"; + PandaString args_image {""}; + + if (type_args.size() != 0) { + for (const auto &p : type_args) { + args_image += PandaString {args_image.empty() ? 
"( " : ", "}; + args_image += ImageOfTypeArg(p); + }; + args_image += " )"; } - return params_image; + return args_image; } const PandaString &ImageOfType(const Type &type) { - auto cached = CachedImages_.find(type.Number()); - if (cached != CachedImages_.end()) { + auto cached = cached_images_.find(type.Number()); + if (cached != cached_images_.end()) { return cached->second; } - PandaString sort_name {SNames_[type.Sort()]}; + PandaString sort_name {(*sort_names_)[type.Sort(type_system_)]}; - const auto ¶ms = type.Params(); + const auto ¶ms = type.GetTypeArgs(type_system_); - auto &¶ms_image = ImageOfTypeParams(params); + auto &¶ms_image = ImageOfTypeArgs(params); PandaString val = sort_name + params_image; - CachedImages_[type.Number()] = val; + cached_images_[type.Number()] = val; - return CachedImages_[type.Number()]; + return cached_images_[type.Number()]; } const PandaString &operator[](const Type &type) { return ImageOfType(type); } + +private: + SortNames const *sort_names_; + TypeSystem const *type_system_; + PandaUnorderedMap cached_images_; }; } // namespace panda::verifier diff --git a/verification/type/type_info.h b/verification/type/type_info.h index c0ed1c49c13bf6c8a389e3adad3b815e3e75301c..f85dfde915e34cfa75d3495a08a4c90e902228d5 100644 --- a/verification/type/type_info.h +++ b/verification/type/type_info.h @@ -18,34 +18,34 @@ #include "verification/util/lazy.h" #include "type_sort.h" -#include "type_index.h" +#include "type_arg.h" namespace panda::verifier { class TypeInfo { public: - TypeInfo(SortIdx sort, const TypeParamsIdx ¶ms) : Sort_(sort), ParamsIdx_(params) {} - TypeInfo(SortIdx sort, TypeParamsIdx &¶ms) : Sort_(sort), ParamsIdx_(std::move(params)) {} + TypeInfo(SortIdx sort, const TypeArgs &type_args) : Sort_(sort), type_args_(type_args) {} + TypeInfo(SortIdx sort, TypeArgs &&type_args) : Sort_(sort), type_args_(std::move(type_args)) {} ~TypeInfo() = default; bool operator==(const TypeInfo &rhs) const { - return Sort_ == rhs.Sort_ && 
ParamsIdx_ == rhs.ParamsIdx_; + return Sort_ == rhs.Sort_ && type_args_ == rhs.type_args_; } size_t Arity() const { - return ParamsIdx_.size(); + return type_args_.size(); } const SortIdx &Sort() const { return Sort_; } - const TypeParamsIdx &ParamsIdx() const + const TypeArgs &GetTypeArgs() const { - return ParamsIdx_; + return type_args_; } private: SortIdx Sort_; - TypeParamsIdx ParamsIdx_; + TypeArgs type_args_; }; } // namespace panda::verifier @@ -72,8 +72,8 @@ struct hash { result ^= (result >> 17U) | (result << 16U); // NOLINT(readability-magic-numbers) #endif }; - hash_func(ti.ParamsIdx().size()); - for (const auto &v : ti.ParamsIdx()) { + hash_func(ti.GetTypeArgs().size()); + for (const auto &v : ti.GetTypeArgs()) { hash_func(v.GetIndex()); hash_func(static_cast(v.Variance())); } diff --git a/verification/type/type_parametric.cpp b/verification/type/type_parametric.cpp index b2f7f135102749cd4a649f2dc38e16d9d703a0a2..aa76feb1ca3af0d42c60756e2f373600ffbcebd3 100644 --- a/verification/type/type_parametric.cpp +++ b/verification/type/type_parametric.cpp @@ -15,46 +15,28 @@ #include "type_parametric.h" #include "type_system.h" -#include "type_systems.h" namespace panda::verifier { -TypeSystem &ParametricType::GetTypeSystem() const +bool ParametricType::ExistsWithTypeArgs(TypeArgs type_args, TypeSystem const *tsys) const { - return TypeSystems::Get(kind_, threadnum_); -} - -bool ParametricType::operator[](TypeParamsIdx params) const -{ - Index num = GetTypeSystem().FindNum({Sort_, std::move(params)}); + Index num = tsys->FindNum({Sort_, std::move(type_args)}); return num.IsValid(); } -Type ParametricType::operator()(TypeParamsIdx params) const -{ - auto num = GetTypeSystem().FindNumOrCreate({Sort_, std::move(params)}); - GetTypeSystem().Relate(GetTypeSystem().BotNum_, num); - GetTypeSystem().Relate(num, GetTypeSystem().TopNum_); - return {kind_, threadnum_, num}; -} - -bool ParametricType::operator[](const TypeParams ¶ms) const -{ - TypeParamsIdx params_idx 
{params}; // NOLINT(cppcoreguidelines-slicing) - return operator[](std::move(params_idx)); -} - -Type ParametricType::operator()(const TypeParams ¶ms) const +Type ParametricType::WithTypeArgs(TypeArgs type_args, TypeSystem *tsys) const { - TypeParamsIdx params_idx {params}; // NOLINT(cppcoreguidelines-slicing) - return operator()(std::move(params_idx)); + auto num = tsys->FindNumOrCreate({Sort_, std::move(type_args)}); + tsys->Relate(tsys->BotNum_, num); + tsys->Relate(num, tsys->TopNum_); + return {num}; } template -void ParametricType::ForAll(Handler &&handler) const +void ParametricType::ForAll(TypeSystem *tsys, Handler &&handler) const { - GetTypeSystem().ForAllTypes([this, &handler](const Type &type) { - if (type.Sort() == Sort_) { + tsys->ForAllTypes([this, tsys, &handler](const Type &type) { + if (type.Sort(tsys) == Sort_) { return handler(type); } return true; diff --git a/verification/type/type_parametric.h b/verification/type/type_parametric.h index 297d2c4e8e3563187635daf5274cf700cff8c0d3..896299e66746498946fa64a9c6f274a18099a0af 100644 --- a/verification/type/type_parametric.h +++ b/verification/type/type_parametric.h @@ -16,24 +16,18 @@ #ifndef _PANDA_TYPE_PARAMETRIC_HPP__ #define _PANDA_TYPE_PARAMETRIC_HPP__ -#include "type_params.h" +#include "type_arg.h" #include "type_sort.h" #include "type_tags.h" #include "type_type.h" namespace panda::verifier { class TypeSystem; -class TypeParams; class ParametricType { public: - TypeSystemKind kind_; - ThreadNum threadnum_; SortIdx Sort_; - ParametricType(TypeSystemKind kind, ThreadNum threadnum, SortIdx sort) - : kind_ {kind}, threadnum_ {threadnum}, Sort_(sort) - { - } + ParametricType(SortIdx sort) : Sort_(sort) {} friend class TypeSystem; ParametricType() = delete; @@ -43,14 +37,11 @@ public: ParametricType &operator=(ParametricType &&) = default; ~ParametricType() = default; - TypeSystem &GetTypeSystem() const; - bool operator[](TypeParamsIdx params) const; - Type operator()(TypeParamsIdx params = {}) 
const; - bool operator[](const TypeParams ¶ms) const; - Type operator()(const TypeParams ¶ms) const; + bool ExistsWithTypeArgs(TypeArgs type_args, TypeSystem const *tsys) const; + Type WithTypeArgs(TypeArgs type_args, TypeSystem *tsys) const; template - void ForAll(Handler &&handler) const; + void ForAll(TypeSystem *tsys, Handler &&handler) const; }; } // namespace panda::verifier diff --git a/verification/type/type_set.cpp b/verification/type/type_set.cpp index 32f350c0d41cf89dfdf9191bcde78c667c46ad30..d2056a925a6ad384a1055bb523559fa0f0f0cb44 100644 --- a/verification/type/type_set.cpp +++ b/verification/type/type_set.cpp @@ -19,32 +19,9 @@ namespace panda::verifier { -const Type &TypeSet::operator<<(const Type &st) const +TypeSet TsIntersection(TypeSet const *lhs, TypeSet const *rhs, [[maybe_unused]] TypeSystem const *tsys) { - ForAll([&](const Type &t) { - t << st; - return true; - }); - return st; -} - -const TypeSet &TypeSet::operator<<(const TypeSet &st) const -{ - ForAll([&](const Type &t) { - t << st; - return true; - }); - return st; -} - -TypeSet TypeSet::operator&(const Type &rhs) const -{ - return TypeSet {kind_, threadnum_, Numbers_ & TypeSystems::Get(kind_, threadnum_).GetDirectlyRelated(rhs.Number())}; -} - -TypeSet TypeSet::operator&(const TypeSet &rhs) const -{ - return TypeSet {kind_, threadnum_, Numbers_ & rhs.Numbers_}; + return TypeSet {lhs->Numbers_ & rhs->Numbers_}; } } // namespace panda::verifier diff --git a/verification/type/type_set.h b/verification/type/type_set.h index 92fd29bca2e22ecdadc8e97b1b2c16437876d090..33cd6449482363b4a03dd7e7d03ba6b47e4fd865 100644 --- a/verification/type/type_set.h +++ b/verification/type/type_set.h @@ -16,7 +16,7 @@ #ifndef _PANDA_TYPE_SET_HPP__ #define _PANDA_TYPE_SET_HPP__ -#include "type_index.h" +#include "type_arg.h" #include "type_type.h" #include "runtime/include/mem/panda_string.h" @@ -26,11 +26,8 @@ class TypeSystems; class TypeSet { public: - TypeSet() = delete; - template - explicit TypeSet(const 
Type &t, Types... types) - : kind_ {t.GetTypeSystemKind()}, threadnum_ {t.GetThreadNum()}, Numbers_ {} + explicit TypeSet(const Type &t, Types... types) : Numbers_ {} { if (sizeof...(types) == 0) { Numbers_.Insert(t.Number()); @@ -40,15 +37,12 @@ public: } } - explicit TypeSet(TypeSystemKind kind, ThreadNum threadnum, IntSet &&numbers = {}) - : kind_ {kind}, threadnum_ {threadnum}, Numbers_ {numbers} {}; + explicit TypeSet(IntSet &&numbers = {}) : Numbers_ {numbers} {}; ~TypeSet() = default; void Insert(const Type &t) { - ASSERT(t.GetTypeSystemKind() == kind_); - ASSERT(t.GetThreadNum() == threadnum_); Numbers_.Insert(t.Number()); } @@ -60,17 +54,9 @@ public: bool Contains(const Type &t) const { - return t.GetTypeSystemKind() == kind_ && t.GetThreadNum() == threadnum_ && Numbers_.Contains(t.Number()); + return Numbers_.Contains(t.Number()); } - const Type &operator<<(const Type &st) const; - - const TypeSet &operator<<(const TypeSet &st) const; - - TypeSet operator&(const Type &rhs) const; - - TypeSet operator&(const TypeSet &rhs) const; - size_t Size() const { return Numbers_.Size(); @@ -85,7 +71,7 @@ public: { Index the_only_number = Numbers_.TheOnlyElement(); if (the_only_number.IsValid()) { - return {kind_, threadnum_, *the_only_number}; + return {*the_only_number}; } return {}; } @@ -93,7 +79,7 @@ public: template bool ForAll(Handler &&handler) const { - return Numbers_.ForAll([&](TypeNum num) { return handler(Type(kind_, threadnum_, num)); }); + return Numbers_.ForAll([&](TypeNum num) { return handler(Type {num}); }); } template @@ -122,7 +108,7 @@ public: bool operator==(const TypeSet &rhs) const { - return kind_ == rhs.kind_ && threadnum_ == rhs.threadnum_ && Numbers_ == rhs.Numbers_; + return Numbers_ == rhs.Numbers_; } bool operator!=(const TypeSet &rhs) const @@ -131,10 +117,13 @@ public: } private: - TypeSystemKind kind_; - ThreadNum threadnum_; IntSet Numbers_; + + friend TypeSet TsIntersection(TypeSet const *, TypeSet const *, TypeSystem const *); }; 
+ +TypeSet TsIntersection(TypeSet const *lhs, TypeSet const *rhs, TypeSystem const *tsys); + } // namespace panda::verifier #endif // !_PANDA_TYPE_SET_HPP__ diff --git a/verification/type/type_system.h b/verification/type/type_system.h index b830505c2548a0f2431bf05f8ec0194eea9fe831..25641bfd415f73b10e0d472847a5be9d52cac9f6 100644 --- a/verification/type/type_system.h +++ b/verification/type/type_system.h @@ -21,13 +21,10 @@ #include "subtyping_closure.h" #include "type_sort.h" -#include "type_index.h" #include "type_info.h" -#include "type_param.h" -#include "type_params.h" +#include "type_arg.h" #include "type_parametric.h" #include "type_set.h" -#include "type_systems.h" #include "type_tags.h" #include "type_type.h" @@ -69,13 +66,7 @@ Design decisions: class TypeSystem { public: - TypeSystem(SortIdx bot, SortIdx top, ThreadNum threadnum = 0, TypeSystemKind kind = TypeSystemKind::PANDA) - : kind_ {kind}, - threadnum_ {threadnum}, - BotNum_ {FindNumOrCreate({bot, {}})}, - TopNum_ {FindNumOrCreate({top, {}})} - { - } + TypeSystem(SortIdx bot, SortIdx top) : BotNum_ {FindNumOrCreate({bot, {}})}, TopNum_ {FindNumOrCreate({top, {}})} {} NO_COPY_SEMANTIC(TypeSystem); DEFAULT_MOVE_SEMANTIC(TypeSystem); @@ -96,9 +87,6 @@ public: bool IncrementalSubtypingClosure_ = true; bool DeferIncrementalSubtypingClosure_ = false; - TypeSystemKind kind_ = TypeSystemKind::PANDA; - ThreadNum threadnum_ = 0; - Index FindNum(const TypeInfo &ti) const { auto it = InfoToNum_.find(ti); @@ -118,13 +106,13 @@ public: TypingRel_.EnsureMinSize(num); Universe_.push_back(ti); ParameterOf_.push_back({}); - const auto ¶ms = ti.ParamsIdx(); - for (const auto ¶m : params) { - ParameterOf_[param].insert(num); + const auto &type_args = ti.GetTypeArgs(); + for (const auto &type_arg : type_args) { + ParameterOf_[type_arg].insert(num); } InfoToNum_[ti] = num; SortIdx sort = ti.Sort(); - size_t arity = params.size(); + size_t arity = type_args.size(); if (sort >= TypeClasses_.size()) { 
TypeClasses_.resize(sort + 1); } @@ -136,8 +124,8 @@ public: const VectorNum &TypeClassNum(TypeNum type) const { const auto &info = Universe_[type]; - const auto ¶ms = info.ParamsIdx(); - return TypeClasses_[info.Sort()][params.size()]; + const auto &type_args = info.GetTypeArgs(); + return TypeClasses_[info.Sort()][type_args.size()]; } void PerformClosingCurrentRelation() @@ -207,7 +195,7 @@ public: } } - bool CheckIfLhsParamsSubtypeOfRhs(const TypeParamsIdx &lhs, const TypeParamsIdx &rhs) const + bool CheckIfLhsParamsSubtypeOfRhs(const TypeArgs &lhs, const TypeArgs &rhs) const { if (lhs.size() != rhs.size()) { return false; @@ -226,7 +214,7 @@ public: return false; } break; - case TypeVariance::CONTRVARIANT: + case TypeVariance::CONTRAVARIANT: if (!TypingRel_.IsInInverseRelation(*lhs_it, *rhs_it)) { return false; } @@ -245,9 +233,9 @@ public: if (lhsInfo.Sort() != rhsInfo.Sort()) { return false; } - const TypeParamsIdx &lhsParams = lhsInfo.ParamsIdx(); - const TypeParamsIdx &rhsParams = rhsInfo.ParamsIdx(); - return CheckIfLhsParamsSubtypeOfRhs(lhsParams, rhsParams); + const TypeArgs &lhs_type_args = lhsInfo.GetTypeArgs(); + const TypeArgs &rhs_type_args = rhsInfo.GetTypeArgs(); + return CheckIfLhsParamsSubtypeOfRhs(lhs_type_args, rhs_type_args); } bool IsInDirectRelation(TypeNum lhs, TypeNum rhs) const @@ -265,13 +253,12 @@ public: return Universe_[t].Arity(); } - const TypeParamsIdx &GetParamsIdx(TypeNum t) const + const TypeArgs &GetTypeArgs(TypeNum t) const { - return Universe_[t].ParamsIdx(); + return Universe_[t].GetTypeArgs(); } friend class Type; - friend class TypeParams; friend class ParametricType; void SetIncrementalRelationClosureMode(bool state) @@ -288,7 +275,7 @@ public: void ForAllTypes(Handler &&handler) const { for (size_t num = 0; num < Universe_.size(); ++num) { - if (!handler(Type {kind_, threadnum_, num})) { + if (!handler(Type {num})) { return; } } @@ -298,8 +285,8 @@ public: void ForAllSubtypesOf(const Type &t, Handler &&handler) const { 
auto num = t.Number(); - auto callback = [this, &handler](const TypeNum &t_num) { - bool result = handler(Type {kind_, threadnum_, t_num}); + auto callback = [&handler](const TypeNum &t_num) { + bool result = handler(Type {t_num}); return result; }; TypingRel_.ForAllTo(num, callback); @@ -309,8 +296,8 @@ public: void ForAllSupertypesOf(const Type &t, Handler &&handler) const { auto num = t.Number(); - auto callback = [this, &handler](const TypeNum &t_num) { - bool result = handler(Type {kind_, threadnum_, t_num}); + auto callback = [&handler](const TypeNum &t_num) { + bool result = handler(Type {t_num}); return result; }; TypingRel_.ForAllFrom(num, callback); @@ -329,9 +316,9 @@ public: void CloseSubtypingRelation() { ForAllTypes([this](const Type &type) { - auto sort = type.Sort(); + auto sort = type.Sort(this); auto number = type.Number(); - auto arity = type.Arity(); + auto arity = type.Arity(this); SubtypingClosureCurrent_.AddType(sort, number, arity); return true; }); @@ -351,27 +338,27 @@ public: ParametricType Parametric(SortIdx sort) { - return {kind_, threadnum_, sort}; + return {sort}; } Type Bot() const { - return {kind_, threadnum_, BotNum_}; + return {BotNum_}; } Type Top() const { - return {kind_, threadnum_, TopNum_}; + return {TopNum_}; } - TypeSystemKind GetKind() const + bool IsBot(Type type) const { - return kind_; + return type.Number() == BotNum_; } - ThreadNum GetThreadNum() const + bool IsTop(Type type) const { - return threadnum_; + return type.Number() == TopNum_; } private: diff --git a/verification/type/type_systems.cpp b/verification/type/type_systems.cpp deleted file mode 100644 index 7d16c2f755c9084f3780e20c45f24b950bce5521..0000000000000000000000000000000000000000 --- a/verification/type/type_systems.cpp +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Copyright (c) 2021-2022 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#include "type_system.h" -#include "type_sort.h" -#include "type_image.h" -#include "type_params.h" -#include "type_systems.h" -#include "type_tags.h" - -#include "verification/util/enum_array.h" -#include "verification/util/hash.h" - -#include "runtime/include/mem/allocator.h" -#include "runtime/include/mem/panda_containers.h" - -#include "macros.h" - -namespace panda::verifier { - -class FullTypeSystem { -public: - explicit FullTypeSystem(TypeSystemKind kind, ThreadNum threadnum) - : sort_ {"Bot", "Top"}, - type_image_ {sort_}, - bot_sort_ {sort_["Bot"]}, - top_sort_ {sort_["Top"]}, - type_system_ {bot_sort_, top_sort_, threadnum, kind} - { - } - NO_COPY_SEMANTIC(FullTypeSystem); - // can't use the default move constructor due to an internal pointer in type_image_ - FullTypeSystem(FullTypeSystem &&other) - : sort_ {std::move(other.sort_)}, - type_image_ {sort_}, - bot_sort_ {std::exchange(other.bot_sort_, 0)}, - top_sort_ {std::exchange(other.top_sort_, 1)}, - type_system_ {std::move(other.type_system_)} - { - } - NO_MOVE_OPERATOR(FullTypeSystem); - ~FullTypeSystem() = default; - SortIdx GetSort(const PandaString &name) - { - return sort_[name]; - } - const PandaString &ImageOfType(const Type &type) - { - return type_image_[type]; - } - PandaString ImageOfTypeParams(const TypeParams &type_params) - { - return type_image_.ImageOfTypeParams(type_params); - } - TypeSystem &GetTypeSystem() - { - return type_system_; - } - -private: - SortNames sort_; - TypeImage type_image_; - SortIdx bot_sort_; - SortIdx top_sort_; - TypeSystem 
type_system_; -}; - -struct TypeSystems::Impl { - explicit Impl(size_t numThreads) - { - for (auto kind : {TypeSystemKind::PANDA, TypeSystemKind::JAVA}) { - type_systems[kind].reserve(numThreads); - for (ThreadNum threadNum = 0; threadNum < numThreads; threadNum++) { - type_systems[kind].emplace_back(kind, threadNum); - } - } - } - - FullTypeSystem &GetFullTypeSystem(TypeSystem *tsys) - { - return type_systems[tsys->GetKind()][tsys->GetThreadNum()]; - } - - FullTypeSystem &GetFullTypeSystem(TypeSystemKind kind, ThreadNum threadNum) - { - return type_systems[kind][threadNum]; - } - -private: - EnumArray, TypeSystemKind, TypeSystemKind::PANDA, TypeSystemKind::JAVA> type_systems; -}; - -void TypeSystems::Initialize(size_t numThreads) -{ - if (impl != nullptr) { - return; - } - impl = new (mem::AllocatorAdapter().allocate(1)) Impl {numThreads}; - ASSERT(impl != nullptr); -} - -void TypeSystems::Destroy() -{ - if (impl == nullptr) { - return; - } - impl->~Impl(); - mem::AllocatorAdapter().deallocate(impl, 1); - impl = nullptr; -} - -const PandaString &TypeSystems::ImageOfType(const Type &type) -{ - ASSERT(impl != nullptr); - return impl->GetFullTypeSystem(&type.GetTypeSystem()).ImageOfType(type); -} - -PandaString TypeSystems::ImageOfTypeParams(const TypeParams &type) -{ - ASSERT(impl != nullptr); - return impl->GetFullTypeSystem(&type.GetTypeSystem()).ImageOfTypeParams(type); -} - -SortIdx TypeSystems::GetSort(TypeSystemKind kind, ThreadNum threadnum, const PandaString &name) -{ - ASSERT(impl != nullptr); - return impl->GetFullTypeSystem(kind, threadnum).GetSort(name); -} - -TypeSystem &TypeSystems::Get(TypeSystemKind kind, ThreadNum threadnum) -{ - ASSERT(impl != nullptr); - return impl->GetFullTypeSystem(kind, threadnum).GetTypeSystem(); -} - -} // namespace panda::verifier diff --git a/verification/type/type_tags.h b/verification/type/type_tags.h index 687643571e6d61717d58a3e780bcfc94f6378047..c6bcebb1e684947dfc909ea2cbf3d281d59aa211 100644 --- 
a/verification/type/type_tags.h +++ b/verification/type/type_tags.h @@ -27,16 +27,12 @@ namespace panda::verifier { using TypeNum = size_t; using VectorNum = PandaVector; -enum class TypeSystemKind { PANDA, JAVA }; -using TypeSystemKindTag = TagForEnum; - using ThreadNum = size_t; constexpr size_t MAX_THREADS = 64; -using ThreadNumTag = TagForInt; -enum class TypeVariance { INVARIANT, COVARIANT, CONTRVARIANT }; +enum class TypeVariance { INVARIANT, COVARIANT, CONTRAVARIANT }; using TypeVarianceTag = - TagForEnum; + TagForEnum; } // namespace panda::verifier #endif // !PANDA_TYPE_TAGS_H_ diff --git a/verification/type/type_type.cpp b/verification/type/type_type.cpp index 951bbf048d3db0574ccda373e8e1444145080c0e..4fa22824d0867b16620520e6a91c2f9dab1a0cd1 100644 --- a/verification/type/type_type.cpp +++ b/verification/type/type_type.cpp @@ -13,52 +13,43 @@ * limitations under the License. */ +#include "type_type.h" +#include "type_arg.h" #include "type_system.h" - #include "type_sort.h" -#include "type_index.h" #include "type_info.h" - -#include "type_type.h" #include "type_set.h" -#include "type_param.h" namespace panda::verifier { template <> -TypeSet Type::operator|(const Type &t) const -{ - return TypeSet {*this, t}; -} - -template <> -TypeParam Type::operator+() const +TypeArg Type::operator+() const { - return {TypeVariance::COVARIANT, *this}; + return {TypeVariance::COVARIANT, Idx_}; } template <> -TypeParam Type::operator-() const +TypeArg Type::operator-() const { - return {TypeVariance::CONTRVARIANT, *this}; + return {TypeVariance::CONTRAVARIANT, Idx_}; } template <> -TypeParam Type::operator~() const +TypeArg Type::operator~() const { - return {TypeVariance::INVARIANT, *this}; + return {TypeVariance::INVARIANT, Idx_}; } template <> -TypeParams Type::Params() const +TypeArgs Type::GetTypeArgs(TypeSystem const *tsys) const { - return {GetTypeSystemKind(), GetThreadNum(), GetTypeSystem().GetParamsIdx(Idx_)}; + return TypeArgs {tsys->GetTypeArgs(Idx_)}; } 
template <> -TypeParam Type::operator*(TypeVariance variance) const +TypeArg Type::operator*(TypeVariance variance) const { - return {variance, *this}; + return {variance, Idx_}; } bool Type::operator==(const Type &t) const @@ -71,89 +62,39 @@ bool Type::operator!=(const Type &t) const return t.Idx_ != Idx_; } -const Type &Type::operator<<(const Type &t) const -{ - ASSERT(GetTypeSystemKind() == t.GetTypeSystemKind()); - ASSERT(GetThreadNum() == t.GetThreadNum()); - GetTypeSystem().Relate(Idx_, t.Idx_); - return t; -} - -const TypeSet &Type::operator<<(const TypeSet &s) const -{ - s.ForAll([&](const Type &t) { - operator<<(t); - return true; - }); - return s; -} - -bool Type::operator<=(const Type &rhs) const -{ - return GetTypeSystem().IsInDirectRelation(Idx_, rhs.Idx_); -} - -bool Type::operator<=(const TypeParams &rhs) const -{ - return TypeParams {GetTypeSystemKind(), GetThreadNum()} <= rhs; -} - -SortIdx Type::Sort() const +bool IsSubtype(Type lhs, Type rhs, TypeSystem const *tsys) { - return GetTypeSystem().GetSort(Idx_); + return tsys->IsInDirectRelation(lhs.Idx_, rhs.Idx_); } -size_t Type::Arity() const +void MakeSubtype(Type sub, Type super, TypeSystem *tsys) { - return GetTypeSystem().GetArity(Idx_); + tsys->Relate(sub.Idx_, super.Idx_); } -size_t Type::ParamsSize() const +SortIdx Type::Sort(TypeSystem const *tsys) const { - return GetTypeSystem().GetParamsIdx(Idx_).size(); + return tsys->GetSort(Idx_); } -TypeSystem &Type::GetTypeSystem() const +size_t Type::Arity(TypeSystem const *tsys) const { - return TypeSystems::Get(GetTypeSystemKind(), GetThreadNum()); + return tsys->GetArity(Idx_); } -TypeSystemKind Type::GetTypeSystemKind() const +size_t Type::TypeArgsSize(TypeSystem const *tsys) const { - return Idx_.GetTag<0>(); -} - -ThreadNum Type::GetThreadNum() const -{ - return Idx_.GetTag<1>(); + return tsys->GetTypeArgs(Idx_).size(); } bool Type::IsValid() const { - return Idx_.IsValid(); -} - -bool Type::IsTop() const -{ - return GetTypeSystem().Top() == 
*this; -} - -bool Type::IsBot() const -{ - return GetTypeSystem().Bot() == *this; -} - -TypeSet Type::operator&(const Type &rhs) const -{ - ASSERT(GetTypeSystemKind() == rhs.GetTypeSystemKind()); - const TypeSystem &type_system = GetTypeSystem(); - return TypeSet {GetTypeSystemKind(), GetThreadNum(), - type_system.GetDirectlyRelated(Idx_) & type_system.GetDirectlyRelated(rhs.Idx_)}; + return Idx_ != 0; } -TypeSet Type::operator&(const TypeSet &rhs) const +TypeSet Type::AllSupertypes(TypeSystem const *tsys) { - return rhs & *this; + return TypeSet {IntSet {tsys->GetDirectlyRelated(Idx_)}}; } TypeNum Type::Number() const diff --git a/verification/type/type_type.h b/verification/type/type_type.h index b495ac7bfa719ff562f6ace69ba5771031a851b0..8386966f010ae5d86500dfecd1dd016bfc2037f3 100644 --- a/verification/type/type_type.h +++ b/verification/type/type_type.h @@ -17,20 +17,18 @@ #define _PANDA_TYPE_TYPE_HPP__ #include "type_sort.h" -#include "type_index.h" +#include "type_arg.h" #include "type_info.h" #include "type_tags.h" namespace panda::verifier { class TypeSystem; class TypeSet; -class TypeParam; -class TypeParams; +class TypeArg; class Type { - using TypeIndex = TaggedIndex; - public: + Type(TypeNum num) : Idx_ {num} {} Type() = default; Type(const Type &) = default; Type(Type &&) = default; @@ -42,82 +40,56 @@ public: bool operator!=(const Type &t) const; - const Type &operator<<(const Type &t) const; - - // def subtyping a << (b | c) << d - const TypeSet &operator<<(const TypeSet &s) const; - // a workaround for absence of mutualy-recursive classes in C++ template - TypeSet operator|(const Type &t) const; - - template - TypeParam operator+() const; + TypeArg operator+() const; template - TypeParam operator-() const; + TypeArg operator-() const; template - TypeParam operator~() const; + TypeArg operator~() const; - // subtyping relation: <= - bool operator<=(const Type &rhs) const; - bool operator<=(const TypeParams &rhs) const; + SortIdx Sort(TypeSystem const 
*tsys) const; - SortIdx Sort() const; - - size_t Arity() const; + size_t Arity(TypeSystem const *tsys) const; template - TypeParams Params() const; - - size_t ParamsSize() const; - - TypeSystem &GetTypeSystem() const; - - TypeSystemKind GetTypeSystemKind() const; + TypeArgs GetTypeArgs(TypeSystem const *tsys) const; - ThreadNum GetThreadNum() const; + size_t TypeArgsSize(TypeSystem const *tsys) const; bool IsValid() const; - bool IsTop() const; - - bool IsBot() const; - - TypeSet operator&(const Type &rhs) const; - - TypeSet operator&(const TypeSet &rhs) const; - template - TypeParam operator*(TypeVariance variance) const; + TypeArg operator*(TypeVariance variance) const; + + TypeSet AllSupertypes(TypeSystem const *tsys); // TODO(vdyadov): implement template - void ForAllParams(Handler &&handler) const; + void ForAllTypeArgs(Handler &&handler) const; template - void ForAllSupertypes(Handler &&handler) const; + void ForAllSupertypes(TypeSystem const *tsys, Handler &&handler) const; template - void ForAllSupertypesOfSort(SortIdx sort, Handler &&handler) const; + void ForAllSupertypesOfSort(TypeSystem const *tsys, SortIdx sort, Handler &&handler) const; template - void ForAllSubtypes(Handler &&handler) const; + void ForAllSubtypes(TypeSystem const *tsys, Handler &&handler) const; template - void ForAllSubtypesOfSort(SortIdx sort, Handler &&handler) const; + void ForAllSubtypesOfSort(TypeSystem const *tsys, SortIdx sort, Handler &&handler) const; private: - Type(TypeSystemKind kind, ThreadNum threadnum, TypeNum num) : Idx_ {kind, threadnum, num} {}; - TypeNum Number() const; - TypeIndex Idx_; + TypeNum Idx_; friend class TypeSystem; - friend class TypeParam; + friend class TypeArg; friend class ParametricType; friend class PandaTypes; friend class TypeSet; @@ -125,7 +97,14 @@ private: friend class TypeImage; friend struct std::hash; + + friend bool IsSubtype(Type, Type, TypeSystem const *); + friend void MakeSubtype(Type, Type, TypeSystem *); }; + +bool IsSubtype(Type 
lhs, Type rhs, TypeSystem const *tsys); +void MakeSubtype(Type sub, Type super, TypeSystem *tsys); + } // namespace panda::verifier namespace std { diff --git a/verification/type/type_type_inl.h b/verification/type/type_type_inl.h index a76b08d2538d8d4f7307cdb6c13bfa0476781e14..0219cb045eb61acaf70fce3329ec049378f45975 100644 --- a/verification/type/type_type_inl.h +++ b/verification/type/type_type_inl.h @@ -16,8 +16,8 @@ #ifndef PANDA_TYPE_TYPE_INL_HPP__ #define PANDA_TYPE_TYPE_INL_HPP__ +#include "type_arg.h" #include "type_system.h" -#include "type_param.h" #include "type_type.h" #include "verification/util/lazy.h" @@ -26,16 +26,16 @@ namespace panda::verifier { // TODO(vdyadov): implement template -void Type::ForAllSupertypes(Handler &&handler) const +void Type::ForAllSupertypes(TypeSystem const *tsys, Handler &&handler) const { - GetTypeSystem().ForAllSupertypesOf(*this, std::move(handler)); + tsys->ForAllSupertypesOf(*this, std::move(handler)); } template -void Type::ForAllSupertypesOfSort(SortIdx sort, Handler &&handler) const +void Type::ForAllSupertypesOfSort(TypeSystem const *tsys, SortIdx sort, Handler &&handler) const { - ForAllSupertypes([this, &sort, &handler](const Type &type) { - if (type.Sort() == sort) { + ForAllSupertypes(tsys, [this, tsys, &sort, &handler](const Type &type) { + if (type.Sort(tsys) == sort) { return handler(type); } return true; @@ -43,16 +43,16 @@ void Type::ForAllSupertypesOfSort(SortIdx sort, Handler &&handler) const } template -void Type::ForAllSubtypes(Handler &&handler) const +void Type::ForAllSubtypes(TypeSystem const *tsys, Handler &&handler) const { - GetTypeSystem().ForAllSubtypesOf(*this, std::move(handler)); + tsys->ForAllSubtypesOf(*this, std::move(handler)); } template -void Type::ForAllSubtypesOfSort(SortIdx sort, Handler &&handler) const +void Type::ForAllSubtypesOfSort(TypeSystem const *tsys, SortIdx sort, Handler &&handler) const { - ForAllSubtypes([this, &sort, &handler](const Type &type) { - if (type.Sort() == 
sort) { + ForAllSubtypes(tsys, [this, tsys, &sort, &handler](const Type &type) { + if (type.Sort(tsys) == sort) { return handler(type); } return true; diff --git a/verification/value/abstract_type.h b/verification/value/abstract_type.h index d351600d0de5e235d3f28f9b7f3e8146533780be..65b948b429e32cf1c80f1187b7cc541cdf5a192b 100644 --- a/verification/value/abstract_type.h +++ b/verification/value/abstract_type.h @@ -18,6 +18,7 @@ #include "verification/value/variables.h" #include "verification/type/type_set.h" +#include "verification/type/type_system.h" #include "macros.h" @@ -101,49 +102,18 @@ public: return std::holds_alternative(Contents_); } - bool IsConsistent() const + bool IsConsistent(TypeSystem const *tsys) const { if (IsType()) { - return !GetType().IsTop(); + return !tsys->IsTop(GetType()); } else if (IsTypeSet()) { Type the_only_type = GetTypeSet().TheOnlyType(); - return !(the_only_type.IsValid() && the_only_type.IsTop()); + return !(the_only_type.IsValid() && tsys->IsTop(the_only_type)); } else { return false; } } - AbstractType operator&(const AbstractType &rhs) const - { - if (IsType()) { - if (rhs.IsType()) { - Type lhs_type = GetType(); - Type rhs_type = rhs.GetType(); - if (lhs_type <= rhs_type) { - return rhs_type; - } else if (rhs_type <= lhs_type) { - return lhs_type; - } else { - return lhs_type & rhs_type; - } - } else if (rhs.IsTypeSet()) { - return MergeTypeAndTypeSet(GetType(), rhs.GetTypeSet()); - } else { - UNREACHABLE(); - } - } else if (IsTypeSet()) { - if (rhs.IsType()) { - return MergeTypeAndTypeSet(rhs.GetType(), GetTypeSet()); - } else if (rhs.IsTypeSet()) { - return GetTypeSet() & rhs.GetTypeSet(); - } else { - UNREACHABLE(); - } - } else { - UNREACHABLE(); - } - } - template PandaString Image(TypeImageFunc type_img_func) const { @@ -187,15 +157,51 @@ public: private: ContentsData Contents_; - AbstractType MergeTypeAndTypeSet(Type type, const TypeSet &type_set) const - { - if (type_set.Contains(type)) { - return type; + friend 
AbstractType AtpJoin(AbstractType const *lhs, AbstractType const *rhs, TypeSystem const *tsys); +}; + +static AbstractType MergeTypeAndTypeSet(Type type, TypeSet const *type_set, TypeSystem const *tsys) +{ + if (type_set->Contains(type)) { + return type; + } else { + auto supertypes = type.AllSupertypes(tsys); + return TsIntersection(&supertypes, type_set, tsys); + } +} + +inline AbstractType AtpJoin(AbstractType const *lhs, AbstractType const *rhs, TypeSystem const *tsys) +{ + if (lhs->IsType()) { + if (rhs->IsType()) { + Type lhs_type = lhs->GetType(); + Type rhs_type = rhs->GetType(); + if (IsSubtype(lhs_type, rhs_type, tsys)) { + return *rhs; + } else if (IsSubtype(rhs_type, lhs_type, tsys)) { + return *lhs; + } else { + auto lhs_supers = lhs_type.AllSupertypes(tsys); + auto rhs_supers = rhs_type.AllSupertypes(tsys); + return TsIntersection(&lhs_supers, &rhs_supers, tsys); + } + } else if (rhs->IsTypeSet()) { + return MergeTypeAndTypeSet(lhs->GetType(), &rhs->GetTypeSet(), tsys); } else { - return type & type_set; + UNREACHABLE(); } + } else if (lhs->IsTypeSet()) { + if (rhs->IsType()) { + return MergeTypeAndTypeSet(rhs->GetType(), &lhs->GetTypeSet(), tsys); + } else if (rhs->IsTypeSet()) { + return TsIntersection(&lhs->GetTypeSet(), &rhs->GetTypeSet(), tsys); + } else { + UNREACHABLE(); + } + } else { + UNREACHABLE(); } -}; +} } // namespace panda::verifier diff --git a/verification/value/abstract_typed_value.h b/verification/value/abstract_typed_value.h index 95e34a297abc3a32f0d9a5fbf5a17bb0feea33b4..966797f3672fb53299c705f14a42759651e004e7 100644 --- a/verification/value/abstract_typed_value.h +++ b/verification/value/abstract_typed_value.h @@ -76,16 +76,9 @@ public: { return Value_; } - AbstractTypedValue operator&(const AbstractTypedValue &rhs) const + bool IsConsistent(TypeSystem const *tsys) const { - if (Origin_.IsValid() && rhs.Origin_.IsValid() && Origin_ == rhs.Origin_) { - return {Type_ & rhs.GetAbstractType(), Value_ & rhs.GetAbstractValue(), 
Origin_}; - } - return {Type_ & rhs.GetAbstractType(), Value_ & rhs.GetAbstractValue()}; - } - bool IsConsistent() const - { - return Type_.IsConsistent(); + return Type_.IsConsistent(tsys); } ValueOrigin &GetOrigin() { @@ -114,8 +107,19 @@ private: AbstractValue Value_; AbstractType Type_; ValueOrigin Origin_; + + friend AbstractTypedValue AtvJoin(AbstractTypedValue const *, AbstractTypedValue const *, TypeSystem const *); }; +inline AbstractTypedValue AtvJoin(AbstractTypedValue const *lhs, AbstractTypedValue const *rhs, TypeSystem const *tsys) +{ + if (lhs->Origin_.IsValid() && rhs->Origin_.IsValid() && lhs->Origin_ == rhs->Origin_) { + return {AtpJoin(&lhs->Type_, &rhs->GetAbstractType(), tsys), lhs->Value_ & rhs->GetAbstractValue(), + lhs->Origin_}; + } + return {AtpJoin(&lhs->Type_, &rhs->GetAbstractType(), tsys), lhs->Value_ & rhs->GetAbstractValue()}; +} + } // namespace panda::verifier #endif // !_PANDA_VERIFIER_ABSTRACT_TYPED_VALUE_HPP diff --git a/verification/value/tests/abstract_typed_value_test.cpp b/verification/value/tests/abstract_typed_value_test.cpp index 11b0595a9cfcfa60eb2ebffe962af2ae951a6dc4..33096bc8c002059593d0ea28499b1e21f10200c1 100644 --- a/verification/value/tests/abstract_typed_value_test.cpp +++ b/verification/value/tests/abstract_typed_value_test.cpp @@ -33,26 +33,31 @@ TEST_F(VerifierTest, AbstractTypedValue) auto Top = type_system.Top(); - auto i8 = type_system.Parametric(sort["i8"])(); - auto i16 = type_system.Parametric(sort["i16"])(); - auto i32 = type_system.Parametric(sort["i32"])(); - auto i64 = type_system.Parametric(sort["i64"])(); + auto i8 = type_system.Parametric(sort["i8"]).WithTypeArgs({}, &type_system); + ; + auto i16 = type_system.Parametric(sort["i16"]).WithTypeArgs({}, &type_system); + auto i32 = type_system.Parametric(sort["i32"]).WithTypeArgs({}, &type_system); + auto i64 = type_system.Parametric(sort["i64"]).WithTypeArgs({}, &type_system); - i8 << i16 << i32 << i64; + MakeSubtype(i8, i16, &type_system); + 
MakeSubtype(i16, i32, &type_system); + MakeSubtype(i32, i64, &type_system); - auto u8 = type_system.Parametric(sort["u8"])(); - auto u16 = type_system.Parametric(sort["u16"])(); - auto u32 = type_system.Parametric(sort["u32"])(); - auto u64 = type_system.Parametric(sort["u64"])(); + auto u8 = type_system.Parametric(sort["u8"]).WithTypeArgs({}, &type_system); + auto u16 = type_system.Parametric(sort["u16"]).WithTypeArgs({}, &type_system); + auto u32 = type_system.Parametric(sort["u32"]).WithTypeArgs({}, &type_system); + auto u64 = type_system.Parametric(sort["u64"]).WithTypeArgs({}, &type_system); - u8 << u16 << u32 << u64; + MakeSubtype(u8, u16, &type_system); + MakeSubtype(u16, u32, &type_system); + MakeSubtype(u32, u64, &type_system); auto nv = [&variables] { return variables.NewVar(); }; AbstractTypedValue av1 {i16, nv()}; AbstractTypedValue av2 {i32, nv()}; - auto av3 = av1 & av2; + auto av3 = AtvJoin(&av1, &av2, &type_system); auto t3 = av3.GetAbstractType().GetType(); @@ -60,7 +65,7 @@ TEST_F(VerifierTest, AbstractTypedValue) AbstractTypedValue av4 {u16, nv()}; - auto av5 = av1 & av4; + auto av5 = AtvJoin(&av1, &av4, &type_system); auto t5 = av5.GetAbstractType().GetType(); diff --git a/verification/verification.gni b/verification/verification.gni index e7d510cb12fd097d64d4002c0c3494e59a515b32..5bc754c2533b2803a51970c1526d2da1ff5c13ba 100644 --- a/verification/verification.gni +++ b/verification/verification.gni @@ -14,11 +14,9 @@ import("//ark/runtime_core/ark_config.gni") type_sources = [ - "$ark_root/verification/type/type_param.cpp", + "$ark_root/verification/type/type_arg.cpp", "$ark_root/verification/type/type_parametric.cpp", - "$ark_root/verification/type/type_params.cpp", "$ark_root/verification/type/type_set.cpp", - "$ark_root/verification/type/type_systems.cpp", "$ark_root/verification/type/type_type.cpp", ] @@ -64,9 +62,11 @@ verifier_cache_sources = [ "$ark_root/verification/cache/results_cache.cpp" ] verifier_sources = [ - 
"$ark_root/verification/public.cpp", - "$ark_root/verification/verifier_messages_data.cpp", - "$ark_root/verification/verification_options.cpp", + "$ark_root/verification/default_plugin.cpp", + "$ark_root/verification/plugins.cpp", + "$ark_root/verification/public.cpp", + "$ark_root/verification/verification_options.cpp", + "$ark_root/verification/verifier_messages_data.cpp", ] verifier_sources += type_sources verifier_sources += cflow_sources