FEAT(loongarch): Support loongarch with 11.0.22
--story=117064695
shouhuanxiaoji authored and shiyuexw committed Apr 19, 2024
1 parent ad5353a commit e47c9d8
Showing 316 changed files with 113,186 additions and 204 deletions.
3 changes: 3 additions & 0 deletions make/CompileJavaModules.gmk
@@ -430,13 +430,15 @@ jdk.internal.vm.ci_ADD_JAVAC_FLAGS += -parameters -Xlint:-exports -XDstringConca

jdk.internal.vm.compiler_ADD_JAVAC_FLAGS += -parameters -XDstringConcat=inline \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.aarch64=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.loongarch64=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.amd64=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.code=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.code.site=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.code.stack=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.common=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.aarch64=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.loongarch64=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.amd64=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.sparc=jdk.internal.vm.compiler \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.meta=jdk.internal.vm.compiler \
@@ -456,6 +458,7 @@ jdk.internal.vm.compiler_EXCLUDES += \
org.graalvm.compiler.api.directives.test \
org.graalvm.compiler.api.test \
org.graalvm.compiler.asm.aarch64.test \
org.graalvm.compiler.asm.loongarch64.test \
org.graalvm.compiler.asm.amd64.test \
org.graalvm.compiler.asm.sparc.test \
org.graalvm.compiler.asm.test \
31 changes: 29 additions & 2 deletions make/autoconf/hotspot.m4
@@ -34,6 +34,12 @@ DEPRECATED_JVM_FEATURES="trace"
# All valid JVM variants
VALID_JVM_VARIANTS="server client minimal core zero custom"

#
# This file has been modified by Loongson Technology in 2021. These
# modifications are Copyright (c) 2020, 2021, Loongson Technology, and are made
# available on the same license terms set forth above.
#

###############################################################################
# Check if the specified JVM variant should be built. To be used in shell if
# constructs, like this:
@@ -340,6 +346,26 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
HOTSPOT_TARGET_CPU_ARCH=arm
fi
# Override hotspot cpu definitions for MIPS and LOONGARCH platforms
if test "x$OPENJDK_TARGET_CPU" = xmips64el && test "x$HOTSPOT_TARGET_CPU" != xzero; then
HOTSPOT_TARGET_CPU=mips_64
HOTSPOT_TARGET_CPU_ARCH=mips
elif test "x$OPENJDK_TARGET_CPU" = xloongarch64 && test "x$HOTSPOT_TARGET_CPU" != xzero; then
HOTSPOT_TARGET_CPU=loongarch_64
HOTSPOT_TARGET_CPU_ARCH=loongarch
fi
# Disable compiler1 on linux-mips and linux-loongarch
if ! (HOTSPOT_CHECK_JVM_FEATURE(compiler1)); then
AC_MSG_CHECKING([if compiler1 should be built, $JVM_FEATURES])
if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$HOTSPOT_TARGET_CPU_ARCH" = "xmips"; then
DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES compiler1"
AC_MSG_RESULT([no, platform not supported])
else
AC_MSG_RESULT([yes])
fi
fi
# Verify that dependencies are met for explicitly set features.
if HOTSPOT_CHECK_JVM_FEATURE(jvmti) && ! HOTSPOT_CHECK_JVM_FEATURE(services); then
AC_MSG_ERROR([Specified JVM feature 'jvmti' requires feature 'services'])
@@ -424,10 +450,11 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
JVM_FEATURES_jvmci=""
INCLUDE_JVMCI="false"
else
# Only enable jvmci on x86_64, sparcv9 and aarch64
# Only enable jvmci on x86_64, sparcv9, aarch64 and loongarch64
if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
test "x$OPENJDK_TARGET_CPU" = "xsparcv9" || \
test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
test "x$OPENJDK_TARGET_CPU" = "xaarch64" || \
test "x$OPENJDK_TARGET_CPU" = "xloongarch64" ; then
AC_MSG_RESULT([yes])
JVM_FEATURES_jvmci="jvmci"
INCLUDE_JVMCI="true"
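Enabling JVMCI for loongarch64 here pairs with the new --add-exports entries for jdk.vm.ci.loongarch64 and jdk.vm.ci.hotspot.loongarch64 added to make/CompileJavaModules.gmk above, which expose the new JVMCI backend packages to the jdk.internal.vm.compiler (Graal) module.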
12 changes: 12 additions & 0 deletions make/autoconf/platform.m4
@@ -23,6 +23,12 @@
# questions.
#

#
# This file has been modified by Loongson Technology in 2021. These
# modifications are Copyright (c) 2018, 2021, Loongson Technology, and are made
# available on the same license terms set forth above.
#

# Support macro for PLATFORM_EXTRACT_TARGET_AND_BUILD.
# Converts autoconf style CPU name to OpenJDK style, into
# VAR_CPU, VAR_CPU_ARCH, VAR_CPU_BITS and VAR_CPU_ENDIAN.
@@ -554,6 +560,12 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HELPER],
HOTSPOT_$1_CPU_DEFINE=PPC64
elif test "x$OPENJDK_$1_CPU" = xppc64le; then
HOTSPOT_$1_CPU_DEFINE=PPC64
elif test "x$OPENJDK_$1_CPU" = xmips64; then
HOTSPOT_$1_CPU_DEFINE=MIPS64
elif test "x$OPENJDK_$1_CPU" = xmips64el; then
HOTSPOT_$1_CPU_DEFINE=MIPS64
elif test "x$OPENJDK_$1_CPU" = xloongarch64; then
HOTSPOT_$1_CPU_DEFINE=LOONGARCH64
# The cpu defines below are for zero, we don't support them directly.
elif test "x$OPENJDK_$1_CPU" = xsparc; then
8 changes: 7 additions & 1 deletion src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -1123,7 +1123,9 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
}
}


void LIR_Assembler::emit_opCmpBranch(LIR_OpCmpBranch* op) {
ShouldNotReachHere();
}

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
LIR_Opr src = op->in_opr();
@@ -1663,6 +1665,10 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
__ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
}

void LIR_Assembler::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type) {
ShouldNotReachHere();
}

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

19 changes: 15 additions & 4 deletions src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
@@ -260,18 +260,29 @@ void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
__ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
template<typename T>
void LIRGenerator::cmp_mem_int_branch(LIR_Condition condition, LIR_Opr base, int disp, int c, T tgt, CodeEmitInfo* info) {
LIR_Opr reg = new_register(T_INT);
__ load(generate_address(base, disp, T_INT), reg, info);
__ cmp(condition, reg, LIR_OprFact::intConst(c));
__ cmp_branch(condition, reg, LIR_OprFact::intConst(c), T_INT, tgt);
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
// Explicit instantiation for all supported types.
template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, Label*, CodeEmitInfo*);
template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, BlockBegin*, CodeEmitInfo*);
template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, CodeStub*, CodeEmitInfo*);

template<typename T>
void LIRGenerator::cmp_reg_mem_branch(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, T tgt, CodeEmitInfo* info) {
LIR_Opr reg1 = new_register(T_INT);
__ load(generate_address(base, disp, type), reg1, info);
__ cmp(condition, reg, reg1);
__ cmp_branch(condition, reg, reg1, type, tgt);
}

// Explicit instantiation for all supported types.
template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, Label*, CodeEmitInfo*);
template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, BlockBegin*, CodeEmitInfo*);
template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, CodeStub*, CodeEmitInfo*);

bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {

21 changes: 21 additions & 0 deletions src/hotspot/cpu/aarch64/c1_LIR_aarch64.cpp
@@ -52,3 +52,24 @@ void LIR_Address::verify() const {
"wrong type for addresses");
}
#endif // PRODUCT

template<typename T>
void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BasicType type, T tgt, CodeEmitInfo* info) {
cmp(condition, left, right, info);
branch(condition, type, tgt);
}

// Explicit instantiation for all supported types.
template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BasicType type, Label*, CodeEmitInfo*);
template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BasicType type, BlockBegin*, CodeEmitInfo*);
template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BasicType type, CodeStub*, CodeEmitInfo*);

void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BasicType type, BlockBegin* block, BlockBegin* unordered) {
cmp(condition, left, right);
branch(condition, type, block, unordered);
}

void LIR_List::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
cmp(condition, left, right);
cmove(condition, src1, src2, dst, type);
}
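
The cmp_branch and cmp_cmove helpers above fuse what shared C1 code previously emitted as a separate cmp followed by a branch or cmove. On flag-based ports such as AArch64 and ARM they simply expand back into that pair, while a port whose integer branches compare registers directly (as LoongArch's do) can override them with a single compare-and-branch op. The following is a minimal standalone sketch of the pattern using stand-in types, not the real HotSpot classes:

// Standalone illustration (not HotSpot code) of a templated cmp_branch()
// that forwards to the existing cmp() + branch() pair and is explicitly
// instantiated for each supported branch-target type.
#include <cstdio>

struct Label {};        // stand-ins for HotSpot's branch-target types
struct BlockBegin {};
struct CodeStub {};

struct LIR_List {
  void cmp(int cond, int left, int right) {
    std::printf("cmp cond=%d, %d vs %d\n", cond, left, right);
  }
  void branch(int /*cond*/, Label*)      { std::printf("branch -> label\n"); }
  void branch(int /*cond*/, BlockBegin*) { std::printf("branch -> block\n"); }
  void branch(int /*cond*/, CodeStub*)   { std::printf("branch -> stub\n"); }

  // Fused entry point: one call per compare-and-branch site.
  template <typename T>
  void cmp_branch(int cond, int left, int right, T tgt) {
    cmp(cond, left, right);   // a flag-less port would emit one fused op here
    branch(cond, tgt);
  }
};

// Explicit instantiations for the supported branch-target types,
// mirroring the ones added in the patch.
template void LIR_List::cmp_branch(int, int, int, Label*);
template void LIR_List::cmp_branch(int, int, int, BlockBegin*);
template void LIR_List::cmp_branch(int, int, int, CodeStub*);

int main() {
  LIR_List lir;
  Label slow_path;
  lir.cmp_branch(/*cond=*/0, 1, 2, &slow_path);  // compare 1 vs 2, branch to slow_path
  return 0;
}

The explicit instantiations mirror the three target kinds LIR branches use (Label*, BlockBegin*, CodeStub*), so call sites in shared code can pass any of them without the template definition being visible in every translation unit.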
7 changes: 7 additions & 0 deletions src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
@@ -1150,6 +1150,9 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
__ b(*(op->label()), acond);
}

void LIR_Assembler::emit_opCmpBranch(LIR_OpCmpBranch* op) {
ShouldNotReachHere();
}

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
LIR_Opr src = op->in_opr();
@@ -3082,6 +3085,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ bind(*stub->continuation());
}

void LIR_Assembler::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr result, BasicType type) {
ShouldNotReachHere();
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
19 changes: 14 additions & 5 deletions src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
@@ -423,18 +423,27 @@ void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
__ move(temp, addr);
}


void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
template<typename T>
void LIRGenerator::cmp_mem_int_branch(LIR_Condition condition, LIR_Opr base, int disp, int c, T tgt, CodeEmitInfo* info) {
__ load(new LIR_Address(base, disp, T_INT), FrameMap::LR_opr, info);
__ cmp(condition, FrameMap::LR_opr, c);
__ cmp_branch(condition, FrameMap::LR_opr, c, T_INT, tgt);
}

// Explicit instantiation for all supported types.
template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, Label*, CodeEmitInfo*);
template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, BlockBegin*, CodeEmitInfo*);
template void LIRGenerator::cmp_mem_int_branch(LIR_Condition, LIR_Opr, int, int, CodeStub*, CodeEmitInfo*);

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
template<typename T>
void LIRGenerator::cmp_reg_mem_branch(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, T tgt, CodeEmitInfo* info) {
__ load(new LIR_Address(base, disp, type), FrameMap::LR_opr, info);
__ cmp(condition, reg, FrameMap::LR_opr);
__ cmp_branch(condition, reg, FrameMap::LR_opr, type, tgt);
}

// Explicit instantiation for all supported types.
template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, Label*, CodeEmitInfo*);
template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, BlockBegin*, CodeEmitInfo*);
template void LIRGenerator::cmp_reg_mem_branch(LIR_Condition, LIR_Opr, LIR_Opr, int, BasicType, CodeStub*, CodeEmitInfo*);

bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
assert(left != result, "should be different registers");
21 changes: 21 additions & 0 deletions src/hotspot/cpu/arm/c1_LIR_arm.cpp
@@ -84,3 +84,24 @@ void LIR_Address::verify() const {
#endif // AARCH64
}
#endif // PRODUCT

template<typename T>
void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BasicType type, T tgt, CodeEmitInfo* info) {
cmp(condition, left, right, info);
branch(condition, type, tgt);
}

// Explicit instantiation for all supported types.
template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BasicType type, Label*, CodeEmitInfo*);
template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BasicType type, BlockBegin*, CodeEmitInfo*);
template void LIR_List::cmp_branch(LIR_Condition, LIR_Opr, LIR_Opr, BasicType type, CodeStub*, CodeEmitInfo*);

void LIR_List::cmp_branch(LIR_Condition condition, LIR_Opr left, LIR_Opr right, BasicType type, BlockBegin* block, BlockBegin* unordered) {
cmp(condition, left, right);
branch(condition, type, block, unordered);
}

void LIR_List::cmp_cmove(LIR_Condition condition, LIR_Opr left, LIR_Opr right, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
cmp(condition, left, right);
cmove(condition, src1, src2, dst, type);
}
132 changes: 132 additions & 0 deletions src/hotspot/cpu/loongarch/abstractInterpreter_loongarch.cpp
@@ -0,0 +1,132 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2021, Loongson Technology. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "ci/ciMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "runtime/frame.inline.hpp"

// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
int temps,
int extra_args,
int monitors,
int callee_params,
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.

// fixed size of an interpreter frame:
int overhead = frame::java_frame_sender_sp_offset -
frame::interpreter_frame_initial_sp_offset;
// Our locals were accounted for by the caller (or last_frame_adjust
// on the transition) Since the callee parameters already account
// for the callee's params we only need to account for the extra
// locals.
int size = overhead +
(callee_locals - callee_params)*Interpreter::stackElementWords +
monitors * frame::interpreter_frame_monitor_size() +
temps* Interpreter::stackElementWords + extra_args;

return size;
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {

const int entry_size = frame::interpreter_frame_monitor_size();

// total overhead size: entry_size + (saved ebp thru expr stack bottom).
// be sure to change this if you add/subtract anything to/from the overhead area
const int overhead_size = -(frame::interpreter_frame_initial_sp_offset) + entry_size;

const int stub_code = 6; // see generate_call_stub
// return overhead_size + method->max_locals() + method->max_stack() + stub_code;
const int method_stack = (method->max_locals() + method->max_stack()) *
Interpreter::stackElementWords;
return overhead_size + method_stack + stub_code;
}

void AbstractInterpreter::layout_activation(Method* method,
int tempcount,
int popframe_extra_args,
int moncount,
int caller_actual_parameters,
int callee_param_count,
int callee_locals,
frame* caller,
frame* interpreter_frame,
bool is_top_frame,
bool is_bottom_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
// The frame interpreter_frame, if not NULL, is guaranteed to be the
// right size, as determined by a previous call to this method.
// It is also guaranteed to be walkable even though it is in a skeletal state

// fixed size of an interpreter frame:

int max_locals = method->max_locals() * Interpreter::stackElementWords;
int extra_locals = (method->max_locals() - method->size_of_parameters()) * Interpreter::stackElementWords;

#ifdef ASSERT
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif

interpreter_frame->interpreter_frame_set_method(method);
// NOTE the difference in using sender_sp and interpreter_frame_sender_sp
// interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
// and sender_sp is fp+8
intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

#ifdef ASSERT
if (caller->is_interpreted_frame()) {
assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
}
#endif

interpreter_frame->interpreter_frame_set_locals(locals);
BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
BasicObjectLock* monbot = montop - moncount;
interpreter_frame->interpreter_frame_set_monitor_end(montop - moncount);

//set last sp;
intptr_t* esp = (intptr_t*) monbot - tempcount*Interpreter::stackElementWords -
popframe_extra_args;
interpreter_frame->interpreter_frame_set_last_sp(esp);
// All frames but the initial interpreter frame we fill in have a
// value for sender_sp that allows walking the stack but isn't
// truly correct. Correct the value here.
//
if (extra_locals != 0 &&
interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
}
*interpreter_frame->interpreter_frame_cache_addr() = method->constants()->cache();
*interpreter_frame->interpreter_frame_mirror_addr() = method->method_holder()->java_mirror();
}
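
As a quick sanity check of the size_activation() arithmetic above, here is a worked example. All constants below are illustrative assumptions, not the real LoongArch frame constants:

// Worked example for the size_activation() formula above.
#include <cstdio>

int main() {
  const int overhead         = 7;  // java_frame_sender_sp_offset - initial_sp_offset (assumed)
  const int stack_elem_words = 1;  // Interpreter::stackElementWords (assumed)
  const int monitor_size     = 2;  // frame::interpreter_frame_monitor_size() (assumed)

  const int temps = 3, extra_args = 0, monitors = 1;
  const int callee_params = 2, callee_locals = 5;

  const int size = overhead
                 + (callee_locals - callee_params) * stack_elem_words
                 + monitors * monitor_size
                 + temps * stack_elem_words
                 + extra_args;

  std::printf("interpreter frame size = %d words\n", size);  // prints 15
  return 0;
}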
