|
1 | 1 | /*
|
2 |
| - * Copyright (c) 2009, 2024, Oracle and/or its affiliates. All rights reserved. |
| 2 | + * Copyright (c) 2009, 2025, Oracle and/or its affiliates. All rights reserved. |
3 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
4 | 4 | *
|
5 | 5 | * This code is free software; you can redistribute it and/or modify it
|
@@ -2276,10 +2276,10 @@ public static class VexRVMOp extends VexOp {
|
public static final VexRVMOp VPACKSSWB = new VexRVMOp("VPACKSSWB", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0x63, VEXOpAssertion.AVX1_AVX2_AVX512BW_VL, EVEXTuple.FVM, VEXPrefixConfig.WIG);
public static final VexRVMOp VADDSUBPS = new VexRVMOp("VADDSUBPS", VEXPrefixConfig.P_F2, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xD0, VEXOpAssertion.AVX1);
public static final VexRVMOp VADDSUBPD = new VexRVMOp("VADDSUBPD", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xD0, VEXOpAssertion.AVX1);
// Bitwise ops use EVEX W0 here, i.e. the doubleword (VPANDD-style) EVEX form.
// The quadword (W1) EVEX forms are defined separately as EVPANDQ/EVPANDNQ/EVPORQ/EVPXORQ,
// so these constants stay usable for AVX1/AVX2 while mapping to the D-variant under EVEX.
public static final VexRVMOp VPAND = new VexRVMOp("VPAND", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xDB, VEXOpAssertion.AVX1_AVX2_AVX512F_VL, EVEXTuple.FVM, VEXPrefixConfig.W0);
public static final VexRVMOp VPANDN = new VexRVMOp("VPANDN", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xDF, VEXOpAssertion.AVX1_AVX2_AVX512F_VL, EVEXTuple.FVM, VEXPrefixConfig.W0);
public static final VexRVMOp VPOR = new VexRVMOp("VPOR", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xEB, VEXOpAssertion.AVX1_AVX2_AVX512F_VL, EVEXTuple.FVM, VEXPrefixConfig.W0);
public static final VexRVMOp VPXOR = new VexRVMOp("VPXOR", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xEF, VEXOpAssertion.AVX1_AVX2_AVX512F_VL, EVEXTuple.FVM, VEXPrefixConfig.W0);
public static final VexRVMOp VPADDB = new VexRVMOp("VPADDB", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xFC, VEXOpAssertion.AVX1_AVX2_AVX512BW_VL, EVEXTuple.FVM, VEXPrefixConfig.WIG);
public static final VexRVMOp VPADDW = new VexRVMOp("VPADDW", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xFD, VEXOpAssertion.AVX1_AVX2_AVX512BW_VL, EVEXTuple.FVM, VEXPrefixConfig.WIG);
public static final VexRVMOp VPADDD = new VexRVMOp("VPADDD", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xFE, VEXOpAssertion.AVX1_AVX2_AVX512F_VL, EVEXTuple.FVM, VEXPrefixConfig.W0);
|
@@ -2375,10 +2375,14 @@ public static class VexRVMOp extends VexOp {
|
public static final VexRVMOp EVMAXSD = new VexRVMOp("EVMAXSD", VMAXSD);
public static final VexRVMOp EVPACKUSDW = new VexRVMOp("EVPACKUSDW", VPACKUSDW);
public static final VexRVMOp EVPACKUSWB = new VexRVMOp("EVPACKUSWB", VPACKUSWB);
// EVEX bitwise ops come in explicit doubleword (D, W0) and quadword (Q, W1) variants.
// The D variants alias the VEX definitions above (which carry W0 as their EVEX width);
// the Q variants are EVEX-only (trailing 'true'), AVX512F_VL, with W1 selecting 64-bit lanes.
public static final VexRVMOp EVPANDD = new VexRVMOp("EVPANDD", VPAND);
public static final VexRVMOp EVPANDQ = new VexRVMOp("EVPANDQ", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xDB, VEXOpAssertion.AVX512F_VL, EVEXTuple.FVM, VEXPrefixConfig.W1, true);
public static final VexRVMOp EVPANDND = new VexRVMOp("EVPANDND", VPANDN);
public static final VexRVMOp EVPANDNQ = new VexRVMOp("EVPANDNQ", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xDF, VEXOpAssertion.AVX512F_VL, EVEXTuple.FVM, VEXPrefixConfig.W1, true);
public static final VexRVMOp EVPORD = new VexRVMOp("EVPORD", VPOR);
public static final VexRVMOp EVPORQ = new VexRVMOp("EVPORQ", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xEB, VEXOpAssertion.AVX512F_VL, EVEXTuple.FVM, VEXPrefixConfig.W1, true);
public static final VexRVMOp EVPXORD = new VexRVMOp("EVPXORD", VPXOR);
public static final VexRVMOp EVPXORQ = new VexRVMOp("EVPXORQ", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F, VEXPrefixConfig.WIG, 0xEF, VEXOpAssertion.AVX512F_VL, EVEXTuple.FVM, VEXPrefixConfig.W1, true);
public static final VexRVMOp EVPADDB = new VexRVMOp("EVPADDB", VPADDB);
public static final VexRVMOp EVPADDW = new VexRVMOp("EVPADDW", VPADDW);
public static final VexRVMOp EVPADDD = new VexRVMOp("EVPADDD", VPADDD);
|
@@ -3145,6 +3149,71 @@ public void emit(AMD64Assembler asm, AVXSize size, Register dst, Register src1,
|
3145 | 3149 | }
|
3146 | 3150 | }
|
3147 | 3151 |
|
| 3152 | + /** |
| 3153 | + * VEX-encoded comparison operation with an operand order of RVMI. The immediate operand is a |
| 3154 | + * comparison operator. |
| 3155 | + */ |
| 3156 | + public static final class VexIntegerCompareOp extends VexOp { |
| 3157 | + // @formatter:off |
| 3158 | + public static final VexIntegerCompareOp EVPCMPB = new VexIntegerCompareOp("EVPCMPB", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F3A, VEXPrefixConfig.W0, 0x3F, VEXOpAssertion.MASK_XMM_XMM_AVX512BW_VL, EVEXTuple.FVM); |
| 3159 | + public static final VexIntegerCompareOp EVPCMPW = new VexIntegerCompareOp("EVPCMPW", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F3A, VEXPrefixConfig.W1, 0x3F, VEXOpAssertion.MASK_XMM_XMM_AVX512BW_VL, EVEXTuple.FVM); |
| 3160 | + public static final VexIntegerCompareOp EVPCMPD = new VexIntegerCompareOp("EVPCMPD", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F3A, VEXPrefixConfig.W0, 0x1F, VEXOpAssertion.MASK_XMM_XMM_AVX512F_VL, EVEXTuple.FVM); |
| 3161 | + public static final VexIntegerCompareOp EVPCMPQ = new VexIntegerCompareOp("EVPCMPQ", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F3A, VEXPrefixConfig.W1, 0x1F, VEXOpAssertion.MASK_XMM_XMM_AVX512F_VL, EVEXTuple.FVM); |
| 3162 | + |
| 3163 | + public static final VexIntegerCompareOp EVPCMPUB = new VexIntegerCompareOp("EVPCMPUB", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F3A, VEXPrefixConfig.W0, 0x3E, VEXOpAssertion.MASK_XMM_XMM_AVX512BW_VL, EVEXTuple.FVM); |
| 3164 | + public static final VexIntegerCompareOp EVPCMPUW = new VexIntegerCompareOp("EVPCMPUW", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F3A, VEXPrefixConfig.W1, 0x3E, VEXOpAssertion.MASK_XMM_XMM_AVX512BW_VL, EVEXTuple.FVM); |
| 3165 | + public static final VexIntegerCompareOp EVPCMPUD = new VexIntegerCompareOp("EVPCMPUD", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F3A, VEXPrefixConfig.W0, 0x1E, VEXOpAssertion.MASK_XMM_XMM_AVX512F_VL, EVEXTuple.FVM); |
| 3166 | + public static final VexIntegerCompareOp EVPCMPUQ = new VexIntegerCompareOp("EVPCMPUQ", VEXPrefixConfig.P_66, VEXPrefixConfig.M_0F3A, VEXPrefixConfig.W1, 0x1E, VEXOpAssertion.MASK_XMM_XMM_AVX512F_VL, EVEXTuple.FVM); |
| 3167 | + // @formatter:on |
| 3168 | + |
| 3169 | + public enum Predicate { |
| 3170 | + EQ(0), |
| 3171 | + LT(1), |
| 3172 | + LE(2), |
| 3173 | + FALSE(3), |
| 3174 | + NEQ(4), |
| 3175 | + NLT(5), |
| 3176 | + NLE(6), |
| 3177 | + TRUE(7); |
| 3178 | + |
| 3179 | + private int imm8; |
| 3180 | + |
| 3181 | + Predicate(int imm8) { |
| 3182 | + this.imm8 = imm8; |
| 3183 | + } |
| 3184 | + |
| 3185 | + public static Predicate getPredicate(Condition condition) { |
| 3186 | + return switch (condition) { |
| 3187 | + case EQ -> EQ; |
| 3188 | + case NE -> NEQ; |
| 3189 | + case LT, BT -> LT; |
| 3190 | + case LE, BE -> LE; |
| 3191 | + case GT, AT -> NLE; |
| 3192 | + case GE, AE -> NLT; |
| 3193 | + default -> throw GraalError.shouldNotReachHereUnexpectedValue(condition); |
| 3194 | + }; |
| 3195 | + } |
| 3196 | + } |
| 3197 | + |
| 3198 | + private VexIntegerCompareOp(String opcode, int pp, int mmmmm, int wEvex, int op, VEXOpAssertion assertion, EVEXTuple evexTuple) { |
| 3199 | + super(opcode, pp, mmmmm, wEvex, op, assertion, evexTuple, wEvex, true); |
| 3200 | + } |
| 3201 | + |
| 3202 | + public void emit(AMD64Assembler asm, AVXSize size, Register dst, Register src1, Register src2, Register mask, Predicate p) { |
| 3203 | + emitVexOrEvex(asm, dst, src1, src2, mask, size, pp, mmmmm, w, wEvex, Z0, B0); |
| 3204 | + asm.emitByte(op); |
| 3205 | + asm.emitModRM(dst, src2); |
| 3206 | + asm.emitByte(p.imm8); |
| 3207 | + } |
| 3208 | + |
| 3209 | + public void emit(AMD64Assembler asm, AVXSize size, Register dst, Register src1, AMD64Address src2, Register mask, Predicate p, int b) { |
| 3210 | + emitVexOrEvex(asm, dst, src1, src2, mask, size, pp, mmmmm, w, wEvex, Z0, b); |
| 3211 | + asm.emitByte(op); |
| 3212 | + asm.emitOperandHelper(dst, src2, 1, getDisp8Scale(isEvex, size)); |
| 3213 | + asm.emitByte(p.imm8); |
| 3214 | + } |
| 3215 | + } |
| 3216 | + |
3148 | 3217 | /**
|
3149 | 3218 | * VEX-encoded comparison operation with an operand order of RVMI. The immediate operand is a
|
3150 | 3219 | * comparison operator.
|
@@ -3262,14 +3331,22 @@ public VexFloatCompareOp encoding(AMD64SIMDInstructionEncoding encoding) {
|
3262 | 3331 | }
|
3263 | 3332 |
|
// Unmasked register-register form: delegates to the masked overload with no opmask.
public void emit(AMD64Assembler asm, AVXSize size, Register dst, Register src1, Register src2, Predicate p) {
    emit(asm, size, dst, src1, src2, Register.None, p);
}

// Unmasked register-memory form: no opmask, no broadcast (B0).
public void emit(AMD64Assembler asm, AVXSize size, Register dst, Register src1, AMD64Address src2, Predicate p) {
    emit(asm, size, dst, src1, src2, Register.None, p, B0);
}

// Masked register-register form: prefix, opcode, ModRM, then the predicate imm8.
public void emit(AMD64Assembler asm, AVXSize size, Register dst, Register src1, Register src2, Register mask, Predicate p) {
    emitVexOrEvex(asm, dst, src1, src2, mask, size, pp, mmmmm, w, wEvex, Z0, B0);
    asm.emitByte(op);
    asm.emitModRM(dst, src2);
    asm.emitByte(p.imm8);
}

// Masked register-memory form; 'b' selects EVEX broadcast. The additional-input size of 1
// passed to emitOperandHelper accounts for the trailing imm8 when compressing disp8.
public void emit(AMD64Assembler asm, AVXSize size, Register dst, Register src1, AMD64Address src2, Register mask, Predicate p, int b) {
    emitVexOrEvex(asm, dst, src1, src2, mask, size, pp, mmmmm, w, wEvex, Z0, b);
    asm.emitByte(op);
    asm.emitOperandHelper(dst, src2, 1, getDisp8Scale(isEvex, size));
    asm.emitByte(p.imm8);
}
|
@@ -6199,6 +6276,6 @@ public final void evpternlogq(Register dst, int imm8, Register src1, Register sr
|
6199 | 6276 | }
|
6200 | 6277 |
|
/**
 * Emits {@code EVPXORQ dst{mask} = nds ^ [src]} at ZMM width (quadword-element EVEX XOR).
 */
public final void evpxorq(Register dst, Register mask, Register nds, AMD64Address src) {
    VexRVMOp.EVPXORQ.emit(this, AVXSize.ZMM, dst, nds, src, mask);
}
|
6204 | 6281 | }
|
0 commit comments