@@ -55,6 +55,7 @@
import jdk.graal.compiler.asm.BranchTargetOutOfBoundsException;
import jdk.graal.compiler.asm.Label;
import jdk.graal.compiler.asm.amd64.AVXKind.AVXSize;
+ import jdk.graal.compiler.core.amd64.MemoryReadInterceptor;
import jdk.graal.compiler.core.common.GraalOptions;
import jdk.graal.compiler.core.common.NumUtil;
import jdk.graal.compiler.core.common.Stride;
@@ -73,7 +74,7 @@
/**
 * This class implements an assembler that can encode most X86 instructions.
 */
- public class AMD64Assembler extends AMD64BaseAssembler {
+ public class AMD64Assembler extends AMD64BaseAssembler implements MemoryReadInterceptor {

    public static class Options {
        // @formatter:off
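
For orientation, the sketch below shows what implementing MemoryReadInterceptor enables: a subclass can override the interception hook to observe or instrument every memory-read operand just before the instruction is encoded. The subclass, its constructor, and the filtering policy are hypothetical, and the sketch assumes interceptMemorySrcOperands(AMD64Address) is an overridable default no-op method of the interface.

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.code.TargetDescription;

// Hypothetical subclass, not part of this change; it only illustrates the hook.
class InstrumentingAMD64Assembler extends AMD64Assembler {

    InstrumentingAMD64Assembler(TargetDescription target) {
        super(target); // assumes a single-argument constructor is available
    }

    @Override
    public void interceptMemorySrcOperands(AMD64Address src) {
        // Invoked by the emit paths below right before an instruction that
        // reads from memory is encoded, so the source address is visible here.
        if (AMD64.rsp.equals(src.getBase())) {
            return; // placeholder policy: ignore stack-relative reads
        }
        // ... record the read or emit verification code for src here ...
    }
}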
@@ -474,13 +475,15 @@ public void emit(AMD64Assembler asm, OperandSize size, Register dst, Register sr
        public void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src) {
            assert verify(asm, size, dst, null);
            assert !isSSEInstruction();
+             asm.interceptMemorySrcOperands(src);
            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, 0);
            asm.emitOperandHelper(dst, src, 0);
        }

        public void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src, boolean force4Byte) {
            assert verify(asm, size, dst, null);
            assert !isSSEInstruction();
+             asm.interceptMemorySrcOperands(src);
            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, 0);
            asm.emitOperandHelper(dst, src, force4Byte, 0);
        }
@@ -589,29 +592,35 @@ public void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst) {
     */
    public static class AMD64MIOp extends AMD64ImmOp {
        // @formatter:off
-         public static final AMD64MIOp BT   = new AMD64MIOp("BT",   true,  P_0F, 0xBA, 4, OpAssertion.WordOrLargerAssertion);
-         public static final AMD64MIOp BTR  = new AMD64MIOp("BTR",  true,  P_0F, 0xBA, 6, OpAssertion.WordOrLargerAssertion);
-         public static final AMD64MIOp MOVB = new AMD64MIOp("MOVB", true,  0xC6, 0, OpAssertion.ByteAssertion);
-         public static final AMD64MIOp MOV  = new AMD64MIOp("MOV",  false, 0xC7, 0, OpAssertion.WordOrLargerAssertion);
-         public static final AMD64MIOp SAR  = new AMD64MIOp("SAR",  true,  0xC1, 7, OpAssertion.WordOrLargerAssertion);
-         public static final AMD64MIOp SHL  = new AMD64MIOp("SHL",  true,  0xC1, 4, OpAssertion.WordOrLargerAssertion);
-         public static final AMD64MIOp SHR  = new AMD64MIOp("SHR",  true,  0xC1, 5, OpAssertion.WordOrLargerAssertion);
-         public static final AMD64MIOp TEST = new AMD64MIOp("TEST", false, 0xF7, 0);
+         public static final AMD64MIOp BT   = new AMD64MIOp("BT",   true,  P_0F, 0xBA, 4, true,  OpAssertion.WordOrLargerAssertion);
+         public static final AMD64MIOp BTR  = new AMD64MIOp("BTR",  true,  P_0F, 0xBA, 6, true,  OpAssertion.WordOrLargerAssertion);
+         public static final AMD64MIOp MOVB = new AMD64MIOp("MOVB", true,  0xC6, 0, false, OpAssertion.ByteAssertion);
+         public static final AMD64MIOp MOV  = new AMD64MIOp("MOV",  false, 0xC7, 0, false, OpAssertion.WordOrLargerAssertion);
+         public static final AMD64MIOp SAR  = new AMD64MIOp("SAR",  true,  0xC1, 7, true,  OpAssertion.WordOrLargerAssertion);
+         public static final AMD64MIOp SHL  = new AMD64MIOp("SHL",  true,  0xC1, 4, true,  OpAssertion.WordOrLargerAssertion);
+         public static final AMD64MIOp SHR  = new AMD64MIOp("SHR",  true,  0xC1, 5, true,  OpAssertion.WordOrLargerAssertion);
+         public static final AMD64MIOp TEST = new AMD64MIOp("TEST", false, 0xF7, 0, true);
        // @formatter:on

        private final int ext;
+         /**
+          * Defines if the Op reads from memory and makes the result observable by the user (e.g.
+          * spilling to a register or in a flag).
+          */
+         private final boolean isMemRead;

-         protected AMD64MIOp(String opcode, boolean immIsByte, int op, int ext) {
-             this(opcode, immIsByte, op, ext, OpAssertion.WordOrLargerAssertion);
+         protected AMD64MIOp(String opcode, boolean immIsByte, int op, int ext, boolean isMemRead) {
+             this(opcode, immIsByte, op, ext, isMemRead, OpAssertion.WordOrLargerAssertion);
        }

-         protected AMD64MIOp(String opcode, boolean immIsByte, int op, int ext, OpAssertion assertion) {
-             this(opcode, immIsByte, 0, op, ext, assertion);
+         protected AMD64MIOp(String opcode, boolean immIsByte, int op, int ext, boolean isMemRead, OpAssertion assertion) {
+             this(opcode, immIsByte, 0, op, ext, isMemRead, assertion);
        }

-         protected AMD64MIOp(String opcode, boolean immIsByte, int prefix, int op, int ext, OpAssertion assertion) {
+         protected AMD64MIOp(String opcode, boolean immIsByte, int prefix, int op, int ext, boolean isMemRead, OpAssertion assertion) {
            super(opcode, immIsByte, prefix, op, assertion);
            this.ext = ext;
+             this.isMemRead = isMemRead;
        }

        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, int imm) {
@@ -631,22 +640,29 @@ public final void emit(AMD64Assembler asm, OperandSize size, Register dst, int i
            }
        }

-         public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, int imm) {
-             emit(asm, size, dst, imm, false);
+         public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address address, int imm) {
+             emit(asm, size, address, imm, false);
        }

-         public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, int imm, boolean annotateImm) {
+         public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address address, int imm, boolean annotateImm) {
            assert verify(asm, size, null, null);
+             if (isMemRead) {
+                 asm.interceptMemorySrcOperands(address);
+             }
            int insnPos = asm.position();
-             emitOpcode(asm, size, getRXB(null, dst), 0, 0);
-             asm.emitOperandHelper(ext, dst, immediateSize(size));
+             emitOpcode(asm, size, getRXB(null, address), 0, 0);
+             asm.emitOperandHelper(ext, address, immediateSize(size));
            int immPos = asm.position();
            emitImmediate(asm, size, imm);
            int nextInsnPos = asm.position();
            if (annotateImm && asm.codePatchingAnnotationConsumer != null) {
                asm.codePatchingAnnotationConsumer.accept(new OperandDataAnnotation(insnPos, immPos, nextInsnPos - immPos, nextInsnPos));
            }
        }
+
+         public boolean isMemRead() {
+             return isMemRead;
+         }
    }

    /**
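
To illustrate the new isMemRead flag, a usage sketch (asm and address are assumed to be in scope): BT tests the addressed bit and exposes the result through CF, so it is constructed with isMemRead = true and its memory-operand emit path calls interceptMemorySrcOperands; MOV to memory only stores the immediate and is constructed with false.

AMD64MIOp op = AMD64MIOp.BT;
assert op.isMemRead();                       // BT observes the value read from memory
op.emit(asm, OperandSize.QWORD, address, 3); // calls asm.interceptMemorySrcOperands(address)

AMD64MIOp store = AMD64MIOp.MOV;
assert !store.isMemRead();                   // a plain store, no interception on emit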
@@ -721,6 +737,7 @@ public void emit(AMD64Assembler asm, OperandSize size, Register dst, Register sr

        public void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src, int imm) {
            assert verify(asm, size, dst, null);
+             asm.interceptMemorySrcOperands(src);
            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, 0);
            asm.emitOperandHelper(dst, src, immediateSize(size));
            emitImmediate(asm, size, imm);
@@ -883,6 +900,7 @@ public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Regis
        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src) {
            assert verify(asm, size, dst, null);
            assert isSSEInstruction();
+             asm.interceptMemorySrcOperands(src);
            // MOVSS/SD are not RVM instruction when the dst is an address
            Register nds = (this == MOVSS || this == MOVSD) ? Register.None : preferredNDS.getNds(dst, src);
            asm.simdPrefix(dst, nds, src, size, prefix1, prefix2, size == OperandSize.QWORD);
@@ -972,6 +990,7 @@ public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Regis
        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src, int imm) {
            assert verify(asm, size, dst, null);
            assert isSSEInstruction();
+             asm.interceptMemorySrcOperands(src);
            asm.simdPrefix(dst, preferredNDS.getNds(dst, src), src, size, prefix1, prefix2, w);
            asm.emitByte(op);
            asm.emitOperandHelper(dst, src, immediateSize(size));
@@ -1090,12 +1109,12 @@ public static final class AMD64BinaryArithmetic {
        private AMD64BinaryArithmetic(String opcode, int code) {
            int baseOp = code << 3;

-             byteImmOp = new AMD64MIOp(opcode, true, 0, 0x80, code, OpAssertion.ByteAssertion);
+             byteImmOp = new AMD64MIOp(opcode, true, 0, 0x80, code, false, OpAssertion.ByteAssertion);
            byteMrOp = new AMD64MROp(opcode, 0, baseOp, OpAssertion.ByteAssertion);
            byteRmOp = new AMD64RMOp(opcode, 0, baseOp | 0x02, OpAssertion.ByteAssertion);

-             immOp = new AMD64MIOp(opcode, false, 0, 0x81, code, OpAssertion.WordOrLargerAssertion);
-             immSxOp = new AMD64MIOp(opcode, true, 0, 0x83, code, OpAssertion.WordOrLargerAssertion);
+             immOp = new AMD64MIOp(opcode, false, 0, 0x81, code, false, OpAssertion.WordOrLargerAssertion);
+             immSxOp = new AMD64MIOp(opcode, true, 0, 0x83, code, false, OpAssertion.WordOrLargerAssertion);
            mrOp = new AMD64MROp(opcode, 0, baseOp | 0x01, OpAssertion.WordOrLargerAssertion);
            rmOp = new AMD64RMOp(opcode, 0, baseOp | 0x03, OpAssertion.WordOrLargerAssertion);
        }
@@ -1148,7 +1167,7 @@ public static final class AMD64Shift {
        private AMD64Shift(String opcode, int code) {
            m1Op = new AMD64MOp(opcode, 0, 0xD1, code, OpAssertion.WordOrLargerAssertion);
            mcOp = new AMD64MOp(opcode, 0, 0xD3, code, OpAssertion.WordOrLargerAssertion);
-             miOp = new AMD64MIOp(opcode, true, 0, 0xC1, code, OpAssertion.WordOrLargerAssertion);
+             miOp = new AMD64MIOp(opcode, true, 0, 0xC1, code, true, OpAssertion.WordOrLargerAssertion);
        }
    }

@@ -1502,6 +1521,7 @@ protected final void emitVexOrEvex(AMD64Assembler asm, Register dst, Register nd

        protected final void emitVexOrEvex(AMD64Assembler asm, Register dst, Register nds, AMD64Address src, Register opmask, AVXSize size, int actualPP, int actualMMMMM, int actualW,
                        int actualWEvex, int z, int b) {
+             asm.interceptMemorySrcOperands(src);
            if (isEvex) {
                checkEvex(asm, size, dst, opmask, z, nds, null, b);
                asm.evexPrefix(dst, opmask, nds, src, size, actualPP, actualMMMMM, actualWEvex, z, b);
@@ -4202,6 +4222,7 @@ public final void cmovl(ConditionFlag cc, Register dst, Register src) {
    }

    public final void cmovl(ConditionFlag cc, Register dst, AMD64Address src) {
+         interceptMemorySrcOperands(src);
        prefix(src, dst);
        emitByte(0x0F);
        emitByte(0x40 | cc.getValue());
@@ -4216,6 +4237,7 @@ public final void cmovq(ConditionFlag cc, Register dst, Register src) {
    }

    public final void cmovq(ConditionFlag cc, Register dst, AMD64Address src) {
+         interceptMemorySrcOperands(src);
        prefixq(src, dst);
        emitByte(0x0F);
        emitByte(0x40 | cc.getValue());
@@ -4244,6 +4266,7 @@ public final void fincstp() {
    }

    public final void fldd(AMD64Address src) {
+         interceptMemorySrcOperands(src);
        emitByte(0xDD);
        emitOperandHelper(0, src, 0);
    }
@@ -4259,6 +4282,7 @@ public final void fldln2() {
    }

    public final void flds(AMD64Address src) {
+         interceptMemorySrcOperands(src);
        emitByte(0xD9);
        emitOperandHelper(0, src, 0);
    }
@@ -4290,11 +4314,13 @@ public final void fstp(int i) {
    }

    public final void fstpd(AMD64Address src) {
+         interceptMemorySrcOperands(src);
        emitByte(0xDD);
        emitOperandHelper(3, src, 0);
    }

    public final void fstps(AMD64Address src) {
+         interceptMemorySrcOperands(src);
        emitByte(0xD9);
        emitOperandHelper(3, src, 0);
    }
@@ -4351,13 +4377,13 @@ public final void leave() {
        emitByte(0xC9);
    }

-     public final void lfence() {
+     public void lfence() {
        emitByte(0x0f);
        emitByte(0xae);
        emitByte(0xe8);
    }

-     public final void lock() {
+     public void lock() {
        emitByte(0xF0);
    }

@@ -4408,6 +4434,7 @@ public final void movlhps(Register dst, Register src) {
     */
    public final void movlpd(Register dst, AMD64Address src) {
        assert inRC(XMM, dst);
+         interceptMemorySrcOperands(src);
        simdPrefix(dst, dst, src, OperandSize.PD, P_0F, false);
        emitByte(0x12);
        emitOperandHelper(dst, src, 0);
@@ -4424,6 +4451,7 @@ public final void movq(Register dst, AMD64Address src, boolean force4BytesDispla
        // An alternative instruction would be 66 REX.W 0F 6E /r. We prefer the REX.W free
        // format, because it would allow us to emit 2-bytes-prefixed vex-encoding instruction
        // when applicable.
+         interceptMemorySrcOperands(src);
        simdPrefix(dst, Register.None, src, OperandSize.SS, P_0F, false);
        emitByte(0x7E);
        emitOperandHelper(dst, src, force4BytesDisplacement, 0);
@@ -4868,7 +4896,7 @@ public final void cmpwImm16(AMD64Address dst, int imm16) {
     * adr if so; otherwise, the value at adr is loaded into X86.rax,. The ZF is set if the compared
     * values were equal, and cleared otherwise.
     */
-     public final void cmpxchgb(Register reg, AMD64Address adr) { // cmpxchg
+     public final void cmpxchgb(AMD64Address adr, Register reg) { // cmpxchg
        AMD64MROp.CMPXCHGB.emit(this, OperandSize.BYTE, adr, reg);
    }

@@ -4877,7 +4905,7 @@ public final void cmpxchgb(Register reg, AMD64Address adr) { // cmpxchg
     * into adr if so; otherwise, the value at adr is loaded into X86.rax,. The ZF is set if the
     * compared values were equal, and cleared otherwise.
     */
-     public final void cmpxchgl(Register reg, AMD64Address adr) { // cmpxchg
+     public final void cmpxchgl(AMD64Address adr, Register reg) { // cmpxchg
        AMD64MROp.CMPXCHG.emit(this, OperandSize.DWORD, adr, reg);
    }

@@ -4890,7 +4918,7 @@ public final void cmpxchgq(Register reg, AMD64Address adr) {
     * into adr if so; otherwise, the value at adr is loaded into X86.rax,. The ZF is set if the
     * compared values were equal, and cleared otherwise.
     */
-     public final void cmpxchgw(Register reg, AMD64Address adr) { // cmpxchg
+     public final void cmpxchgw(AMD64Address adr, Register reg) { // cmpxchg
        AMD64MROp.CMPXCHG.emit(this, OperandSize.WORD, adr, reg);
    }

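Since the cmpxchgb/cmpxchgl/cmpxchgw parameters are reordered to put the memory operand first (matching the address-first order already used by AMD64MROp.CMPXCHG.emit), existing call sites must swap their arguments. A hypothetical before/after, with register and offset names invented for illustration:

// before: asm.cmpxchgl(newValue, new AMD64Address(objectBase, fieldOffset));
// after:
asm.cmpxchgl(new AMD64Address(objectBase, fieldOffset), newValue);
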
@@ -6282,4 +6310,5 @@ public final void evpternlogq(Register dst, int imm8, Register src1, Register sr
    public final void evpxorq(Register dst, Register mask, Register nds, AMD64Address src) {
        VexRVMOp.EVPXORQ.emit(this, AVXSize.ZMM, dst, nds, src, mask);
    }
+
}