Merge branch 'master' of github.com:aquynh/capstone
commit 047c7202ea

@@ -1,4 +1,4 @@
 # CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN, None
-0x3c,0x04,0xde,0xae = lui $4, %hi(addr)
-0x03,0xe0,0x00,0x08 = jr $31
-0x80,0x82,0xbe,0xef = lb $2, %lo(addr)($4)
+0x3c,0x04,0xde,0xae = lui $a0, %hi(addr)
+0x03,0xe0,0x00,0x08 = jr $ra
+0x80,0x82,0xbe,0xef = lb $v0, %lo(addr)($a0)
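Each MC test file pairs an encoding with the text Capstone is expected to print: the `#` header line gives the architecture and mode used to open the handle, and every following line is `<bytes> = <expected disassembly>`. As a hedged illustration only (the load address 0x1000 and the printed operand form are assumptions, and the real checking is done by test_single_MC in the harness), the first vector above could be verified by hand like this:

```c
#include <stdio.h>
#include <capstone/capstone.h>

int main(void)
{
	/* First vector of the file above: 3c 04 de ae should decode to a lui on $a0. */
	const uint8_t code[] = { 0x3c, 0x04, 0xde, 0xae };
	csh handle;
	cs_insn *insn;
	size_t count;

	if (cs_open(CS_ARCH_MIPS, CS_MODE_MIPS32 + CS_MODE_BIG_ENDIAN, &handle) != CS_ERR_OK)
		return 1;

	count = cs_disasm(handle, code, sizeof(code), 0x1000, 0, &insn);
	if (count == 1)
		/* The file expects "lui $a0, %hi(addr)"; a raw disassembler has no
		   relocation info, so the immediate is printed as a plain value. */
		printf("%s %s\n", insn[0].mnemonic, insn[0].op_str);

	cs_free(insn, count);
	cs_close(&handle);
	return 0;
}
```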
@ -1,33 +1,33 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN+CS_MODE_MICRO, None
|
||||
0x00,0xe6,0x49,0x10 = add $9, $6, $7
|
||||
0x11,0x26,0x45,0x67 = addi $9, $6, 17767
|
||||
0x31,0x26,0xc5,0x67 = addiu $9, $6, -15001
|
||||
0x11,0x26,0x45,0x67 = addi $9, $6, 17767
|
||||
0x31,0x26,0xc5,0x67 = addiu $9, $6, -15001
|
||||
0x00,0xe6,0x49,0x50 = addu $9, $6, $7
|
||||
0x00,0xe6,0x49,0x90 = sub $9, $6, $7
|
||||
0x00,0xa3,0x21,0xd0 = subu $4, $3, $5
|
||||
0x00,0xe0,0x31,0x90 = neg $6, $7
|
||||
0x00,0xe0,0x31,0xd0 = negu $6, $7
|
||||
0x00,0x08,0x39,0x50 = move $7, $8
|
||||
0x00,0xa3,0x1b,0x50 = slt $3, $3, $5
|
||||
0x90,0x63,0x00,0x67 = slti $3, $3, 103
|
||||
0x90,0x63,0x00,0x67 = slti $3, $3, 103
|
||||
0xb0,0x63,0x00,0x67 = sltiu $3, $3, 103
|
||||
0x00,0xa3,0x1b,0x90 = sltu $3, $3, $5
|
||||
0x41,0xa9,0x45,0x67 = lui $9, 17767
|
||||
0x00,0xe6,0x4a,0x50 = and $9, $6, $7
|
||||
0xd1,0x26,0x45,0x67 = andi $9, $6, 17767
|
||||
0xd1,0x26,0x45,0x67 = andi $9, $6, 17767
|
||||
0x00,0xa4,0x1a,0x90 = or $3, $4, $5
|
||||
0x51,0x26,0x45,0x67 = ori $9, $6, 17767
|
||||
0x00,0xa3,0x1b,0x10 = xor $3, $3, $5
|
||||
0x71,0x26,0x45,0x67 = xori $9, $6, 17767
|
||||
0x71,0x26,0x45,0x67 = xori $9, $6, 17767
|
||||
0x00,0xe6,0x4a,0xd0 = nor $9, $6, $7
|
||||
0x00,0x08,0x3a,0xd0 = not $7, $8
|
||||
0x00,0xe6,0x4a,0x10 = mul $9, $6, $7
|
||||
0x00,0xe9,0x8b,0x3c = mult $9, $7
|
||||
0x00,0xe9,0x9b,0x3c = multu $9, $7
|
||||
0x00,0xe9,0xab,0x3c = div $zero, $9, $7
|
||||
0x00,0xe9,0xbb,0x3c = divu $zero, $9, $7
|
||||
0x00,0xe6,0x49,0x10 = add $t1, $a2, $a3
|
||||
0x11,0x26,0x45,0x67 = addi $t1, $a2, 17767
|
||||
0x31,0x26,0xc5,0x67 = addiu $t1, $a2, -15001
|
||||
0x11,0x26,0x45,0x67 = addi $t1, $a2, 17767
|
||||
0x31,0x26,0xc5,0x67 = addiu $t1, $a2, -15001
|
||||
0x00,0xe6,0x49,0x50 = addu $t1, $a2, $a3
|
||||
0x00,0xe6,0x49,0x90 = sub $t1, $a2, $a3
|
||||
0x00,0xa3,0x21,0xd0 = subu $a0, $v1, $a1
|
||||
0x00,0xe0,0x31,0x90 = sub $a2, $zero, $a3
|
||||
0x00,0xe0,0x31,0xd0 = subu $a2, $zero, $a3
|
||||
0x00,0x08,0x39,0x50 = addu $a3, $t0, $zero
|
||||
0x00,0xa3,0x1b,0x50 = slt $v1, $v1, $a1
|
||||
0x90,0x63,0x00,0x67 = slti $v1, $v1, 103
|
||||
0x90,0x63,0x00,0x67 = slti $v1, $v1, 103
|
||||
0xb0,0x63,0x00,0x67 = sltiu $v1, $v1, 103
|
||||
0x00,0xa3,0x1b,0x90 = sltu $v1, $v1, $a1
|
||||
0x41,0xa9,0x45,0x67 = lui $t1, 17767
|
||||
0x00,0xe6,0x4a,0x50 = and $t1, $a2, $a3
|
||||
0xd1,0x26,0x45,0x67 = andi $t1, $a2, 17767
|
||||
0xd1,0x26,0x45,0x67 = andi $t1, $a2, 17767
|
||||
0x00,0xa4,0x1a,0x90 = or $v1, $a0, $a1
|
||||
0x51,0x26,0x45,0x67 = ori $t1, $a2, 17767
|
||||
0x00,0xa3,0x1b,0x10 = xor $v1, $v1, $a1
|
||||
0x71,0x26,0x45,0x67 = xori $t1, $a2, 17767
|
||||
0x71,0x26,0x45,0x67 = xori $t1, $a2, 17767
|
||||
0x00,0xe6,0x4a,0xd0 = nor $t1, $a2, $a3
|
||||
0x00,0x08,0x3a,0xd0 = not $a3, $t0
|
||||
0x00,0xe6,0x4a,0x10 = mul $t1, $a2, $a3
|
||||
0x00,0xe9,0x8b,0x3c = mult $t1, $a3
|
||||
0x00,0xe9,0x9b,0x3c = multu $t1, $a3
|
||||
0x00,0xe9,0xab,0x3c = div $zero, $t1, $a3
|
||||
0x00,0xe9,0xbb,0x3c = divu $zero, $t1, $a3
|
||||
|
|
|
@ -1,33 +1,33 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO, None
|
||||
0xe6,0x00,0x10,0x49 = add $9, $6, $7
|
||||
0x26,0x11,0x67,0x45 = addi $9, $6, 17767
|
||||
0x26,0x31,0x67,0xc5 = addiu $9, $6, -15001
|
||||
0x26,0x11,0x67,0x45 = addi $9, $6, 17767
|
||||
0x26,0x31,0x67,0xc5 = addiu $9, $6, -15001
|
||||
0xe6,0x00,0x50,0x49 = addu $9, $6, $7
|
||||
0xe6,0x00,0x90,0x49 = sub $9, $6, $7
|
||||
0xa3,0x00,0xd0,0x21 = subu $4, $3, $5
|
||||
0xe0,0x00,0x90,0x31 = neg $6, $7
|
||||
0xe0,0x00,0xd0,0x31 = negu $6, $7
|
||||
0x08,0x00,0x50,0x39 = move $7, $8
|
||||
0xa3,0x00,0x50,0x1b = slt $3, $3, $5
|
||||
0x63,0x90,0x67,0x00 = slti $3, $3, 103
|
||||
0x63,0x90,0x67,0x00 = slti $3, $3, 103
|
||||
0x63,0xb0,0x67,0x00 = sltiu $3, $3, 103
|
||||
0xa3,0x00,0x90,0x1b = sltu $3, $3, $5
|
||||
0xa9,0x41,0x67,0x45 = lui $9, 17767
|
||||
0xe6,0x00,0x50,0x4a = and $9, $6, $7
|
||||
0x26,0xd1,0x67,0x45 = andi $9, $6, 17767
|
||||
0x26,0xd1,0x67,0x45 = andi $9, $6, 17767
|
||||
0xa4,0x00,0x90,0x1a = or $3, $4, $5
|
||||
0x26,0x51,0x67,0x45 = ori $9, $6, 17767
|
||||
0xa3,0x00,0x10,0x1b = xor $3, $3, $5
|
||||
0x26,0x71,0x67,0x45 = xori $9, $6, 17767
|
||||
0x26,0x71,0x67,0x45 = xori $9, $6, 17767
|
||||
0xe6,0x00,0xd0,0x4a = nor $9, $6, $7
|
||||
0x08,0x00,0xd0,0x3a = not $7, $8
|
||||
0xe6,0x00,0x10,0x4a = mul $9, $6, $7
|
||||
0xe9,0x00,0x3c,0x8b = mult $9, $7
|
||||
0xe9,0x00,0x3c,0x9b = multu $9, $7
|
||||
0xe9,0x00,0x3c,0xab = div $zero, $9, $7
|
||||
0xe9,0x00,0x3c,0xbb = divu $zero, $9, $7
|
||||
0xe6,0x00,0x10,0x49 = add $t1, $a2, $a3
|
||||
0x26,0x11,0x67,0x45 = addi $t1, $a2, 17767
|
||||
0x26,0x31,0x67,0xc5 = addiu $t1, $a2, -15001
|
||||
0x26,0x11,0x67,0x45 = addi $t1, $a2, 17767
|
||||
0x26,0x31,0x67,0xc5 = addiu $t1, $a2, -15001
|
||||
0xe6,0x00,0x50,0x49 = addu $t1, $a2, $a3
|
||||
0xe6,0x00,0x90,0x49 = sub $t1, $a2, $a3
|
||||
0xa3,0x00,0xd0,0x21 = subu $a0, $v1, $a1
|
||||
0xe0,0x00,0x90,0x31 = sub $a2, $zero, $a3
|
||||
0xe0,0x00,0xd0,0x31 = subu $a2, $zero, $a3
|
||||
0x08,0x00,0x50,0x39 = addu $a3, $t0, $zero
|
||||
0xa3,0x00,0x50,0x1b = slt $v1, $v1, $a1
|
||||
0x63,0x90,0x67,0x00 = slti $v1, $v1, 103
|
||||
0x63,0x90,0x67,0x00 = slti $v1, $v1, 103
|
||||
0x63,0xb0,0x67,0x00 = sltiu $v1, $v1, 103
|
||||
0xa3,0x00,0x90,0x1b = sltu $v1, $v1, $a1
|
||||
0xa9,0x41,0x67,0x45 = lui $t1, 17767
|
||||
0xe6,0x00,0x50,0x4a = and $t1, $a2, $a3
|
||||
0x26,0xd1,0x67,0x45 = andi $t1, $a2, 17767
|
||||
0x26,0xd1,0x67,0x45 = andi $t1, $a2, 17767
|
||||
0xa4,0x00,0x90,0x1a = or $v1, $a0, $a1
|
||||
0x26,0x51,0x67,0x45 = ori $t1, $a2, 17767
|
||||
0xa3,0x00,0x10,0x1b = xor $v1, $v1, $a1
|
||||
0x26,0x71,0x67,0x45 = xori $t1, $a2, 17767
|
||||
0x26,0x71,0x67,0x45 = xori $t1, $a2, 17767
|
||||
0xe6,0x00,0xd0,0x4a = nor $t1, $a2, $a3
|
||||
0x08,0x00,0xd0,0x3a = not $a3, $t0
|
||||
0xe6,0x00,0x10,0x4a = mul $t1, $a2, $a3
|
||||
0xe9,0x00,0x3c,0x8b = mult $t1, $a3
|
||||
0xe9,0x00,0x3c,0x9b = multu $t1, $a3
|
||||
0xe9,0x00,0x3c,0xab = div $zero, $t1, $a3
|
||||
0xe9,0x00,0x3c,0xbb = divu $zero, $t1, $a3
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN, None
|
||||
0x94,0x00,0x02,0x9a = b 1332
|
||||
0x94,0xc9,0x02,0x9a = beq $9, $6, 1332
|
||||
0x40,0x46,0x02,0x9a = bgez $6, 1332
|
||||
0x40,0x66,0x02,0x9a = bgezal $6, 1332
|
||||
0x40,0x26,0x02,0x9a = bltzal $6, 1332
|
||||
0x40,0xc6,0x02,0x9a = bgtz $6, 1332
|
||||
0x40,0x86,0x02,0x9a = blez $6, 1332
|
||||
0xb4,0xc9,0x02,0x9a = bne $9, $6, 1332
|
||||
0x94,0xc9,0x02,0x9a = beq $t1, $a2, 1332
|
||||
0x40,0x46,0x02,0x9a = bgez $a2, 1332
|
||||
0x40,0x66,0x02,0x9a = bgezal $a2, 1332
|
||||
0x40,0x26,0x02,0x9a = bltzal $a2, 1332
|
||||
0x40,0xc6,0x02,0x9a = bgtz $a2, 1332
|
||||
0x40,0x86,0x02,0x9a = blez $a2, 1332
|
||||
0xb4,0xc9,0x02,0x9a = bne $t1, $a2, 1332
|
||||
0x40,0x60,0x02,0x9a = bal 1332
|
||||
0x40,0x06,0x02,0x9a = bltz $6, 1332
|
||||
0x40,0x06,0x02,0x9a = bltz $a2, 1332
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO, None
|
||||
0x00,0x94,0x9a,0x02 = b 1332
|
||||
0xc9,0x94,0x9a,0x02 = beq $9, $6, 1332
|
||||
0x46,0x40,0x9a,0x02 = bgez $6, 1332
|
||||
0x66,0x40,0x9a,0x02 = bgezal $6, 1332
|
||||
0x26,0x40,0x9a,0x02 = bltzal $6, 1332
|
||||
0xc6,0x40,0x9a,0x02 = bgtz $6, 1332
|
||||
0x86,0x40,0x9a,0x02 = blez $6, 1332
|
||||
0xc9,0xb4,0x9a,0x02 = bne $9, $6, 1332
|
||||
0xc9,0x94,0x9a,0x02 = beq $t1, $a2, 1332
|
||||
0x46,0x40,0x9a,0x02 = bgez $a2, 1332
|
||||
0x66,0x40,0x9a,0x02 = bgezal $a2, 1332
|
||||
0x26,0x40,0x9a,0x02 = bltzal $a2, 1332
|
||||
0xc6,0x40,0x9a,0x02 = bgtz $a2, 1332
|
||||
0x86,0x40,0x9a,0x02 = blez $a2, 1332
|
||||
0xc9,0xb4,0x9a,0x02 = bne $t1, $a2, 1332
|
||||
0x60,0x40,0x9a,0x02 = bal 1332
|
||||
0x06,0x40,0x9a,0x02 = bltz $6, 1332
|
||||
0x06,0x40,0x9a,0x02 = bltz $a2, 1332
|
||||
|
|
|
@ -1,20 +1,20 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO, None
|
||||
0xa0,0x50,0x7b,0x00 = ori $5, $zero, 123
|
||||
0xc0,0x30,0xd7,0xf6 = addiu $6, $zero, -2345
|
||||
0xa7,0x41,0x01,0x00 = lui $7, 1
|
||||
0xe7,0x50,0x02,0x00 = ori $7, $7, 2
|
||||
0x80,0x30,0x14,0x00 = addiu $4, $zero, 20
|
||||
0xa7,0x41,0x01,0x00 = lui $7, 1
|
||||
0xe7,0x50,0x02,0x00 = ori $7, $7, 2
|
||||
0x85,0x30,0x14,0x00 = addiu $4, $5, 20
|
||||
0xa7,0x41,0x01,0x00 = lui $7, 1
|
||||
0xe7,0x50,0x02,0x00 = ori $7, $7, 2
|
||||
0x07,0x01,0x50,0x39 = addu $7, $7, $8
|
||||
0x8a,0x00,0x50,0x51 = addu $10, $10, $4
|
||||
0x21,0x01,0x50,0x09 = addu $1, $1, $9
|
||||
0xaa,0x41,0x0a,0x00 = lui $10, 10
|
||||
0x8a,0x00,0x50,0x51 = addu $10, $10, $4
|
||||
0x4a,0xfd,0x7b,0x00 = lw $10, 123($10)
|
||||
0xa1,0x41,0x02,0x00 = lui $1, 2
|
||||
0x21,0x01,0x50,0x09 = addu $1, $1, $9
|
||||
0x41,0xf9,0x40,0xe2 = sw $10, 57920($1)
|
||||
0xa0,0x50,0x7b,0x00 = ori $a1, $zero, 123
|
||||
0xc0,0x30,0xd7,0xf6 = addiu $a2, $zero, -2345
|
||||
0xa7,0x41,0x01,0x00 = lui $a3, 1
|
||||
0xe7,0x50,0x02,0x00 = ori $a3, $a3, 2
|
||||
0x80,0x30,0x14,0x00 = addiu $a0, $zero, 20
|
||||
0xa7,0x41,0x01,0x00 = lui $a3, 1
|
||||
0xe7,0x50,0x02,0x00 = ori $a3, $a3, 2
|
||||
0x85,0x30,0x14,0x00 = addiu $a0, $a1, 20
|
||||
0xa7,0x41,0x01,0x00 = lui $a3, 1
|
||||
0xe7,0x50,0x02,0x00 = ori $a3, $a3, 2
|
||||
0x07,0x01,0x50,0x39 = addu $a3, $a3, $t0
|
||||
0x8a,0x00,0x50,0x51 = addu $t2, $t2, $a0
|
||||
0x21,0x01,0x50,0x09 = addu $at, $at, $t1
|
||||
0xaa,0x41,0x0a,0x00 = lui $t2, 10
|
||||
0x8a,0x00,0x50,0x51 = addu $t2, $t2, $a0
|
||||
0x4a,0xfd,0x7b,0x00 = lw $t2, 123($t2)
|
||||
0xa1,0x41,0x02,0x00 = lui $at, 2
|
||||
0x21,0x01,0x50,0x09 = addu $at, $at, $t1
|
||||
0x41,0xf9,0x40,0xe2 = sw $t2, 57920($at)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN, None
|
||||
0xd4,0x00,0x02,0x98 = j 1328
|
||||
0xf4,0x00,0x02,0x98 = jal 1328
|
||||
0x03,0xe6,0x0f,0x3c = jalr $6
|
||||
0x00,0x07,0x0f,0x3c = jr $7
|
||||
0x03,0xe6,0x0f,0x3c = jalr $a2
|
||||
0x00,0x07,0x0f,0x3c = jr $a3
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO, None
|
||||
0x00,0xd4,0x98,0x02 = j 1328
|
||||
0x00,0xf4,0x98,0x02 = jal 1328
|
||||
0xe6,0x03,0x3c,0x0f = jalr $6
|
||||
0x07,0x00,0x3c,0x0f = jr $7
|
||||
0x07,0x00,0x3c,0x0f = jr $7
|
||||
0xe6,0x03,0x3c,0x0f = jalr $a2
|
||||
0x07,0x00,0x3c,0x0f = jr $a3
|
||||
0x07,0x00,0x3c,0x0f = jr $a3
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN, None
|
||||
0x1c,0xa4,0x00,0x08 = lb $5, 8($4)
|
||||
0x14,0xc4,0x00,0x08 = lbu $6, 8($4)
|
||||
0x3c,0x44,0x00,0x08 = lh $2, 8($4)
|
||||
0x34,0x82,0x00,0x08 = lhu $4, 8($2)
|
||||
0xfc,0xc5,0x00,0x04 = lw $6, 4($5)
|
||||
0x18,0xa4,0x00,0x08 = sb $5, 8($4)
|
||||
0x38,0x44,0x00,0x08 = sh $2, 8($4)
|
||||
0xf8,0xa6,0x00,0x04 = sw $5, 4($6)
|
||||
0x1c,0xa4,0x00,0x08 = lb $a1, 8($a0)
|
||||
0x14,0xc4,0x00,0x08 = lbu $a2, 8($a0)
|
||||
0x3c,0x44,0x00,0x08 = lh $v0, 8($a0)
|
||||
0x34,0x82,0x00,0x08 = lhu $a0, 8($v0)
|
||||
0xfc,0xc5,0x00,0x04 = lw $a2, 4($a1)
|
||||
0x18,0xa4,0x00,0x08 = sb $a1, 8($a0)
|
||||
0x38,0x44,0x00,0x08 = sh $v0, 8($a0)
|
||||
0xf8,0xa6,0x00,0x04 = sw $a1, 4($a2)
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO, None
|
||||
0xa4,0x1c,0x08,0x00 = lb $5, 8($4)
|
||||
0xc4,0x14,0x08,0x00 = lbu $6, 8($4)
|
||||
0x44,0x3c,0x08,0x00 = lh $2, 8($4)
|
||||
0x82,0x34,0x08,0x00 = lhu $4, 8($2)
|
||||
0xc5,0xfc,0x04,0x00 = lw $6, 4($5)
|
||||
0xa4,0x18,0x08,0x00 = sb $5, 8($4)
|
||||
0x44,0x38,0x08,0x00 = sh $2, 8($4)
|
||||
0xa6,0xf8,0x04,0x00 = sw $5, 4($6)
|
||||
0xa4,0x1c,0x08,0x00 = lb $a1, 8($a0)
|
||||
0xc4,0x14,0x08,0x00 = lbu $a2, 8($a0)
|
||||
0x44,0x3c,0x08,0x00 = lh $v0, 8($a0)
|
||||
0x82,0x34,0x08,0x00 = lhu $a0, 8($v0)
|
||||
0xc5,0xfc,0x04,0x00 = lw $a2, 4($a1)
|
||||
0xa4,0x18,0x08,0x00 = sb $a1, 8($a0)
|
||||
0x44,0x38,0x08,0x00 = sh $v0, 8($a0)
|
||||
0xa6,0xf8,0x04,0x00 = sw $a1, 4($a2)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN, None
|
||||
0x60,0x85,0x00,0x10 = lwl $4, 16($5)
|
||||
0x60,0x85,0x10,0x10 = lwr $4, 16($5)
|
||||
0x60,0x85,0x80,0x10 = swl $4, 16($5)
|
||||
0x60,0x85,0x90,0x10 = swr $4, 16($5)
|
||||
0x60,0x85,0x00,0x10 = lwl $a0, 16($a1)
|
||||
0x60,0x85,0x10,0x10 = lwr $a0, 16($a1)
|
||||
0x60,0x85,0x80,0x10 = swl $a0, 16($a1)
|
||||
0x60,0x85,0x90,0x10 = swr $a0, 16($a1)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO, None
|
||||
0x85,0x60,0x10,0x00 = lwl $4, 16($5)
|
||||
0x85,0x60,0x10,0x10 = lwr $4, 16($5)
|
||||
0x85,0x60,0x10,0x80 = swl $4, 16($5)
|
||||
0x85,0x60,0x10,0x90 = swr $4, 16($5)
|
||||
0x85,0x60,0x10,0x00 = lwl $a0, 16($a1)
|
||||
0x85,0x60,0x10,0x10 = lwr $a0, 16($a1)
|
||||
0x85,0x60,0x10,0x80 = swl $a0, 16($a1)
|
||||
0x85,0x60,0x10,0x90 = swr $a0, 16($a1)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN, None
|
||||
0x00,0xe6,0x48,0x58 = movz $9, $6, $7
|
||||
0x00,0xe6,0x48,0x18 = movn $9, $6, $7
|
||||
0x55,0x26,0x09,0x7b = movt $9, $6, $fcc0
|
||||
0x55,0x26,0x01,0x7b = movf $9, $6, $fcc0
|
||||
0x00,0xe6,0x48,0x58 = movz $t1, $a2, $a3
|
||||
0x00,0xe6,0x48,0x18 = movn $t1, $a2, $a3
|
||||
0x55,0x26,0x09,0x7b = movt $t1, $a2, $fcc0
|
||||
0x55,0x26,0x01,0x7b = movf $t1, $a2, $fcc0
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO, None
|
||||
0xe6,0x00,0x58,0x48 = movz $9, $6, $7
|
||||
0xe6,0x00,0x18,0x48 = movn $9, $6, $7
|
||||
0x26,0x55,0x7b,0x09 = movt $9, $6, $fcc0
|
||||
0x26,0x55,0x7b,0x01 = movf $9, $6, $fcc0
|
||||
0xe6,0x00,0x58,0x48 = movz $t1, $a2, $a3
|
||||
0xe6,0x00,0x18,0x48 = movn $t1, $a2, $a3
|
||||
0x26,0x55,0x7b,0x09 = movt $t1, $a2, $fcc0
|
||||
0x26,0x55,0x7b,0x01 = movf $t1, $a2, $fcc0
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN, None
|
||||
0x00,0xa4,0xcb,0x3c = madd $4, $5
|
||||
0x00,0xa4,0xdb,0x3c = maddu $4, $5
|
||||
0x00,0xa4,0xeb,0x3c = msub $4, $5
|
||||
0x00,0xa4,0xfb,0x3c = msubu $4, $5
|
||||
0x00,0xa4,0xcb,0x3c = madd $a0, $a1
|
||||
0x00,0xa4,0xdb,0x3c = maddu $a0, $a1
|
||||
0x00,0xa4,0xeb,0x3c = msub $a0, $a1
|
||||
0x00,0xa4,0xfb,0x3c = msubu $a0, $a1
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO, None
|
||||
0xa4,0x00,0x3c,0xcb = madd $4, $5
|
||||
0xa4,0x00,0x3c,0xdb = maddu $4, $5
|
||||
0xa4,0x00,0x3c,0xeb = msub $4, $5
|
||||
0xa4,0x00,0x3c,0xfb = msubu $4, $5
|
||||
0xa4,0x00,0x3c,0xcb = madd $a0, $a1
|
||||
0xa4,0x00,0x3c,0xdb = maddu $a0, $a1
|
||||
0xa4,0x00,0x3c,0xeb = msub $a0, $a1
|
||||
0xa4,0x00,0x3c,0xfb = msubu $a0, $a1
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN, None
|
||||
0x00,0x83,0x38,0x00 = sll $4, $3, 7
|
||||
0x00,0x65,0x10,0x10 = sllv $2, $3, $5
|
||||
0x00,0x83,0x38,0x80 = sra $4, $3, 7
|
||||
0x00,0x65,0x10,0x90 = srav $2, $3, $5
|
||||
0x00,0x83,0x38,0x40 = srl $4, $3, 7
|
||||
0x00,0x65,0x10,0x50 = srlv $2, $3, $5
|
||||
0x01,0x26,0x38,0xc0 = rotr $9, $6, 7
|
||||
0x00,0xc7,0x48,0xd0 = rotrv $9, $6, $7
|
||||
0x00,0x83,0x38,0x00 = sll $a0, $v1, 7
|
||||
0x00,0x65,0x10,0x10 = sllv $v0, $v1, $a1
|
||||
0x00,0x83,0x38,0x80 = sra $a0, $v1, 7
|
||||
0x00,0x65,0x10,0x90 = srav $v0, $v1, $a1
|
||||
0x00,0x83,0x38,0x40 = srl $a0, $v1, 7
|
||||
0x00,0x65,0x10,0x50 = srlv $v0, $v1, $a1
|
||||
0x01,0x26,0x38,0xc0 = rotr $t1, $a2, 7
|
||||
0x00,0xc7,0x48,0xd0 = rotrv $t1, $a2, $a3
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO, None
|
||||
0x83,0x00,0x00,0x38 = sll $4, $3, 7
|
||||
0x65,0x00,0x10,0x10 = sllv $2, $3, $5
|
||||
0x83,0x00,0x80,0x38 = sra $4, $3, 7
|
||||
0x65,0x00,0x90,0x10 = srav $2, $3, $5
|
||||
0x83,0x00,0x40,0x38 = srl $4, $3, 7
|
||||
0x65,0x00,0x50,0x10 = srlv $2, $3, $5
|
||||
0x26,0x01,0xc0,0x38 = rotr $9, $6, 7
|
||||
0xc7,0x00,0xd0,0x48 = rotrv $9, $6, $7
|
||||
0x83,0x00,0x00,0x38 = sll $a0, $v1, 7
|
||||
0x65,0x00,0x10,0x10 = sllv $v0, $v1, $a1
|
||||
0x83,0x00,0x80,0x38 = sra $a0, $v1, 7
|
||||
0x65,0x00,0x90,0x10 = srav $v0, $v1, $a1
|
||||
0x83,0x00,0x40,0x38 = srl $a0, $v1, 7
|
||||
0x65,0x00,0x50,0x10 = srlv $v0, $v1, $a1
|
||||
0x26,0x01,0xc0,0x38 = rotr $t1, $a2, 7
|
||||
0xc7,0x00,0xd0,0x48 = rotrv $t1, $a2, $a3
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN, None
|
||||
0x01,0x28,0x00,0x3c = teq $8, $9
|
||||
0x01,0x28,0x02,0x3c = tge $8, $9
|
||||
0x01,0x28,0x04,0x3c = tgeu $8, $9
|
||||
0x01,0x28,0x08,0x3c = tlt $8, $9
|
||||
0x01,0x28,0x0a,0x3c = tltu $8, $9
|
||||
0x01,0x28,0x0c,0x3c = tne $8, $9
|
||||
0x41,0xc9,0x45,0x67 = teqi $9, 17767
|
||||
0x41,0x29,0x45,0x67 = tgei $9, 17767
|
||||
0x41,0x69,0x45,0x67 = tgeiu $9, 17767
|
||||
0x41,0x09,0x45,0x67 = tlti $9, 17767
|
||||
0x41,0x49,0x45,0x67 = tltiu $9, 17767
|
||||
0x41,0x89,0x45,0x67 = tnei $9, 17767
|
||||
0x01,0x28,0x00,0x3c = teq $t0, $t1
|
||||
0x01,0x28,0x02,0x3c = tge $t0, $t1
|
||||
0x01,0x28,0x04,0x3c = tgeu $t0, $t1
|
||||
0x01,0x28,0x08,0x3c = tlt $t0, $t1
|
||||
0x01,0x28,0x0a,0x3c = tltu $t0, $t1
|
||||
0x01,0x28,0x0c,0x3c = tne $t0, $t1
|
||||
0x41,0xc9,0x45,0x67 = teqi $t1, 17767
|
||||
0x41,0x29,0x45,0x67 = tgei $t1, 17767
|
||||
0x41,0x69,0x45,0x67 = tgeiu $t1, 17767
|
||||
0x41,0x09,0x45,0x67 = tlti $t1, 17767
|
||||
0x41,0x49,0x45,0x67 = tltiu $t1, 17767
|
||||
0x41,0x89,0x45,0x67 = tnei $t1, 17767
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_MICRO, None
|
||||
0x28,0x01,0x3c,0x00 = teq $8, $9
|
||||
0x28,0x01,0x3c,0x02 = tge $8, $9
|
||||
0x28,0x01,0x3c,0x04 = tgeu $8, $9
|
||||
0x28,0x01,0x3c,0x08 = tlt $8, $9
|
||||
0x28,0x01,0x3c,0x0a = tltu $8, $9
|
||||
0x28,0x01,0x3c,0x0c = tne $8, $9
|
||||
0xc9,0x41,0x67,0x45 = teqi $9, 17767
|
||||
0x29,0x41,0x67,0x45 = tgei $9, 17767
|
||||
0x69,0x41,0x67,0x45 = tgeiu $9, 17767
|
||||
0x09,0x41,0x67,0x45 = tlti $9, 17767
|
||||
0x49,0x41,0x67,0x45 = tltiu $9, 17767
|
||||
0x89,0x41,0x67,0x45 = tnei $9, 17767
|
||||
0x28,0x01,0x3c,0x00 = teq $t0, $t1
|
||||
0x28,0x01,0x3c,0x02 = tge $t0, $t1
|
||||
0x28,0x01,0x3c,0x04 = tgeu $t0, $t1
|
||||
0x28,0x01,0x3c,0x08 = tlt $t0, $t1
|
||||
0x28,0x01,0x3c,0x0a = tltu $t0, $t1
|
||||
0x28,0x01,0x3c,0x0c = tne $t0, $t1
|
||||
0xc9,0x41,0x67,0x45 = teqi $t1, 17767
|
||||
0x29,0x41,0x67,0x45 = tgei $t1, 17767
|
||||
0x69,0x41,0x67,0x45 = tgeiu $t1, 17767
|
||||
0x09,0x41,0x67,0x45 = tlti $t1, 17767
|
||||
0x49,0x41,0x67,0x45 = tltiu $t1, 17767
|
||||
0x89,0x41,0x67,0x45 = tnei $t1, 17767
|
||||
|
|
|
@ -1,53 +1,53 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32, None
|
||||
0x24,0x48,0xc7,0x00 = and $9, $6, $7
|
||||
0x67,0x45,0xc9,0x30 = andi $9, $6, 17767
|
||||
0x67,0x45,0xc9,0x30 = andi $9, $6, 17767
|
||||
0x67,0x45,0x29,0x31 = andi $9, $9, 17767
|
||||
0x21,0x30,0xe6,0x70 = clo $6, $7
|
||||
0x20,0x30,0xe6,0x70 = clz $6, $7
|
||||
0x84,0x61,0x33,0x7d = ins $19, $9, 6, 7
|
||||
0x27,0x48,0xc7,0x00 = nor $9, $6, $7
|
||||
0x25,0x18,0x65,0x00 = or $3, $3, $5
|
||||
0x67,0x45,0xa4,0x34 = ori $4, $5, 17767
|
||||
0x67,0x45,0xc9,0x34 = ori $9, $6, 17767
|
||||
0x80,0x00,0x6b,0x35 = ori $11, $11, 128
|
||||
0xc2,0x49,0x26,0x00 = rotr $9, $6, 7
|
||||
0x46,0x48,0xe6,0x00 = rotrv $9, $6, $7
|
||||
0xc0,0x21,0x03,0x00 = sll $4, $3, 7
|
||||
0x04,0x10,0xa3,0x00 = sllv $2, $3, $5
|
||||
0x2a,0x18,0x65,0x00 = slt $3, $3, $5
|
||||
0x67,0x00,0x63,0x28 = slti $3, $3, 103
|
||||
0x67,0x00,0x63,0x28 = slti $3, $3, 103
|
||||
0x67,0x00,0x63,0x2c = sltiu $3, $3, 103
|
||||
0x2b,0x18,0x65,0x00 = sltu $3, $3, $5
|
||||
0xc3,0x21,0x03,0x00 = sra $4, $3, 7
|
||||
0x07,0x10,0xa3,0x00 = srav $2, $3, $5
|
||||
0xc2,0x21,0x03,0x00 = srl $4, $3, 7
|
||||
0x06,0x10,0xa3,0x00 = srlv $2, $3, $5
|
||||
0x26,0x18,0x65,0x00 = xor $3, $3, $5
|
||||
0x67,0x45,0xc9,0x38 = xori $9, $6, 17767
|
||||
0x67,0x45,0xc9,0x38 = xori $9, $6, 17767
|
||||
0x0c,0x00,0x6b,0x39 = xori $11, $11, 12
|
||||
0xa0,0x30,0x07,0x7c = wsbh $6, $7
|
||||
0x27,0x38,0x00,0x01 = not $7, $8
|
||||
0x20,0x48,0xc7,0x00 = add $9, $6, $7
|
||||
0x67,0x45,0xc9,0x20 = addi $9, $6, 17767
|
||||
0x67,0xc5,0xc9,0x24 = addiu $9, $6, -15001
|
||||
0x67,0x45,0xc9,0x20 = addi $9, $6, 17767
|
||||
0x67,0x45,0x29,0x21 = addi $9, $9, 17767
|
||||
0x67,0xc5,0xc9,0x24 = addiu $9, $6, -15001
|
||||
0x28,0x00,0x6b,0x25 = addiu $11, $11, 40
|
||||
0x21,0x48,0xc7,0x00 = addu $9, $6, $7
|
||||
0x00,0x00,0xc7,0x70 = madd $6, $7
|
||||
0x01,0x00,0xc7,0x70 = maddu $6, $7
|
||||
0x04,0x00,0xc7,0x70 = msub $6, $7
|
||||
0x05,0x00,0xc7,0x70 = msubu $6, $7
|
||||
0x18,0x00,0x65,0x00 = mult $3, $5
|
||||
0x19,0x00,0x65,0x00 = multu $3, $5
|
||||
0x22,0x48,0xc7,0x00 = sub $9, $6, $7
|
||||
0x24,0x48,0xc7,0x00 = and $t1, $a2, $a3
|
||||
0x67,0x45,0xc9,0x30 = andi $t1, $a2, 17767
|
||||
0x67,0x45,0xc9,0x30 = andi $t1, $a2, 17767
|
||||
0x67,0x45,0x29,0x31 = andi $t1, $t1, 17767
|
||||
0x21,0x30,0xe6,0x70 = clo $a2, $a3
|
||||
0x20,0x30,0xe6,0x70 = clz $a2, $a3
|
||||
0x84,0x61,0x33,0x7d = ins $s3, $t1, 6, 7
|
||||
0x27,0x48,0xc7,0x00 = nor $t1, $a2, $a3
|
||||
0x25,0x18,0x65,0x00 = or $v1, $v1, $a1
|
||||
0x67,0x45,0xa4,0x34 = ori $a0, $a1, 17767
|
||||
0x67,0x45,0xc9,0x34 = ori $t1, $a2, 17767
|
||||
0x80,0x00,0x6b,0x35 = ori $t3, $t3, 128
|
||||
0xc2,0x49,0x26,0x00 = rotr $t1, $a2, 7
|
||||
0x46,0x48,0xe6,0x00 = rotrv $t1, $a2, $a3
|
||||
0xc0,0x21,0x03,0x00 = sll $a0, $v1, 7
|
||||
0x04,0x10,0xa3,0x00 = sllv $v0, $v1, $a1
|
||||
0x2a,0x18,0x65,0x00 = slt $v1, $v1, $a1
|
||||
0x67,0x00,0x63,0x28 = slti $v1, $v1, 103
|
||||
0x67,0x00,0x63,0x28 = slti $v1, $v1, 103
|
||||
0x67,0x00,0x63,0x2c = sltiu $v1, $v1, 103
|
||||
0x2b,0x18,0x65,0x00 = sltu $v1, $v1, $a1
|
||||
0xc3,0x21,0x03,0x00 = sra $a0, $v1, 7
|
||||
0x07,0x10,0xa3,0x00 = srav $v0, $v1, $a1
|
||||
0xc2,0x21,0x03,0x00 = srl $a0, $v1, 7
|
||||
0x06,0x10,0xa3,0x00 = srlv $v0, $v1, $a1
|
||||
0x26,0x18,0x65,0x00 = xor $v1, $v1, $a1
|
||||
0x67,0x45,0xc9,0x38 = xori $t1, $a2, 17767
|
||||
0x67,0x45,0xc9,0x38 = xori $t1, $a2, 17767
|
||||
0x0c,0x00,0x6b,0x39 = xori $t3, $t3, 12
|
||||
0xa0,0x30,0x07,0x7c = wsbh $a2, $a3
|
||||
0x27,0x38,0x00,0x01 = not $a3, $t0
|
||||
0x20,0x48,0xc7,0x00 = add $t1, $a2, $a3
|
||||
0x67,0x45,0xc9,0x20 = addi $t1, $a2, 17767
|
||||
0x67,0xc5,0xc9,0x24 = addiu $t1, $a2, -15001
|
||||
0x67,0x45,0xc9,0x20 = addi $t1, $a2, 17767
|
||||
0x67,0x45,0x29,0x21 = addi $t1, $t1, 17767
|
||||
0x67,0xc5,0xc9,0x24 = addiu $t1, $a2, -15001
|
||||
0x28,0x00,0x6b,0x25 = addiu $t3, $t3, 40
|
||||
0x21,0x48,0xc7,0x00 = addu $t1, $a2, $a3
|
||||
0x00,0x00,0xc7,0x70 = madd $a2, $a3
|
||||
0x01,0x00,0xc7,0x70 = maddu $a2, $a3
|
||||
0x04,0x00,0xc7,0x70 = msub $a2, $a3
|
||||
0x05,0x00,0xc7,0x70 = msubu $a2, $a3
|
||||
0x18,0x00,0x65,0x00 = mult $v1, $a1
|
||||
0x19,0x00,0x65,0x00 = multu $v1, $a1
|
||||
0x22,0x48,0xc7,0x00 = sub $t1, $a2, $a3
|
||||
0xc8,0xff,0xbd,0x23 = addi $sp, $sp, -56
|
||||
0x23,0x20,0x65,0x00 = subu $4, $3, $5
|
||||
0x23,0x20,0x65,0x00 = subu $a0, $v1, $a1
|
||||
0xd8,0xff,0xbd,0x27 = addiu $sp, $sp, -40
|
||||
0x22,0x30,0x07,0x00 = neg $6, $7
|
||||
0x23,0x30,0x07,0x00 = negu $6, $7
|
||||
0x21,0x38,0x00,0x01 = move $7, $8
|
||||
0x22,0x30,0x07,0x00 = neg $a2, $a3
|
||||
0x23,0x30,0x07,0x00 = negu $a2, $a3
|
||||
0x21,0x38,0x00,0x01 = move $a3, $t0
|
||||
|
|
|
@ -8,26 +8,26 @@
|
|||
0x42,0x00,0x00,0x1f = deret
|
||||
0x41,0x60,0x60,0x00 = di
|
||||
0x41,0x60,0x60,0x00 = di
|
||||
0x41,0x6a,0x60,0x00 = di $10
|
||||
0x41,0x6a,0x60,0x00 = di $t2
|
||||
0x41,0x60,0x60,0x20 = ei
|
||||
0x41,0x60,0x60,0x20 = ei
|
||||
0x41,0x6a,0x60,0x20 = ei $10
|
||||
0x41,0x6a,0x60,0x20 = ei $t2
|
||||
0x42,0x00,0x00,0x20 = wait
|
||||
0x00,0x03,0x00,0x34 = teq $zero, $3
|
||||
0x00,0x03,0x00,0x74 = teq $zero, $3, 1
|
||||
0x04,0x6c,0x00,0x01 = teqi $3, 1
|
||||
0x00,0x03,0x00,0x30 = tge $zero, $3
|
||||
0x00,0x03,0x00,0xf0 = tge $zero, $3, 3
|
||||
0x04,0x68,0x00,0x03 = tgei $3, 3
|
||||
0x00,0x03,0x00,0x31 = tgeu $zero, $3
|
||||
0x00,0x03,0x01,0xf1 = tgeu $zero, $3, 7
|
||||
0x04,0x69,0x00,0x07 = tgeiu $3, 7
|
||||
0x00,0x03,0x00,0x32 = tlt $zero, $3
|
||||
0x00,0x03,0x07,0xf2 = tlt $zero, $3, 31
|
||||
0x04,0x6a,0x00,0x1f = tlti $3, 31
|
||||
0x00,0x03,0x00,0x33 = tltu $zero, $3
|
||||
0x00,0x03,0x3f,0xf3 = tltu $zero, $3, 255
|
||||
0x04,0x6b,0x00,0xff = tltiu $3, 255
|
||||
0x00,0x03,0x00,0x36 = tne $zero, $3
|
||||
0x00,0x03,0xff,0xf6 = tne $zero, $3, 1023
|
||||
0x04,0x6e,0x03,0xff = tnei $3, 1023
|
||||
0x00,0x03,0x00,0x34 = teq $zero, $v1
|
||||
0x00,0x03,0x00,0x74 = teq $zero, $v1, 1
|
||||
0x04,0x6c,0x00,0x01 = teqi $v1, 1
|
||||
0x00,0x03,0x00,0x30 = tge $zero, $v1
|
||||
0x00,0x03,0x00,0xf0 = tge $zero, $v1, 3
|
||||
0x04,0x68,0x00,0x03 = tgei $v1, 3
|
||||
0x00,0x03,0x00,0x31 = tgeu $zero, $v1
|
||||
0x00,0x03,0x01,0xf1 = tgeu $zero, $v1, 7
|
||||
0x04,0x69,0x00,0x07 = tgeiu $v1, 7
|
||||
0x00,0x03,0x00,0x32 = tlt $zero, $v1
|
||||
0x00,0x03,0x07,0xf2 = tlt $zero, $v1, 31
|
||||
0x04,0x6a,0x00,0x1f = tlti $v1, 31
|
||||
0x00,0x03,0x00,0x33 = tltu $zero, $v1
|
||||
0x00,0x03,0x3f,0xf3 = tltu $zero, $v1, 255
|
||||
0x04,0x6b,0x00,0xff = tltiu $v1, 255
|
||||
0x00,0x03,0x00,0x36 = tne $zero, $v1
|
||||
0x00,0x03,0xff,0xf6 = tne $zero, $v1, 1023
|
||||
0x04,0x6e,0x03,0xff = tnei $v1, 1023
|
||||
|
|
|
@ -8,26 +8,26 @@
|
|||
0x42,0x00,0x00,0x1f = deret
|
||||
0x41,0x60,0x60,0x00 = di
|
||||
0x41,0x60,0x60,0x00 = di
|
||||
0x41,0x6a,0x60,0x00 = di $10
|
||||
0x41,0x6a,0x60,0x00 = di $t2
|
||||
0x41,0x60,0x60,0x20 = ei
|
||||
0x41,0x60,0x60,0x20 = ei
|
||||
0x41,0x6a,0x60,0x20 = ei $10
|
||||
0x41,0x6a,0x60,0x20 = ei $t2
|
||||
0x42,0x00,0x00,0x20 = wait
|
||||
0x00,0x03,0x00,0x34 = teq $zero, $3
|
||||
0x00,0x03,0x00,0x74 = teq $zero, $3, 1
|
||||
0x04,0x6c,0x00,0x01 = teqi $3, 1
|
||||
0x00,0x03,0x00,0x30 = tge $zero, $3
|
||||
0x00,0x03,0x00,0xf0 = tge $zero, $3, 3
|
||||
0x04,0x68,0x00,0x03 = tgei $3, 3
|
||||
0x00,0x03,0x00,0x31 = tgeu $zero, $3
|
||||
0x00,0x03,0x01,0xf1 = tgeu $zero, $3, 7
|
||||
0x04,0x69,0x00,0x07 = tgeiu $3, 7
|
||||
0x00,0x03,0x00,0x32 = tlt $zero, $3
|
||||
0x00,0x03,0x07,0xf2 = tlt $zero, $3, 31
|
||||
0x04,0x6a,0x00,0x1f = tlti $3, 31
|
||||
0x00,0x03,0x00,0x33 = tltu $zero, $3
|
||||
0x00,0x03,0x3f,0xf3 = tltu $zero, $3, 255
|
||||
0x04,0x6b,0x00,0xff = tltiu $3, 255
|
||||
0x00,0x03,0x00,0x36 = tne $zero, $3
|
||||
0x00,0x03,0xff,0xf6 = tne $zero, $3, 1023
|
||||
0x04,0x6e,0x03,0xff = tnei $3, 1023
|
||||
0x00,0x03,0x00,0x34 = teq $zero, $v1
|
||||
0x00,0x03,0x00,0x74 = teq $zero, $v1, 1
|
||||
0x04,0x6c,0x00,0x01 = teqi $v1, 1
|
||||
0x00,0x03,0x00,0x30 = tge $zero, $v1
|
||||
0x00,0x03,0x00,0xf0 = tge $zero, $v1, 3
|
||||
0x04,0x68,0x00,0x03 = tgei $v1, 3
|
||||
0x00,0x03,0x00,0x31 = tgeu $zero, $v1
|
||||
0x00,0x03,0x01,0xf1 = tgeu $zero, $v1, 7
|
||||
0x04,0x69,0x00,0x07 = tgeiu $v1, 7
|
||||
0x00,0x03,0x00,0x32 = tlt $zero, $v1
|
||||
0x00,0x03,0x07,0xf2 = tlt $zero, $v1, 31
|
||||
0x04,0x6a,0x00,0x1f = tlti $v1, 31
|
||||
0x00,0x03,0x00,0x33 = tltu $zero, $v1
|
||||
0x00,0x03,0x3f,0xf3 = tltu $zero, $v1, 255
|
||||
0x04,0x6b,0x00,0xff = tltiu $v1, 255
|
||||
0x00,0x03,0x00,0x36 = tne $zero, $v1
|
||||
0x00,0x03,0xff,0xf6 = tne $zero, $v1, 1023
|
||||
0x04,0x6e,0x03,0xff = tnei $v1, 1023
|
||||
|
|
|
@ -1,17 +1,17 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS64+CS_MODE_BIG_ENDIAN, None
|
||||
0x40,0xac,0x80,0x02 = dmtc0 $12, $16, 2
|
||||
0x40,0xac,0x80,0x00 = dmtc0 $12, $16, 0
|
||||
0x40,0x8c,0x80,0x02 = mtc0 $12, $16, 2
|
||||
0x40,0x8c,0x80,0x00 = mtc0 $12, $16, 0
|
||||
0x40,0x2c,0x80,0x02 = dmfc0 $12, $16, 2
|
||||
0x40,0x2c,0x80,0x00 = dmfc0 $12, $16, 0
|
||||
0x40,0x0c,0x80,0x02 = mfc0 $12, $16, 2
|
||||
0x40,0x0c,0x80,0x00 = mfc0 $12, $16, 0
|
||||
0x48,0xac,0x80,0x02 = dmtc2 $12, $16, 2
|
||||
0x48,0xac,0x80,0x00 = dmtc2 $12, $16, 0
|
||||
0x48,0x8c,0x80,0x02 = mtc2 $12, $16, 2
|
||||
0x48,0x8c,0x80,0x00 = mtc2 $12, $16, 0
|
||||
0x48,0x2c,0x80,0x02 = dmfc2 $12, $16, 2
|
||||
0x48,0x2c,0x80,0x00 = dmfc2 $12, $16, 0
|
||||
0x48,0x0c,0x80,0x02 = mfc2 $12, $16, 2
|
||||
0x48,0x0c,0x80,0x00 = mfc2 $12, $16, 0
|
||||
0x40,0xac,0x80,0x02 = dmtc0 $t4, $s0, 2
|
||||
0x40,0xac,0x80,0x00 = dmtc0 $t4, $s0, 0
|
||||
0x40,0x8c,0x80,0x02 = mtc0 $t4, $s0, 2
|
||||
0x40,0x8c,0x80,0x00 = mtc0 $t4, $s0, 0
|
||||
0x40,0x2c,0x80,0x02 = dmfc0 $t4, $s0, 2
|
||||
0x40,0x2c,0x80,0x00 = dmfc0 $t4, $s0, 0
|
||||
0x40,0x0c,0x80,0x02 = mfc0 $t4, $s0, 2
|
||||
0x40,0x0c,0x80,0x00 = mfc0 $t4, $s0, 0
|
||||
0x48,0xac,0x80,0x02 = dmtc2 $t4, $s0, 2
|
||||
0x48,0xac,0x80,0x00 = dmtc2 $t4, $s0, 0
|
||||
0x48,0x8c,0x80,0x02 = mtc2 $t4, $s0, 2
|
||||
0x48,0x8c,0x80,0x00 = mtc2 $t4, $s0, 0
|
||||
0x48,0x2c,0x80,0x02 = dmfc2 $t4, $s0, 2
|
||||
0x48,0x2c,0x80,0x00 = dmfc2 $t4, $s0, 0
|
||||
0x48,0x0c,0x80,0x02 = mfc2 $t4, $s0, 2
|
||||
0x48,0x0c,0x80,0x00 = mfc2 $t4, $s0, 0
|
||||
|
|
|
@ -1,43 +1,43 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN, None
|
||||
0x7e,0x32,0x83,0x11 = precrq.qb.ph $16, $17, $18
|
||||
0x7e,0x53,0x8d,0x11 = precrq.ph.w $17, $18, $19
|
||||
0x7e,0x74,0x95,0x51 = precrq_rs.ph.w $18, $19, $20
|
||||
0x7e,0x95,0x9b,0xd1 = precrqu_s.qb.ph $19, $20, $21
|
||||
0x7c,0x15,0xa3,0x12 = preceq.w.phl $20, $21
|
||||
0x7c,0x16,0xab,0x52 = preceq.w.phr $21, $22
|
||||
0x7c,0x17,0xb1,0x12 = precequ.ph.qbl $22, $23
|
||||
0x7c,0x18,0xb9,0x52 = precequ.ph.qbr $23, $24
|
||||
0x7c,0x19,0xc1,0x92 = precequ.ph.qbla $24, $25
|
||||
0x7c,0x1a,0xc9,0xd2 = precequ.ph.qbra $25, $26
|
||||
0x7c,0x1b,0xd7,0x12 = preceu.ph.qbl $26, $27
|
||||
0x7c,0x1c,0xdf,0x52 = preceu.ph.qbr $27, $gp
|
||||
0x7e,0x32,0x83,0x11 = precrq.qb.ph $s0, $s1, $s2
|
||||
0x7e,0x53,0x8d,0x11 = precrq.ph.w $s1, $s2, $s3
|
||||
0x7e,0x74,0x95,0x51 = precrq_rs.ph.w $s2, $s3, $s4
|
||||
0x7e,0x95,0x9b,0xd1 = precrqu_s.qb.ph $s3, $s4, $s5
|
||||
0x7c,0x15,0xa3,0x12 = preceq.w.phl $s4, $s5
|
||||
0x7c,0x16,0xab,0x52 = preceq.w.phr $s5, $s6
|
||||
0x7c,0x17,0xb1,0x12 = precequ.ph.qbl $s6, $s7
|
||||
0x7c,0x18,0xb9,0x52 = precequ.ph.qbr $s7, $t8
|
||||
0x7c,0x19,0xc1,0x92 = precequ.ph.qbla $t8, $t9
|
||||
0x7c,0x1a,0xc9,0xd2 = precequ.ph.qbra $t9, $k0
|
||||
0x7c,0x1b,0xd7,0x12 = preceu.ph.qbl $k0, $k1
|
||||
0x7c,0x1c,0xdf,0x52 = preceu.ph.qbr $k1, $gp
|
||||
0x7c,0x1d,0xe7,0x92 = preceu.ph.qbla $gp, $sp
|
||||
0x7c,0x1e,0xef,0xd2 = preceu.ph.qbra $sp, $fp
|
||||
0x7f,0x19,0xbb,0x51 = precr.qb.ph $23, $24, $25
|
||||
0x7f,0x38,0x07,0x91 = precr_sra.ph.w $24, $25, 0
|
||||
0x7f,0x38,0xff,0x91 = precr_sra.ph.w $24, $25, 31
|
||||
0x7f,0x59,0x07,0xd1 = precr_sra_r.ph.w $25, $26, 0
|
||||
0x7f,0x59,0xff,0xd1 = precr_sra_r.ph.w $25, $26, 31
|
||||
0x7f,0x54,0x51,0x8a = lbux $10, $20($26)
|
||||
0x7f,0x75,0x59,0x0a = lhx $11, $21($27)
|
||||
0x7f,0x96,0x60,0x0a = lwx $12, $22($gp)
|
||||
0x00,0x43,0x18,0x18 = mult $ac3, $2, $3
|
||||
0x00,0x85,0x10,0x19 = multu $ac2, $4, $5
|
||||
0x70,0xc7,0x08,0x00 = madd $ac1, $6, $7
|
||||
0x71,0x09,0x00,0x01 = maddu $ac0, $8, $9
|
||||
0x71,0x4b,0x18,0x04 = msub $ac3, $10, $11
|
||||
0x71,0x8d,0x10,0x05 = msubu $ac2, $12, $13
|
||||
0x00,0x20,0x70,0x10 = mfhi $14, $ac1
|
||||
0x00,0x00,0x78,0x12 = mflo $15, $ac0
|
||||
0x02,0x00,0x18,0x11 = mthi $16, $ac3
|
||||
0x02,0x20,0x10,0x13 = mtlo $17, $ac2
|
||||
0x00,0x43,0x00,0x18 = mult $2, $3
|
||||
0x00,0x85,0x00,0x19 = multu $4, $5
|
||||
0x70,0xc7,0x00,0x00 = madd $6, $7
|
||||
0x71,0x09,0x00,0x01 = maddu $8, $9
|
||||
0x71,0x4b,0x00,0x04 = msub $10, $11
|
||||
0x71,0x8d,0x00,0x05 = msubu $12, $13
|
||||
0x00,0x00,0x70,0x10 = mfhi $14
|
||||
0x00,0x00,0x78,0x12 = mflo $15
|
||||
0x02,0x00,0x00,0x11 = mthi $16
|
||||
0x02,0x20,0x00,0x13 = mtlo $17
|
||||
0x7f,0x19,0xbb,0x51 = precr.qb.ph $s7, $t8, $t9
|
||||
0x7f,0x38,0x07,0x91 = precr_sra.ph.w $t8, $t9, 0
|
||||
0x7f,0x38,0xff,0x91 = precr_sra.ph.w $t8, $t9, 31
|
||||
0x7f,0x59,0x07,0xd1 = precr_sra_r.ph.w $t9, $k0, 0
|
||||
0x7f,0x59,0xff,0xd1 = precr_sra_r.ph.w $t9, $k0, 31
|
||||
0x7f,0x54,0x51,0x8a = lbux $t2, $s4($k0)
|
||||
0x7f,0x75,0x59,0x0a = lhx $t3, $s5($k1)
|
||||
0x7f,0x96,0x60,0x0a = lwx $t4, $s6($gp)
|
||||
0x00,0x43,0x18,0x18 = mult $ac3, $v0, $v1
|
||||
0x00,0x85,0x10,0x19 = multu $ac2, $a0, $a1
|
||||
0x70,0xc7,0x08,0x00 = madd $ac1, $a2, $a3
|
||||
0x71,0x09,0x00,0x01 = maddu $ac0, $t0, $t1
|
||||
0x71,0x4b,0x18,0x04 = msub $ac3, $t2, $t3
|
||||
0x71,0x8d,0x10,0x05 = msubu $ac2, $t4, $t5
|
||||
0x00,0x20,0x70,0x10 = mfhi $t6, $ac1
|
||||
0x00,0x00,0x78,0x12 = mflo $t7, $ac0
|
||||
0x02,0x00,0x18,0x11 = mthi $s0, $ac3
|
||||
0x02,0x20,0x10,0x13 = mtlo $s1, $ac2
|
||||
0x00,0x43,0x00,0x18 = mult $v0, $v1
|
||||
0x00,0x85,0x00,0x19 = multu $a0, $a1
|
||||
0x70,0xc7,0x00,0x00 = madd $a2, $a3
|
||||
0x71,0x09,0x00,0x01 = maddu $t0, $t1
|
||||
0x71,0x4b,0x00,0x04 = msub $t2, $t3
|
||||
0x71,0x8d,0x00,0x05 = msubu $t4, $t5
|
||||
0x00,0x00,0x70,0x10 = mfhi $t6
|
||||
0x00,0x00,0x78,0x12 = mflo $t7
|
||||
0x02,0x00,0x00,0x11 = mthi $s0
|
||||
0x02,0x20,0x00,0x13 = mtlo $s1
|
||||
|
|
|
@ -1,20 +1,19 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32, None
|
||||
0x7b,0x00,0x05,0x34 = ori $5, $zero, 123
|
||||
0xd7,0xf6,0x06,0x24 = addiu $6, $zero, -2345
|
||||
0x01,0x00,0x07,0x3c = lui $7, 1
|
||||
0x02,0x00,0xe7,0x34 = ori $7, $7, 2
|
||||
0x14,0x00,0x04,0x24 = addiu $4, $zero, 20
|
||||
0x01,0x00,0x07,0x3c = lui $7, 1
|
||||
0x02,0x00,0xe7,0x34 = ori $7, $7, 2
|
||||
0x14,0x00,0xa4,0x24 = addiu $4, $5, 20
|
||||
0x01,0x00,0x07,0x3c = lui $7, 1
|
||||
0x02,0x00,0xe7,0x34 = ori $7, $7, 2
|
||||
0x21,0x38,0xe8,0x00 = addu $7, $7, $8
|
||||
0x21,0x50,0x44,0x01 = addu $10, $10, $4
|
||||
0x21,0x08,0x29,0x00 = addu $1, $1, $9
|
||||
0x0a,0x00,0x0a,0x3c = lui $10, 10
|
||||
0x21,0x50,0x44,0x01 = addu $10, $10, $4
|
||||
0x7b,0x00,0x4a,0x8d = lw $10, 123($10)
|
||||
0x02,0x00,0x01,0x3c = lui $1, 2
|
||||
0x21,0x08,0x29,0x00 = addu $1, $1, $9
|
||||
0x40,0xe2,0x2a,0xac = sw $10, 57920($1)
|
||||
0x7b,0x00,0x05,0x34 = ori $a1, $zero, 123
|
||||
0xd7,0xf6,0x06,0x24 = addiu $a2, $zero, -2345
|
||||
0x01,0x00,0x07,0x3c = lui $a3, 1
|
||||
0x02,0x00,0xe7,0x34 = ori $a3, $a3, 2
|
||||
0x14,0x00,0x04,0x24 = addiu $a0, $zero, 20
|
||||
0x01,0x00,0x07,0x3c = lui $a3, 1
|
||||
0x02,0x00,0xe7,0x34 = ori $a3, $a3, 2
|
||||
0x14,0x00,0xa4,0x24 = addiu $a0, $a1, 20
|
||||
0x01,0x00,0x07,0x3c = lui $a3, 1
|
||||
0x02,0x00,0xe7,0x34 = ori $a3, $a3, 2
|
||||
0x21,0x38,0xe8,0x00 = addu $a3, $a3, $t0
|
||||
0x21,0x50,0x44,0x01 = addu $t2, $t2, $a0
|
||||
0x21,0x08,0x29,0x00 = addu $at, $at, $t1
|
||||
0x0a,0x00,0x0a,0x3c = lui $t2, 10
|
||||
0x7b,0x00,0x4a,0x8d = lw $t2, 123($t2)
|
||||
0x02,0x00,0x01,0x3c = lui $at, 2
|
||||
0x21,0x08,0x29,0x00 = addu $at, $at, $t1
|
||||
0x40,0xe2,0x2a,0xac = sw $t2, 57920($at)
|
||||
|
|
|
@ -57,36 +57,36 @@
|
|||
0xa0,0x39,0x80,0x46 = cvt.s.w $f6, $f7
|
||||
0x24,0x73,0x20,0x46 = cvt.w.d $f12, $f14
|
||||
0xa4,0x39,0x00,0x46 = cvt.w.s $f6, $f7
|
||||
0x00,0x00,0x46,0x44 = cfc1 $6, $0
|
||||
0x00,0xf8,0xca,0x44 = ctc1 $10, $31
|
||||
0x00,0x38,0x06,0x44 = mfc1 $6, $f7
|
||||
0x10,0x28,0x00,0x00 = mfhi $5
|
||||
0x12,0x28,0x00,0x00 = mflo $5
|
||||
0x00,0x00,0x46,0x44 = cfc1 $a2, $0
|
||||
0x00,0xf8,0xca,0x44 = ctc1 $t2, $31
|
||||
0x00,0x38,0x06,0x44 = mfc1 $a2, $f7
|
||||
0x10,0x28,0x00,0x00 = mfhi $a1
|
||||
0x12,0x28,0x00,0x00 = mflo $a1
|
||||
0x86,0x41,0x20,0x46 = mov.d $f6, $f8
|
||||
0x86,0x39,0x00,0x46 = mov.s $f6, $f7
|
||||
0x00,0x38,0x86,0x44 = mtc1 $6, $f7
|
||||
0x11,0x00,0xe0,0x00 = mthi $7
|
||||
0x13,0x00,0xe0,0x00 = mtlo $7
|
||||
0xc6,0x23,0xe9,0xe4 = swc1 $f9, 9158($7)
|
||||
0x00,0x38,0x06,0x40 = mfc0 $6, $7, 0
|
||||
0x00,0x40,0x89,0x40 = mtc0 $9, $8, 0
|
||||
0x00,0x38,0x05,0x48 = mfc2 $5, $7, 0
|
||||
0x00,0x20,0x89,0x48 = mtc2 $9, $4, 0
|
||||
0x02,0x38,0x06,0x40 = mfc0 $6, $7, 2
|
||||
0x03,0x40,0x89,0x40 = mtc0 $9, $8, 3
|
||||
0x04,0x38,0x05,0x48 = mfc2 $5, $7, 4
|
||||
0x05,0x20,0x89,0x48 = mtc2 $9, $4, 5
|
||||
0x01,0x10,0x20,0x00 = movf $2, $1, $fcc0
|
||||
0x01,0x10,0x21,0x00 = movt $2, $1, $fcc0
|
||||
0x01,0x20,0xb1,0x00 = movt $4, $5, $fcc4
|
||||
0x00,0x38,0x86,0x44 = mtc1 $a2, $f7
|
||||
0x11,0x00,0xe0,0x00 = mthi $a3
|
||||
0x13,0x00,0xe0,0x00 = mtlo $a3
|
||||
0xc6,0x23,0xe9,0xe4 = swc1 $f9, 9158($a3)
|
||||
0x00,0x38,0x06,0x40 = mfc0 $a2, $a3, 0
|
||||
0x00,0x40,0x89,0x40 = mtc0 $t1, $t0, 0
|
||||
0x00,0x38,0x05,0x48 = mfc2 $a1, $a3, 0
|
||||
0x00,0x20,0x89,0x48 = mtc2 $t1, $a0, 0
|
||||
0x02,0x38,0x06,0x40 = mfc0 $a2, $a3, 2
|
||||
0x03,0x40,0x89,0x40 = mtc0 $t1, $t0, 3
|
||||
0x04,0x38,0x05,0x48 = mfc2 $a1, $a3, 4
|
||||
0x05,0x20,0x89,0x48 = mtc2 $t1, $a0, 5
|
||||
0x01,0x10,0x20,0x00 = movf $v0, $at, $fcc0
|
||||
0x01,0x10,0x21,0x00 = movt $v0, $at, $fcc0
|
||||
0x01,0x20,0xb1,0x00 = movt $a0, $a1, $fcc4
|
||||
0x11,0x31,0x28,0x46 = movf.d $f4, $f6, $fcc2
|
||||
0x11,0x31,0x14,0x46 = movf.s $f4, $f6, $fcc5
|
||||
0x05,0x00,0xa6,0x4c = luxc1 $f0, $6($5)
|
||||
0x0d,0x20,0xb8,0x4c = suxc1 $f4, $24($5)
|
||||
0x00,0x05,0xcc,0x4d = lwxc1 $f20, $12($14)
|
||||
0x08,0xd0,0xd2,0x4e = swxc1 $f26, $18($22)
|
||||
0x00,0x20,0x71,0x44 = mfhc1 $17, $f4
|
||||
0x00,0x30,0xf1,0x44 = mthc1 $17, $f6
|
||||
0x05,0x00,0xa6,0x4c = luxc1 $f0, $a2($a1)
|
||||
0x0d,0x20,0xb8,0x4c = suxc1 $f4, $t8($a1)
|
||||
0x00,0x05,0xcc,0x4d = lwxc1 $f20, $t4($t6)
|
||||
0x08,0xd0,0xd2,0x4e = swxc1 $f26, $s2($s6)
|
||||
0x00,0x20,0x71,0x44 = mfhc1 $s1, $f4
|
||||
0x00,0x30,0xf1,0x44 = mthc1 $s1, $f6
|
||||
0x10,0x00,0xa4,0xeb = swc2 $4, 16($sp)
|
||||
0x10,0x00,0xa4,0xfb = sdc2 $4, 16($sp)
|
||||
0x0c,0x00,0xeb,0xcb = lwc2 $11, 12($ra)
|
||||
|
|
|
@ -1,17 +1,17 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32, None
|
||||
0x10,0x00,0xa4,0xa0 = sb $4, 16($5)
|
||||
0x10,0x00,0xa4,0xe0 = sc $4, 16($5)
|
||||
0x10,0x00,0xa4,0xa4 = sh $4, 16($5)
|
||||
0x10,0x00,0xa4,0xac = sw $4, 16($5)
|
||||
0x00,0x00,0xa7,0xac = sw $7, 0($5)
|
||||
0x10,0x00,0xa2,0xe4 = swc1 $f2, 16($5)
|
||||
0x10,0x00,0xa4,0xa8 = swl $4, 16($5)
|
||||
0x04,0x00,0xa4,0x80 = lb $4, 4($5)
|
||||
0x04,0x00,0xa4,0x8c = lw $4, 4($5)
|
||||
0x04,0x00,0xa4,0x90 = lbu $4, 4($5)
|
||||
0x04,0x00,0xa4,0x84 = lh $4, 4($5)
|
||||
0x04,0x00,0xa4,0x94 = lhu $4, 4($5)
|
||||
0x04,0x00,0xa4,0xc0 = ll $4, 4($5)
|
||||
0x04,0x00,0xa4,0x8c = lw $4, 4($5)
|
||||
0x00,0x00,0xe7,0x8c = lw $7, 0($7)
|
||||
0x10,0x00,0xa2,0x8f = lw $2, 16($sp)
|
||||
0x10,0x00,0xa4,0xa0 = sb $a0, 16($a1)
|
||||
0x10,0x00,0xa4,0xe0 = sc $a0, 16($a1)
|
||||
0x10,0x00,0xa4,0xa4 = sh $a0, 16($a1)
|
||||
0x10,0x00,0xa4,0xac = sw $a0, 16($a1)
|
||||
0x00,0x00,0xa7,0xac = sw $a3, ($a1)
|
||||
0x10,0x00,0xa2,0xe4 = swc1 $f2, 16($a1)
|
||||
0x10,0x00,0xa4,0xa8 = swl $a0, 16($a1)
|
||||
0x04,0x00,0xa4,0x80 = lb $a0, 4($a1)
|
||||
0x04,0x00,0xa4,0x8c = lw $a0, 4($a1)
|
||||
0x04,0x00,0xa4,0x90 = lbu $a0, 4($a1)
|
||||
0x04,0x00,0xa4,0x84 = lh $a0, 4($a1)
|
||||
0x04,0x00,0xa4,0x94 = lhu $a0, 4($a1)
|
||||
0x04,0x00,0xa4,0xc0 = ll $a0, 4($a1)
|
||||
0x04,0x00,0xa4,0x8c = lw $a0, 4($a1)
|
||||
0x00,0x00,0xe7,0x8c = lw $a3, ($a3)
|
||||
0x10,0x00,0xa2,0x8f = lw $v0, 16($sp)
|
||||
|
|
|
@ -1,47 +1,47 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS64, None
|
||||
0x24,0x48,0xc7,0x00 = and $9, $6, $7
|
||||
0x67,0x45,0xc9,0x30 = andi $9, $6, 17767
|
||||
0x67,0x45,0xc9,0x30 = andi $9, $6, 17767
|
||||
0x21,0x30,0xe6,0x70 = clo $6, $7
|
||||
0x20,0x30,0xe6,0x70 = clz $6, $7
|
||||
0x84,0x61,0x33,0x7d = ins $19, $9, 6, 7
|
||||
0x27,0x48,0xc7,0x00 = nor $9, $6, $7
|
||||
0x25,0x18,0x65,0x00 = or $3, $3, $5
|
||||
0x67,0x45,0xa4,0x34 = ori $4, $5, 17767
|
||||
0x67,0x45,0xc9,0x34 = ori $9, $6, 17767
|
||||
0xc2,0x49,0x26,0x00 = rotr $9, $6, 7
|
||||
0x46,0x48,0xe6,0x00 = rotrv $9, $6, $7
|
||||
0xc0,0x21,0x03,0x00 = sll $4, $3, 7
|
||||
0x04,0x10,0xa3,0x00 = sllv $2, $3, $5
|
||||
0x2a,0x18,0x65,0x00 = slt $3, $3, $5
|
||||
0x67,0x00,0x63,0x28 = slti $3, $3, 103
|
||||
0x67,0x00,0x63,0x28 = slti $3, $3, 103
|
||||
0x67,0x00,0x63,0x2c = sltiu $3, $3, 103
|
||||
0x2b,0x18,0x65,0x00 = sltu $3, $3, $5
|
||||
0xc3,0x21,0x03,0x00 = sra $4, $3, 7
|
||||
0x07,0x10,0xa3,0x00 = srav $2, $3, $5
|
||||
0xc2,0x21,0x03,0x00 = srl $4, $3, 7
|
||||
0x06,0x10,0xa3,0x00 = srlv $2, $3, $5
|
||||
0x26,0x18,0x65,0x00 = xor $3, $3, $5
|
||||
0x67,0x45,0xc9,0x38 = xori $9, $6, 17767
|
||||
0x67,0x45,0xc9,0x38 = xori $9, $6, 17767
|
||||
0xa0,0x30,0x07,0x7c = wsbh $6, $7
|
||||
0x27,0x38,0x00,0x01 = not $7, $8
|
||||
0x2c,0x48,0xc7,0x00 = dadd $9, $6, $7
|
||||
0x67,0x45,0xc9,0x60 = daddi $9, $6, 17767
|
||||
0x67,0xc5,0xc9,0x64 = daddiu $9, $6, -15001
|
||||
0x67,0x45,0xc9,0x60 = daddi $9, $6, 17767
|
||||
0x67,0x45,0x29,0x61 = daddi $9, $9, 17767
|
||||
0x67,0xc5,0xc9,0x64 = daddiu $9, $6, -15001
|
||||
0x67,0xc5,0x29,0x65 = daddiu $9, $9, -15001
|
||||
0x2d,0x48,0xc7,0x00 = daddu $9, $6, $7
|
||||
0x3a,0x4d,0x26,0x00 = drotr $9, $6, 20
|
||||
0x3e,0x4d,0x26,0x00 = drotr32 $9, $6, 52
|
||||
0x00,0x00,0xc7,0x70 = madd $6, $7
|
||||
0x01,0x00,0xc7,0x70 = maddu $6, $7
|
||||
0x04,0x00,0xc7,0x70 = msub $6, $7
|
||||
0x05,0x00,0xc7,0x70 = msubu $6, $7
|
||||
0x18,0x00,0x65,0x00 = mult $3, $5
|
||||
0x19,0x00,0x65,0x00 = multu $3, $5
|
||||
0x2f,0x20,0x65,0x00 = dsubu $4, $3, $5
|
||||
0x2d,0x38,0x00,0x01 = move $7, $8
|
||||
0x24,0x48,0xc7,0x00 = and $t1, $a2, $a3
|
||||
0x67,0x45,0xc9,0x30 = andi $t1, $a2, 17767
|
||||
0x67,0x45,0xc9,0x30 = andi $t1, $a2, 17767
|
||||
0x21,0x30,0xe6,0x70 = clo $a2, $a3
|
||||
0x20,0x30,0xe6,0x70 = clz $a2, $a3
|
||||
0x84,0x61,0x33,0x7d = ins $s3, $t1, 6, 7
|
||||
0x27,0x48,0xc7,0x00 = nor $t1, $a2, $a3
|
||||
0x25,0x18,0x65,0x00 = or $v1, $v1, $a1
|
||||
0x67,0x45,0xa4,0x34 = ori $a0, $a1, 17767
|
||||
0x67,0x45,0xc9,0x34 = ori $t1, $a2, 17767
|
||||
0xc2,0x49,0x26,0x00 = rotr $t1, $a2, 7
|
||||
0x46,0x48,0xe6,0x00 = rotrv $t1, $a2, $a3
|
||||
0xc0,0x21,0x03,0x00 = sll $a0, $v1, 7
|
||||
0x04,0x10,0xa3,0x00 = sllv $v0, $v1, $a1
|
||||
0x2a,0x18,0x65,0x00 = slt $v1, $v1, $a1
|
||||
0x67,0x00,0x63,0x28 = slti $v1, $v1, 103
|
||||
0x67,0x00,0x63,0x28 = slti $v1, $v1, 103
|
||||
0x67,0x00,0x63,0x2c = sltiu $v1, $v1, 103
|
||||
0x2b,0x18,0x65,0x00 = sltu $v1, $v1, $a1
|
||||
0xc3,0x21,0x03,0x00 = sra $a0, $v1, 7
|
||||
0x07,0x10,0xa3,0x00 = srav $v0, $v1, $a1
|
||||
0xc2,0x21,0x03,0x00 = srl $a0, $v1, 7
|
||||
0x06,0x10,0xa3,0x00 = srlv $v0, $v1, $a1
|
||||
0x26,0x18,0x65,0x00 = xor $v1, $v1, $a1
|
||||
0x67,0x45,0xc9,0x38 = xori $t1, $a2, 17767
|
||||
0x67,0x45,0xc9,0x38 = xori $t1, $a2, 17767
|
||||
0xa0,0x30,0x07,0x7c = wsbh $a2, $a3
|
||||
0x27,0x38,0x00,0x01 = not $a3, $t0
|
||||
0x2c,0x48,0xc7,0x00 = dadd $t1, $a2, $a3
|
||||
0x67,0x45,0xc9,0x60 = daddi $t1, $a2, 17767
|
||||
0x67,0xc5,0xc9,0x64 = daddiu $t1, $a2, -15001
|
||||
0x67,0x45,0xc9,0x60 = daddi $t1, $a2, 17767
|
||||
0x67,0x45,0x29,0x61 = daddi $t1, $t1, 17767
|
||||
0x67,0xc5,0xc9,0x64 = daddiu $t1, $a2, -15001
|
||||
0x67,0xc5,0x29,0x65 = daddiu $t1, $t1, -15001
|
||||
0x2d,0x48,0xc7,0x00 = daddu $t1, $a2, $a3
|
||||
0x3a,0x4d,0x26,0x00 = drotr $t1, $a2, 20
|
||||
0x3e,0x4d,0x26,0x00 = drotr32 $t1, $a2, 52
|
||||
0x00,0x00,0xc7,0x70 = madd $a2, $a3
|
||||
0x01,0x00,0xc7,0x70 = maddu $a2, $a3
|
||||
0x04,0x00,0xc7,0x70 = msub $a2, $a3
|
||||
0x05,0x00,0xc7,0x70 = msubu $a2, $a3
|
||||
0x18,0x00,0x65,0x00 = mult $v1, $a1
|
||||
0x19,0x00,0x65,0x00 = multu $v1, $a1
|
||||
0x2f,0x20,0x65,0x00 = dsubu $a0, $v1, $a1
|
||||
0x2d,0x38,0x00,0x01 = move $a3, $t0
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS64, None
|
||||
0x81,0x00,0x42,0x4d = ldxc1 $f2, $2($10)
|
||||
0x09,0x40,0x24,0x4f = sdxc1 $f8, $4($25)
|
||||
0x81,0x00,0x42,0x4d = ldxc1 $f2, $v0($t2)
|
||||
0x09,0x40,0x24,0x4f = sdxc1 $f8, $a0($t9)
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN, None
|
||||
0x10,0x00,0x01,0x4d = b 1332
|
||||
0x10,0x00,0x01,0x4d = b 1336
|
||||
0x08,0x00,0x01,0x4c = j 1328
|
||||
0x0c,0x00,0x01,0x4c = jal 1328
|
||||
0x10,0x00,0x01,0x4d = b 1332
|
||||
0x10,0x00,0x01,0x4d = b 1336
|
||||
0x00,0x00,0x00,0x00 = nop
|
||||
0x08,0x00,0x01,0x4c = j 1328
|
||||
0x00,0x00,0x00,0x00 = nop
|
||||
0x0c,0x00,0x01,0x4c = jal 1328
|
||||
0x00,0x00,0x00,0x00 = nop
|
||||
0x46,0x00,0x39,0x85 = abs.s $f6, $f7
|
||||
0x01,0xef,0x18,0x24 = and $3, $15, $15
|
||||
0x01,0xef,0x18,0x24 = and $v1, $t7, $t7
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS64+CS_MODE_BIG_ENDIAN, None
|
||||
0x02,0x04,0x80,0x20 = add $16, $16, $4
|
||||
0x02,0x06,0x80,0x20 = add $16, $16, $6
|
||||
0x02,0x07,0x80,0x20 = add $16, $16, $7
|
||||
0x02,0x08,0x80,0x20 = add $16, $16, $8
|
||||
0x02,0x09,0x80,0x20 = add $16, $16, $9
|
||||
0x02,0x0a,0x80,0x20 = add $16, $16, $10
|
||||
0x02,0x0b,0x80,0x20 = add $16, $16, $11
|
||||
0x02,0x0c,0x80,0x20 = add $16, $16, $12
|
||||
0x02,0x0d,0x80,0x20 = add $16, $16, $13
|
||||
0x02,0x0e,0x80,0x20 = add $16, $16, $14
|
||||
0x02,0x0f,0x80,0x20 = add $16, $16, $15
|
||||
0x02,0x04,0x80,0x20 = add $s0, $s0, $a0
|
||||
0x02,0x06,0x80,0x20 = add $s0, $s0, $a2
|
||||
0x02,0x07,0x80,0x20 = add $s0, $s0, $a3
|
||||
0x02,0x08,0x80,0x20 = add $s0, $s0, $t0
|
||||
0x02,0x09,0x80,0x20 = add $s0, $s0, $t1
|
||||
0x02,0x0a,0x80,0x20 = add $s0, $s0, $t2
|
||||
0x02,0x0b,0x80,0x20 = add $s0, $s0, $t3
|
||||
0x02,0x0c,0x80,0x20 = add $s0, $s0, $t4
|
||||
0x02,0x0d,0x80,0x20 = add $s0, $s0, $t5
|
||||
0x02,0x0e,0x80,0x20 = add $s0, $s0, $t6
|
||||
0x02,0x0f,0x80,0x20 = add $s0, $s0, $t7
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32, None
|
||||
0x08,0x00,0x60,0x00 = jr $3
|
||||
0x08,0x00,0x60,0x00 = jr $v1
|
||||
0x08,0x00,0x80,0x03 = jr $gp
|
||||
0x08,0x00,0xc0,0x03 = jr $fp
|
||||
0x08,0x00,0xa0,0x03 = jr $sp
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN, None
|
||||
0x7b,0x00,0x4f,0x9e = fill.b $w30, $9
|
||||
0x7b,0x01,0xbf,0xde = fill.h $w31, $23
|
||||
0x7b,0x02,0xc4,0x1e = fill.w $w16, $24
|
||||
0x7b,0x00,0x4f,0x9e = fill.b $w30, $t1
|
||||
0x7b,0x01,0xbf,0xde = fill.h $w31, $s7
|
||||
0x7b,0x02,0xc4,0x1e = fill.w $w16, $t8
|
||||
0x7b,0x08,0x05,0x5e = nloc.b $w21, $w0
|
||||
0x7b,0x09,0xfc,0x9e = nloc.h $w18, $w31
|
||||
0x7b,0x0a,0xb8,0x9e = nloc.w $w2, $w23
|
||||
|
|
|
@ -189,18 +189,18 @@
|
|||
0x79,0xa8,0x2e,0x94 = pckod.h $w26, $w5, $w8
|
||||
0x79,0xc2,0x22,0x54 = pckod.w $w9, $w4, $w2
|
||||
0x79,0xf4,0xb7,0x94 = pckod.d $w30, $w22, $w20
|
||||
0x78,0x0c,0xb9,0x54 = sld.b $w5, $w23[$12]
|
||||
0x78,0x23,0xb8,0x54 = sld.h $w1, $w23[$3]
|
||||
0x78,0x49,0x45,0x14 = sld.w $w20, $w8[$9]
|
||||
0x78,0x0c,0xb9,0x54 = sld.b $w5, $w23[$t4]
|
||||
0x78,0x23,0xb8,0x54 = sld.h $w1, $w23[$v1]
|
||||
0x78,0x49,0x45,0x14 = sld.w $w20, $w8[$t1]
|
||||
0x78,0x7e,0xb9,0xd4 = sld.d $w7, $w23[$fp]
|
||||
0x78,0x11,0x00,0xcd = sll.b $w3, $w0, $w17
|
||||
0x78,0x23,0xdc,0x4d = sll.h $w17, $w27, $w3
|
||||
0x78,0x46,0x3c,0x0d = sll.w $w16, $w7, $w6
|
||||
0x78,0x7a,0x02,0x4d = sll.d $w9, $w0, $w26
|
||||
0x78,0x81,0x0f,0x14 = splat.b $w28, $w1[$1]
|
||||
0x78,0xab,0x58,0x94 = splat.h $w2, $w11[$11]
|
||||
0x78,0xcb,0x05,0x94 = splat.w $w22, $w0[$11]
|
||||
0x78,0xe2,0x00,0x14 = splat.d $w0, $w0[$2]
|
||||
0x78,0x81,0x0f,0x14 = splat.b $w28, $w1[$at]
|
||||
0x78,0xab,0x58,0x94 = splat.h $w2, $w11[$t3]
|
||||
0x78,0xcb,0x05,0x94 = splat.w $w22, $w0[$t3]
|
||||
0x78,0xe2,0x00,0x14 = splat.d $w0, $w0[$v0]
|
||||
0x78,0x91,0x27,0x0d = sra.b $w28, $w4, $w17
|
||||
0x78,0xa3,0x4b,0x4d = sra.h $w13, $w9, $w3
|
||||
0x78,0xd3,0xae,0xcd = sra.w $w27, $w21, $w19
|
||||
|
|
|
@ -1,33 +1,33 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN, None
|
||||
0x78,0x7e,0x00,0x59 = cfcmsa $1, $0
|
||||
0x78,0x7e,0x00,0x59 = cfcmsa $1, $0
|
||||
0x78,0x7e,0x08,0x99 = cfcmsa $2, $1
|
||||
0x78,0x7e,0x08,0x99 = cfcmsa $2, $1
|
||||
0x78,0x7e,0x10,0xd9 = cfcmsa $3, $2
|
||||
0x78,0x7e,0x10,0xd9 = cfcmsa $3, $2
|
||||
0x78,0x7e,0x19,0x19 = cfcmsa $4, $3
|
||||
0x78,0x7e,0x19,0x19 = cfcmsa $4, $3
|
||||
0x78,0x7e,0x21,0x59 = cfcmsa $5, $4
|
||||
0x78,0x7e,0x21,0x59 = cfcmsa $5, $4
|
||||
0x78,0x7e,0x29,0x99 = cfcmsa $6, $5
|
||||
0x78,0x7e,0x29,0x99 = cfcmsa $6, $5
|
||||
0x78,0x7e,0x31,0xd9 = cfcmsa $7, $6
|
||||
0x78,0x7e,0x31,0xd9 = cfcmsa $7, $6
|
||||
0x78,0x7e,0x3a,0x19 = cfcmsa $8, $7
|
||||
0x78,0x7e,0x3a,0x19 = cfcmsa $8, $7
|
||||
0x78,0x3e,0x08,0x19 = ctcmsa $0, $1
|
||||
0x78,0x3e,0x08,0x19 = ctcmsa $0, $1
|
||||
0x78,0x3e,0x10,0x59 = ctcmsa $1, $2
|
||||
0x78,0x3e,0x10,0x59 = ctcmsa $1, $2
|
||||
0x78,0x3e,0x18,0x99 = ctcmsa $2, $3
|
||||
0x78,0x3e,0x18,0x99 = ctcmsa $2, $3
|
||||
0x78,0x3e,0x20,0xd9 = ctcmsa $3, $4
|
||||
0x78,0x3e,0x20,0xd9 = ctcmsa $3, $4
|
||||
0x78,0x3e,0x29,0x19 = ctcmsa $4, $5
|
||||
0x78,0x3e,0x29,0x19 = ctcmsa $4, $5
|
||||
0x78,0x3e,0x31,0x59 = ctcmsa $5, $6
|
||||
0x78,0x3e,0x31,0x59 = ctcmsa $5, $6
|
||||
0x78,0x3e,0x39,0x99 = ctcmsa $6, $7
|
||||
0x78,0x3e,0x39,0x99 = ctcmsa $6, $7
|
||||
0x78,0x3e,0x41,0xd9 = ctcmsa $7, $8
|
||||
0x78,0x3e,0x41,0xd9 = ctcmsa $7, $8
|
||||
0x78,0x7e,0x00,0x59 = cfcmsa $at, $0
|
||||
0x78,0x7e,0x00,0x59 = cfcmsa $at, $0
|
||||
0x78,0x7e,0x08,0x99 = cfcmsa $v0, $1
|
||||
0x78,0x7e,0x08,0x99 = cfcmsa $v0, $1
|
||||
0x78,0x7e,0x10,0xd9 = cfcmsa $v1, $2
|
||||
0x78,0x7e,0x10,0xd9 = cfcmsa $v1, $2
|
||||
0x78,0x7e,0x19,0x19 = cfcmsa $a0, $3
|
||||
0x78,0x7e,0x19,0x19 = cfcmsa $a0, $3
|
||||
0x78,0x7e,0x21,0x59 = cfcmsa $a1, $4
|
||||
0x78,0x7e,0x21,0x59 = cfcmsa $a1, $4
|
||||
0x78,0x7e,0x29,0x99 = cfcmsa $a2, $5
|
||||
0x78,0x7e,0x29,0x99 = cfcmsa $a2, $5
|
||||
0x78,0x7e,0x31,0xd9 = cfcmsa $a3, $6
|
||||
0x78,0x7e,0x31,0xd9 = cfcmsa $a3, $6
|
||||
0x78,0x7e,0x3a,0x19 = cfcmsa $t0, $7
|
||||
0x78,0x7e,0x3a,0x19 = cfcmsa $t0, $7
|
||||
0x78,0x3e,0x08,0x19 = ctcmsa $0, $at
|
||||
0x78,0x3e,0x08,0x19 = ctcmsa $0, $at
|
||||
0x78,0x3e,0x10,0x59 = ctcmsa $1, $v0
|
||||
0x78,0x3e,0x10,0x59 = ctcmsa $1, $v0
|
||||
0x78,0x3e,0x18,0x99 = ctcmsa $2, $v1
|
||||
0x78,0x3e,0x18,0x99 = ctcmsa $2, $v1
|
||||
0x78,0x3e,0x20,0xd9 = ctcmsa $3, $a0
|
||||
0x78,0x3e,0x20,0xd9 = ctcmsa $3, $a0
|
||||
0x78,0x3e,0x29,0x19 = ctcmsa $4, $a1
|
||||
0x78,0x3e,0x29,0x19 = ctcmsa $4, $a1
|
||||
0x78,0x3e,0x31,0x59 = ctcmsa $5, $a2
|
||||
0x78,0x3e,0x31,0x59 = ctcmsa $5, $a2
|
||||
0x78,0x3e,0x39,0x99 = ctcmsa $6, $a3
|
||||
0x78,0x3e,0x39,0x99 = ctcmsa $6, $a3
|
||||
0x78,0x3e,0x41,0xd9 = ctcmsa $7, $t0
|
||||
0x78,0x3e,0x41,0xd9 = ctcmsa $7, $t0
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN, None
|
||||
0x78,0x82,0x43,0x59 = copy_s.b $13, $w8[2]
|
||||
0x78,0xa0,0xc8,0x59 = copy_s.h $1, $w25[0]
|
||||
0x78,0xb1,0x2d,0x99 = copy_s.w $22, $w5[1]
|
||||
0x78,0xc4,0xa5,0x99 = copy_u.b $22, $w20[4]
|
||||
0x78,0xe0,0x25,0x19 = copy_u.h $20, $w4[0]
|
||||
0x78,0x82,0x43,0x59 = copy_s.b $t5, $w8[2]
|
||||
0x78,0xa0,0xc8,0x59 = copy_s.h $at, $w25[0]
|
||||
0x78,0xb1,0x2d,0x99 = copy_s.w $s6, $w5[1]
|
||||
0x78,0xc4,0xa5,0x99 = copy_u.b $s6, $w20[4]
|
||||
0x78,0xe0,0x25,0x19 = copy_u.h $s4, $w4[0]
|
||||
0x78,0xf2,0x6f,0x99 = copy_u.w $fp, $w13[2]
|
||||
0x78,0x04,0xe8,0x19 = sldi.b $w0, $w29[4]
|
||||
0x78,0x20,0x8a,0x19 = sldi.h $w8, $w17[0]
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN, None
|
||||
0x79,0x03,0xed,0xd9 = insert.b $w23[3], $sp
|
||||
0x79,0x22,0x2d,0x19 = insert.h $w20[2], $5
|
||||
0x79,0x32,0x7a,0x19 = insert.w $w8[2], $15
|
||||
0x79,0x22,0x2d,0x19 = insert.h $w20[2], $a1
|
||||
0x79,0x32,0x7a,0x19 = insert.w $w8[2], $t7
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN, None
|
||||
0x01,0x2a,0x40,0x05 = lsa $8, $9, $10, 1
|
||||
0x01,0x2a,0x40,0x45 = lsa $8, $9, $10, 2
|
||||
0x01,0x2a,0x40,0x85 = lsa $8, $9, $10, 3
|
||||
0x01,0x2a,0x40,0xc5 = lsa $8, $9, $10, 4
|
||||
0x01,0x2a,0x40,0x05 = lsa $t0, $t1, $t2, 1
|
||||
0x01,0x2a,0x40,0x45 = lsa $t0, $t1, $t2, 2
|
||||
0x01,0x2a,0x40,0x85 = lsa $t0, $t1, $t2, 3
|
||||
0x01,0x2a,0x40,0xc5 = lsa $t0, $t1, $t2, 4
|
||||
|
|
|
@ -1,24 +1,24 @@
|
|||
# CS_ARCH_MIPS, CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN, None
|
||||
0x7a,0x00,0x08,0x20 = ld.b $w0, -512($1)
|
||||
0x78,0x00,0x10,0x60 = ld.b $w1, 0($2)
|
||||
0x79,0xff,0x18,0xa0 = ld.b $w2, 511($3)
|
||||
0x7a,0x00,0x20,0xe1 = ld.h $w3, -1024($4)
|
||||
0x7b,0x00,0x29,0x21 = ld.h $w4, -512($5)
|
||||
0x78,0x00,0x31,0x61 = ld.h $w5, 0($6)
|
||||
0x79,0x00,0x39,0xa1 = ld.h $w6, 512($7)
|
||||
0x79,0xff,0x41,0xe1 = ld.h $w7, 1022($8)
|
||||
0x7a,0x00,0x4a,0x22 = ld.w $w8, -2048($9)
|
||||
0x7b,0x00,0x52,0x62 = ld.w $w9, -1024($10)
|
||||
0x7b,0x80,0x5a,0xa2 = ld.w $w10, -512($11)
|
||||
0x78,0x80,0x62,0xe2 = ld.w $w11, 512($12)
|
||||
0x79,0x00,0x6b,0x22 = ld.w $w12, 1024($13)
|
||||
0x79,0xff,0x73,0x62 = ld.w $w13, 2044($14)
|
||||
0x7a,0x00,0x7b,0xa3 = ld.d $w14, -4096($15)
|
||||
0x7b,0x00,0x83,0xe3 = ld.d $w15, -2048($16)
|
||||
0x7b,0x80,0x8c,0x23 = ld.d $w16, -1024($17)
|
||||
0x7b,0xc0,0x94,0x63 = ld.d $w17, -512($18)
|
||||
0x78,0x00,0x9c,0xa3 = ld.d $w18, 0($19)
|
||||
0x78,0x40,0xa4,0xe3 = ld.d $w19, 512($20)
|
||||
0x78,0x80,0xad,0x23 = ld.d $w20, 1024($21)
|
||||
0x79,0x00,0xb5,0x63 = ld.d $w21, 2048($22)
|
||||
0x79,0xff,0xbd,0xa3 = ld.d $w22, 4088($23)
|
||||
0x7a,0x00,0x08,0x20 = ld.b $w0, -512($at)
|
||||
0x78,0x00,0x10,0x60 = ld.b $w1, ($v0)
|
||||
0x79,0xff,0x18,0xa0 = ld.b $w2, 511($v1)
|
||||
0x7a,0x00,0x20,0xe1 = ld.h $w3, -1024($a0)
|
||||
0x7b,0x00,0x29,0x21 = ld.h $w4, -512($a1)
|
||||
0x78,0x00,0x31,0x61 = ld.h $w5, ($a2)
|
||||
0x79,0x00,0x39,0xa1 = ld.h $w6, 512($a3)
|
||||
0x79,0xff,0x41,0xe1 = ld.h $w7, 1022($t0)
|
||||
0x7a,0x00,0x4a,0x22 = ld.w $w8, -2048($t1)
|
||||
0x7b,0x00,0x52,0x62 = ld.w $w9, -1024($t2)
|
||||
0x7b,0x80,0x5a,0xa2 = ld.w $w10, -512($t3)
|
||||
0x78,0x80,0x62,0xe2 = ld.w $w11, 512($t4)
|
||||
0x79,0x00,0x6b,0x22 = ld.w $w12, 1024($t5)
|
||||
0x79,0xff,0x73,0x62 = ld.w $w13, 2044($t6)
|
||||
0x7a,0x00,0x7b,0xa3 = ld.d $w14, -4096($t7)
|
||||
0x7b,0x00,0x83,0xe3 = ld.d $w15, -2048($s0)
|
||||
0x7b,0x80,0x8c,0x23 = ld.d $w16, -1024($s1)
|
||||
0x7b,0xc0,0x94,0x63 = ld.d $w17, -512($s2)
|
||||
0x78,0x00,0x9c,0xa3 = ld.d $w18, ($s3)
|
||||
0x78,0x40,0xa4,0xe3 = ld.d $w19, 512($s4)
|
||||
0x78,0x80,0xad,0x23 = ld.d $w20, 1024($s5)
|
||||
0x79,0x00,0xb5,0x63 = ld.d $w21, 2048($s6)
|
||||
0x79,0xff,0xbd,0xa3 = ld.d $w22, 4088($s7)
|
||||
|
|
|
@ -54,7 +54,7 @@ extern char *(*function)(csh *, cs_mode, cs_insn*);
|
|||
|
||||
int get_index(double_dict d[], unsigned size, const char *str);
|
||||
int get_value(single_dict d[], unsigned size, const char *str);
|
||||
void test_single_MC(csh *handle, char *line);
|
||||
void test_single_MC(csh *handle, int mc_mode, char *line);
|
||||
void test_single_issue(csh *handle, cs_mode mode, char *line, int detail);
|
||||
int set_function(int arch);
|
||||
|
||||
|
|
|
@ -9,12 +9,17 @@
|
|||
#include <dirent.h>
|
||||
#include "capstone_test.h"
|
||||
|
||||
#define X86_16 0
|
||||
#define X86_32 1
|
||||
#define X86_64 2
|
||||
|
||||
char **split(char *str, char *delim, int *size);
|
||||
void print_strs(char **list_str, int size);
|
||||
void free_strs(char **list_str, int size);
|
||||
void add_str(char **src, const char *format, ...);
|
||||
void trim_str(char *src);
|
||||
void replace_hex(char *src);
|
||||
void replace_negative(char *src, int mode);
|
||||
const char *get_filename_ext(const char *filename);
|
||||
|
||||
char *readfile(const char *filename);
|
||||
|
|
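For orientation, here is a small sketch of how the string helpers declared above might be used on one MC test line; the `" = "` delimiter is inferred from the test files in this commit rather than stated in this header, so treat it as an assumption:

```c
#include <stdio.h>
#include "capstone_test.h"

/* Split "<bytes> = <expected text>" into its two halves and print them.
   Purely illustrative; the real parsing lives in test_single_MC(). */
static void show_line(char *line)
{
	int size_part = 0;
	char **list_part = split(line, " = ", &size_part);

	if (!list_part)
		return;
	if (size_part == 2)
		printf("bytes: %s | expected: %s\n", list_part[0], list_part[1]);

	free_strs(list_part, size_part);
}
```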
|
@ -87,7 +87,7 @@ char *(*function)(csh *, cs_mode, cs_insn*) = NULL;
|
|||
static int quadruple_compare(const char *src1, const char *src2, const char *des1, const char *des2, const char *opcode)
|
||||
{
|
||||
if (strcmp(src1, des2) && strcmp(src2, des2) && strcmp(src1, des1) && strcmp(src1, des2)) {
|
||||
fprintf(stderr,"[ ERROR ] --- %s --- \"%s\" != \"%s\"", src2, des2, opcode);
|
||||
fprintf(stderr,"[ ERROR ] --- %s --- \"%s\" != \"%s\"", opcode, src2, des2);
|
||||
if (strcmp(src1, src2))
|
||||
fprintf(stderr, " (\"%s\" != \"%s\")", src1, des2);
|
||||
else if (strcmp(des1, des2))
|
||||
|
@ -99,7 +99,7 @@ static int quadruple_compare(const char *src1, const char *src2, const char *des
|
|||
return 1;
|
||||
}
|
||||
|
||||
void test_single_MC(csh *handle, char *line)
|
||||
void test_single_MC(csh *handle, int mc_mode, char *line)
|
||||
{
|
||||
char **list_part, **list_byte;//, **list_data;
|
||||
int size_part, size_byte, size_data, size_insn;
|
||||
|
@ -128,6 +128,7 @@ void test_single_MC(csh *handle, char *line)
|
|||
code[i] = (unsigned char)strtol(list_byte[i], NULL, 16);
|
||||
// printf("Byte: 0x%.2x\n", (int)code[i]);
|
||||
}
|
||||
cs_option(*handle, CS_OPT_UNSIGNED, CS_OPT_ON);
|
||||
|
||||
// list_data = split(list_part[1], ";", &size_data);
|
||||
count = cs_disasm(*handle, code, size_byte, offset, 0, &insn);
|
||||
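The added cs_option() call switches the handle to CS_OPT_UNSIGNED, i.e. immediate operands are printed in unsigned form, before each cs_disasm() in test_single_MC(). The same pattern in isolation, as a minimal sketch with placeholder arch/mode arguments:

```c
#include <capstone/capstone.h>

/* Open a handle and enable unsigned immediate printing, mirroring what the
   harness now does before disassembling each test line. */
static csh open_unsigned_handle(cs_arch arch, cs_mode mode)
{
	csh handle = 0;

	if (cs_open(arch, mode, &handle) != CS_ERR_OK)
		return 0;
	cs_option(handle, CS_OPT_UNSIGNED, CS_OPT_ON);
	return handle;
}
```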
|
@ -152,6 +153,7 @@ void test_single_MC(csh *handle, char *line)
|
|||
|
||||
for (p=list_part[1]; *p; ++p) *p = tolower(*p);
|
||||
trim_str(list_part[1]);
|
||||
// replace_negative(list_part[1], mc_mode);
|
||||
|
||||
// tmp = (char *)malloc(strlen(insn[0].mnemonic) + strlen(insn[0].op_str) + 100);
|
||||
strcpy(tmp, insn[0].mnemonic);
|
||||
|
@ -208,6 +210,7 @@ void test_single_MC(csh *handle, char *line)
|
|||
free(code);
|
||||
// free(list_data);
|
||||
cs_free(insn, count);
|
||||
_fail(__FILE__, __LINE__);
|
||||
}
|
||||
|
||||
free(list_part);
|
||||
|
|
|
@ -135,7 +135,65 @@ void replace_hex(char *src)
|
|||
add_str(&result, "%s", tmp);
|
||||
if (strlen(result) >= MAXMEM) {
|
||||
fprintf(stderr, "[ Error ] --- Buffer Overflow in replace_hex()\n");
|
||||
exit(-1);
|
||||
free(result);
|
||||
free(origin);
|
||||
_fail(__FILE__, __LINE__);
|
||||
}
|
||||
|
||||
strcpy(src, result);
|
||||
free(result);
|
||||
free(origin);
|
||||
}
|
||||
|
||||
void replace_negative(char *src, int mode)
|
||||
{
|
||||
char *tmp, *result, *found, *origin;
|
||||
int i, cnt;
|
||||
char *value;
|
||||
unsigned short int tmp_short;
|
||||
unsigned int tmp_int;
|
||||
unsigned long int tmp_long;
|
||||
|
||||
result = (char *)malloc(sizeof(char));
|
||||
result[0] = '\0';
|
||||
tmp = strdup(src);
|
||||
origin = tmp;
|
||||
|
||||
while ((found = strstr(tmp, "-")) != NULL) {
|
||||
*found = '\0';
|
||||
found ++;
|
||||
|
||||
value = strdup("-");
|
||||
cnt = 2;
|
||||
|
||||
while (*found != '\0' && isdigit(*found)) {
|
||||
value = (char *)realloc(value, cnt + 1);
|
||||
value[cnt - 1] = *found;
|
||||
value[cnt] = '\0';
|
||||
cnt ++;
|
||||
found++;
|
||||
}
|
||||
|
||||
if (mode == X86_16) {
|
||||
sscanf(value, "%hu", &tmp_short);
|
||||
add_str(&result, "%s%hu", tmp, tmp_short);
|
||||
} else if (mode == X86_32) {
|
||||
sscanf(value, "%u", &tmp_int);
|
||||
add_str(&result, "%s%u", tmp, tmp_int);
|
||||
} else if (mode == X86_64) {
|
||||
sscanf(value, "%lu", &tmp_long);
|
||||
add_str(&result, "%s%lu", tmp, tmp_long);
|
||||
}
|
||||
tmp = found;
|
||||
free(value);
|
||||
}
|
||||
|
||||
add_str(&result, "%s", tmp);
|
||||
if (strlen(result) >= MAXMEM) {
|
||||
fprintf(stderr, "[ Error ] --- Buffer Overflow in replace_negative()\n");
|
||||
free(result);
|
||||
free(origin);
|
||||
_fail(__FILE__, __LINE__);
|
||||
}
|
||||
|
||||
strcpy(src, result);
|
||||
|
|
|
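A brief usage sketch of replace_negative(): for the selected mode width it rewrites each "-N" in an expected-output string as the corresponding unsigned value. The input string and buffer below are invented for illustration:

```c
#include <stdio.h>
#include <string.h>
#include "capstone_test.h"

int main(void)
{
	char buf[256];

	strcpy(buf, "mov ax, -2");
	/* With X86_16, the "%hu" conversion stores 65534 for "-2",
	   so the line becomes "mov ax, 65534". */
	replace_negative(buf, X86_16);
	printf("%s\n", buf);
	return 0;
}
```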
@ -8,6 +8,7 @@ static int failed_setup;
|
|||
static int size_lines;
|
||||
static cs_mode issue_mode;
|
||||
static int getDetail;
|
||||
static int mc_mode;
|
||||
|
||||
static int setup_MC(void **state)
|
||||
{
|
||||
|
@ -58,7 +59,7 @@ static int setup_MC(void **state)
|
|||
|
||||
static void test_MC(void **state)
|
||||
{
|
||||
test_single_MC((csh *)*state, list_lines[counter]);
|
||||
test_single_MC((csh *)*state, mc_mode, list_lines[counter]);
|
||||
}
|
||||
|
||||
static int teardown_MC(void **state)
|