commit 1e2e8c529c1cf4fcc8cbae382aa0a653d0b65da6
Author: Andreas Krebbel <krebbel@linux.vnet.ibm.com>
Date:   Fri Jan 16 12:19:21 2015 +0100

S/390: Add support for IBM z13.

- 32 128-bit vector registers (overlapping with the existing 16 64-bit
  floating point registers)
- vector double instructions
- vector integer instructions
- scalar vector instructions (making more floating point registers
  available for scalar operations)
- vector string instructions

gas/ChangeLog:

* config/tc-s390.c (struct pd_reg): Remove.
(pre_defined_registers): Remove.
(REG_NAME_CNT): Remove.
(reg_name_search): Calculate the register number instead of doing
a lookup.
(register_name, tc_s390_regname_to_dw2regnum): Adapt to the new
reg_name_search signature.
(s390_parse_cpu): Support the new arch string z13.
(s390_insert_operand): Add support for vector registers with the
extra field for the fifth bit of each vector register operand.
(md_gather_operand): Adjust to the new handling of optional
parameters.

* doc/as.texinfo: Document the z13 cpu string.

gas/testsuite/ChangeLog:

* gas/s390/esa-g5.d: Add a variant without the optional operand.
* gas/s390/esa-g5.s: Likewise.
* gas/s390/esa-z9-109.d: Likewise.
* gas/s390/esa-z9-109.s: Likewise.
* gas/s390/zarch-z9-109.d: Likewise.
* gas/s390/zarch-z9-109.s: Likewise.
* gas/s390/zarch-z10.d: For variants with a zero optional argument,
the operand is no longer dumped by objdump.
* gas/s390/zarch-zEC12.d: Likewise.

* gas/s390/zarch-z13.d: New file.
* gas/s390/zarch-z13.s: New file.
* gas/s390/s390.exp: Run the test for the z13 files.

include/opcode/ChangeLog:

* s390.h (s390_opcode_cpu_val): Add S390_OPCODE_Z13.

ld/testsuite/ChangeLog:

* ld-s390/tlsbin.dd: The nopr register operand is optional and is
no longer printed if it is 0.

opcodes/ChangeLog:

* s390-dis.c (s390_extract_operand): Support vector register
operands.
(s390_print_insn_with_opcode): Support new operand types and add
new handling of optional operands.
* s390-mkopc.c (s390_opcode_mode_val, s390_opcode_cpu_val): Remove
and include opcode/s390.h instead.
(struct op_struct): New field `flags'.
(insertOpcode, insertExpandedMnemonic): New parameter `flags'.
(dumpTable): Dump flags.
(main): Parse flags from the s390-opc.txt file.  Add z13 as cpu
string.
* s390-opc.c: Add new operand types, instruction formats, and
instruction masks.
(s390_opformats): Add new formats for .insn.
* s390-opc.txt: Add new instructions.
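
The key decoding detail behind the vector register support above is the RXB
field: a V operand field in the instruction is only 4 bits wide, so the fifth
bit of each vector register number (0-31) is carried in the low nibble of
instruction byte 4, one RXB bit per possible V field position. The s390-dis.c
hunk further down ORs that bit back into the extracted value inside
s390_extract_operand. The following standalone C sketch mirrors that logic;
the helper name and the hand-written example encoding (meant to be
"vl %v17,0(%r15)") are illustrative only and not part of the patch.

#include <stdio.h>

/* Sketch of the RXB handling the patch adds to s390_extract_operand
   (opcodes/s390-dis.c).  SHIFT is the bit offset of the 4-bit V field
   (8, 12, 16 or 32) and INSN points to the 6-byte instruction.  RXB
   bit 0 extends the field at bit 8, bit 1 the field at bit 12, bit 2
   the field at bit 16 and bit 3 the field at bit 32.  Illustrative
   helper, not binutils code.  */
static unsigned int
extract_vreg (const unsigned char *insn, unsigned int shift)
{
  unsigned int low4 = (insn[shift / 8] >> (4 - shift % 8)) & 0xf;
  unsigned int rxb_bit = (shift == 32) ? 3 : shift / 4 - 2;
  unsigned int high = (insn[4] >> (3 - rxb_bit)) & 1;

  return low4 | (high << 4);
}

int
main (void)
{
  /* Hand-assembled example meant to encode "vl %v17,0(%r15)":
     the V1 field holds 1 and RXB bit 0 is set, so the full
     register number is 16 + 1.  */
  const unsigned char insn[6] = { 0xe7, 0x10, 0xf0, 0x00, 0x08, 0x06 };

  printf ("%%v%u\n", extract_vreg (insn, 8));
  return 0;
}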
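
The other disassembler change is driven by the new S390_INSTR_FLAG_OPTPARM
flag: when an instruction is marked as taking an optional last operand, a
value of zero is simply not printed, which is why the zarch-z10.d,
zarch-zEC12.d and ld-s390/tlsbin.dd expected outputs change. A rough
standalone sketch of that decision follows; the helper and its arguments are
made up for illustration and only mimic the check added to
s390_print_insn_with_opcode.

#include <stdio.h>

#define S390_INSTR_FLAG_OPTPARM 0x1   /* as defined in include/opcode/s390.h */

/* Print operand values VALS for the operand index list OPINDEX
   (terminated by 0, as in the s390_opcodes tables).  A last operand
   that is optional and zero is suppressed, mirroring the new check in
   s390_print_insn_with_opcode.  Illustrative helper only.  */
static void
print_operands (const unsigned char *opindex, const unsigned int *vals,
                unsigned int opcode_flags)
{
  char separator = '\t';

  for (; *opindex != 0; opindex++, vals++)
    {
      if ((opcode_flags & S390_INSTR_FLAG_OPTPARM)
          && *vals == 0
          && opindex[1] == 0)
        break;
      printf ("%c%u", separator, *vals);
      separator = ',';
    }
  putchar ('\n');
}

int
main (void)
{
  const unsigned char opindex[] = { 1, 2, 3, 0 };
  const unsigned int nonzero_mask[] = { 1, 2, 7 };
  const unsigned int zero_mask[] = { 1, 2, 0 };

  print_operands (opindex, nonzero_mask, S390_INSTR_FLAG_OPTPARM); /* prints 1,2,7 */
  print_operands (opindex, zero_mask, S390_INSTR_FLAG_OPTPARM);    /* prints 1,2 */
  return 0;
}
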
--- a/include/opcode/ChangeLog |
|
+++ b/include/opcode/ChangeLog |
|
@@ -1,3 +1,7 @@ |
|
+2015-01-16 Andreas Krebbel <krebbel@linux.vnet.ibm.com> |
|
+ |
|
+ * s390.h (s390_opcode_cpu_val): Add S390_OPCODE_Z13. |
|
+ |
|
2015-01-01 Alan Modra <amodra@gmail.com> |
|
|
|
Update year range in copyright notice of all files. |
|
--- a/include/opcode/s390.h |
|
+++ b/include/opcode/s390.h |
|
@@ -41,9 +41,13 @@ enum s390_opcode_cpu_val |
|
S390_OPCODE_Z10, |
|
S390_OPCODE_Z196, |
|
S390_OPCODE_ZEC12, |
|
+ S390_OPCODE_Z13, |
|
S390_OPCODE_MAXCPU |
|
}; |
|
|
|
+/* Instruction specific flags. */ |
|
+#define S390_INSTR_FLAG_OPTPARM 0x1 |
|
+ |
|
/* The opcode table is an array of struct s390_opcode. */ |
|
|
|
struct s390_opcode |
|
@@ -74,6 +78,9 @@ struct s390_opcode |
|
|
|
/* First cpu this opcode is available for. */ |
|
enum s390_opcode_cpu_val min_cpu; |
|
+ |
|
+ /* Instruction specific flags. */ |
|
+ unsigned int flags; |
|
}; |
|
|
|
/* The table itself is sorted by major opcode number, and is otherwise |
|
@@ -86,7 +93,7 @@ extern const int s390_num_opcodes; |
|
extern const struct s390_opcode s390_opformats[]; |
|
extern const int s390_num_opformats; |
|
|
|
-/* Values defined for the flags field of a struct powerpc_opcode. */ |
|
+/* Values defined for the flags field of a struct s390_opcode. */ |
|
|
|
/* The operands table is an array of struct s390_operand. */ |
|
|
|
@@ -103,7 +110,7 @@ struct s390_operand |
|
}; |
|
|
|
/* Elements in the table are retrieved by indexing with values from |
|
- the operands field of the powerpc_opcodes table. */ |
|
+ the operands field of the s390_opcodes table. */ |
|
|
|
extern const struct s390_operand s390_operands[]; |
|
|
|
@@ -151,4 +158,14 @@ extern const struct s390_operand s390_operands[]; |
|
/* The operand needs to be a valid GP or FP register pair. */ |
|
#define S390_OPERAND_REG_PAIR 0x800 |
|
|
|
- #endif /* S390_H */ |
|
+/* This operand names a vector register. The disassembler uses this |
|
+ to print register names with a leading 'v'. */ |
|
+#define S390_OPERAND_VR 0x1000 |
|
+ |
|
+#define S390_OPERAND_CP16 0x2000 |
|
+ |
|
+#define S390_OPERAND_OR1 0x4000 |
|
+#define S390_OPERAND_OR2 0x8000 |
|
+#define S390_OPERAND_OR8 0x10000 |
|
+ |
|
+#endif /* S390_H */ |
|
--- a/opcodes/s390-dis.c |
|
+++ b/opcodes/s390-dis.c |
|
@@ -107,6 +107,7 @@ s390_extract_operand (const bfd_byte *insn, |
|
union operand_value ret; |
|
unsigned int val; |
|
int bits; |
|
+ const bfd_byte *orig_insn = insn; |
|
|
|
/* Extract fragments of the operand byte for byte. */ |
|
insn += operand->shift / 8; |
|
@@ -140,6 +141,16 @@ s390_extract_operand (const bfd_byte *insn, |
|
else if (operand->flags & S390_OPERAND_LENGTH) |
|
/* Length x in an instruction has real length x + 1. */ |
|
ret.u = val + 1; |
|
+ |
|
+ else if (operand->flags & S390_OPERAND_VR) |
|
+ { |
|
+ /* Extract the extra bits for a vector register operand stored |
|
+ in the RXB field. */ |
|
+ unsigned vr = operand->shift == 32 ? 3 |
|
+ : (unsigned) operand->shift / 4 - 2; |
|
+ |
|
+ ret.u = val | ((orig_insn[4] & (1 << (3 - vr))) << (vr + 1)); |
|
+ } |
|
else |
|
ret.u = val; |
|
|
|
@@ -178,22 +189,45 @@ s390_print_insn_with_opcode (bfd_vma memaddr, |
|
continue; |
|
} |
|
|
|
- info->fprintf_func (info->stream, "%c", separator); |
|
+ /* For instructions with a last optional operand don't print it |
|
+ if zero. */ |
|
+ if ((opcode->flags & S390_INSTR_FLAG_OPTPARM) |
|
+ && val.u == 0 |
|
+ && opindex[1] == 0) |
|
+ break; |
|
|
|
if (flags & S390_OPERAND_GPR) |
|
- info->fprintf_func (info->stream, "%%r%u", val.u); |
|
+ info->fprintf_func (info->stream, "%c%%r%u", separator, val.u); |
|
else if (flags & S390_OPERAND_FPR) |
|
- info->fprintf_func (info->stream, "%%f%u", val.u); |
|
+ info->fprintf_func (info->stream, "%c%%f%u", separator, val.u); |
|
+ else if (flags & S390_OPERAND_VR) |
|
+ info->fprintf_func (info->stream, "%c%%v%i", separator, val.u); |
|
else if (flags & S390_OPERAND_AR) |
|
- info->fprintf_func (info->stream, "%%a%u", val.u); |
|
+ info->fprintf_func (info->stream, "%c%%a%u", separator, val.u); |
|
else if (flags & S390_OPERAND_CR) |
|
- info->fprintf_func (info->stream, "%%c%u", val.u); |
|
+ info->fprintf_func (info->stream, "%c%%c%u", separator, val.u); |
|
else if (flags & S390_OPERAND_PCREL) |
|
- info->print_address_func (memaddr + val.i + val.i, info); |
|
+ { |
|
+ info->fprintf_func (info->stream, "%c", separator); |
|
+ info->print_address_func (memaddr + val.i + val.i, info); |
|
+ } |
|
else if (flags & S390_OPERAND_SIGNED) |
|
- info->fprintf_func (info->stream, "%i", val.i); |
|
+ info->fprintf_func (info->stream, "%c%i", separator, val.i); |
|
else |
|
- info->fprintf_func (info->stream, "%u", val.u); |
|
+ { |
|
+ if (flags & S390_OPERAND_OR1) |
|
+ val.u &= ~1; |
|
+ if (flags & S390_OPERAND_OR2) |
|
+ val.u &= ~2; |
|
+ if (flags & S390_OPERAND_OR8) |
|
+ val.u &= ~8; |
|
+ |
|
+ if ((opcode->flags & S390_INSTR_FLAG_OPTPARM) |
|
+ && val.u == 0 |
|
+ && opindex[1] == 0) |
|
+ break; |
|
+ info->fprintf_func (info->stream, "%c%u", separator, val.u); |
|
+ } |
|
|
|
if (flags & S390_OPERAND_DISP) |
|
separator = '('; |
|
--- a/opcodes/s390-mkopc.c |
|
+++ b/opcodes/s390-mkopc.c |
|
@@ -22,26 +22,7 @@ |
|
#include <stdio.h> |
|
#include <stdlib.h> |
|
#include <string.h> |
|
- |
|
-/* Taken from opcodes/s390.h */ |
|
-enum s390_opcode_mode_val |
|
- { |
|
- S390_OPCODE_ESA = 0, |
|
- S390_OPCODE_ZARCH |
|
- }; |
|
- |
|
-enum s390_opcode_cpu_val |
|
- { |
|
- S390_OPCODE_G5 = 0, |
|
- S390_OPCODE_G6, |
|
- S390_OPCODE_Z900, |
|
- S390_OPCODE_Z990, |
|
- S390_OPCODE_Z9_109, |
|
- S390_OPCODE_Z9_EC, |
|
- S390_OPCODE_Z10, |
|
- S390_OPCODE_Z196, |
|
- S390_OPCODE_ZEC12 |
|
- }; |
|
+#include "opcode/s390.h" |
|
|
|
struct op_struct |
|
{ |
|
@@ -50,6 +31,7 @@ struct op_struct |
|
char format[16]; |
|
int mode_bits; |
|
int min_cpu; |
|
+ int flags; |
|
|
|
unsigned long long sort_value; |
|
int no_nibbles; |
|
@@ -71,7 +53,7 @@ createTable (void) |
|
|
|
static void |
|
insertOpcode (char *opcode, char *mnemonic, char *format, |
|
- int min_cpu, int mode_bits) |
|
+ int min_cpu, int mode_bits, int flags) |
|
{ |
|
char *str; |
|
unsigned long long sort_value; |
|
@@ -115,6 +97,7 @@ insertOpcode (char *opcode, char *mnemonic, char *format, |
|
op_array[ix].no_nibbles = no_nibbles; |
|
op_array[ix].min_cpu = min_cpu; |
|
op_array[ix].mode_bits = mode_bits; |
|
+ op_array[ix].flags = flags; |
|
no_ops++; |
|
} |
|
|
|
@@ -176,7 +159,7 @@ const struct s390_cond_ext_format s390_crb_extensions[NUM_CRB_EXTENSIONS] = |
|
|
|
static void |
|
insertExpandedMnemonic (char *opcode, char *mnemonic, char *format, |
|
- int min_cpu, int mode_bits) |
|
+ int min_cpu, int mode_bits, int flags) |
|
{ |
|
char *tag; |
|
char prefix[15]; |
|
@@ -189,7 +172,7 @@ insertExpandedMnemonic (char *opcode, char *mnemonic, char *format, |
|
|
|
if (!(tag = strpbrk (mnemonic, "*$"))) |
|
{ |
|
- insertOpcode (opcode, mnemonic, format, min_cpu, mode_bits); |
|
+ insertOpcode (opcode, mnemonic, format, min_cpu, mode_bits, flags); |
|
return; |
|
} |
|
|
|
@@ -268,7 +251,7 @@ insertExpandedMnemonic (char *opcode, char *mnemonic, char *format, |
|
opcode[mask_start] = ext_table[i].nibble; |
|
strcat (new_mnemonic, ext_table[i].extension); |
|
strcat (new_mnemonic, suffix); |
|
- insertOpcode (opcode, new_mnemonic, format, min_cpu, mode_bits); |
|
+ insertOpcode (opcode, new_mnemonic, format, min_cpu, mode_bits, flags); |
|
} |
|
return; |
|
|
|
@@ -286,7 +269,10 @@ static const char file_header[] = |
|
" which bits in the actual opcode must match OPCODE.\n" |
|
" OPERANDS is the list of operands.\n\n" |
|
" The disassembler reads the table in order and prints the first\n" |
|
- " instruction which matches. */\n\n" |
|
+ " instruction which matches.\n" |
|
+ " MODE_BITS - zarch or esa\n" |
|
+ " MIN_CPU - number of the min cpu level required\n" |
|
+ " FLAGS - instruction flags. */\n\n" |
|
"const struct s390_opcode s390_opcodes[] =\n {\n"; |
|
|
|
/* `dumpTable': write opcode table. */ |
|
@@ -311,7 +297,8 @@ dumpTable (void) |
|
printf ("MASK_%s, INSTR_%s, ", |
|
op_array[ix].format, op_array[ix].format); |
|
printf ("%i, ", op_array[ix].mode_bits); |
|
- printf ("%i}", op_array[ix].min_cpu); |
|
+ printf ("%i, ", op_array[ix].min_cpu); |
|
+ printf ("%i}", op_array[ix].flags); |
|
if (ix < no_ops-1) |
|
printf (",\n"); |
|
else |
|
@@ -339,67 +326,91 @@ main (void) |
|
char description[80]; |
|
char cpu_string[16]; |
|
char modes_string[16]; |
|
+ char flags_string[80]; |
|
int min_cpu; |
|
int mode_bits; |
|
+ int flag_bits; |
|
+ int num_matched; |
|
char *str; |
|
|
|
if (currentLine[0] == '#' || currentLine[0] == '\n') |
|
continue; |
|
memset (opcode, 0, 8); |
|
- if (sscanf (currentLine, "%15s %15s %15s \"%79[^\"]\" %15s %15s", |
|
- opcode, mnemonic, format, description, |
|
- cpu_string, modes_string) == 6) |
|
+ num_matched = |
|
+ sscanf (currentLine, "%15s %15s %15s \"%79[^\"]\" %15s %15s %79[^\n]", |
|
+ opcode, mnemonic, format, description, |
|
+ cpu_string, modes_string, flags_string); |
|
+ if (num_matched != 6 && num_matched != 7) |
|
{ |
|
- if (strcmp (cpu_string, "g5") == 0) |
|
- min_cpu = S390_OPCODE_G5; |
|
- else if (strcmp (cpu_string, "g6") == 0) |
|
- min_cpu = S390_OPCODE_G6; |
|
- else if (strcmp (cpu_string, "z900") == 0) |
|
- min_cpu = S390_OPCODE_Z900; |
|
- else if (strcmp (cpu_string, "z990") == 0) |
|
- min_cpu = S390_OPCODE_Z990; |
|
- else if (strcmp (cpu_string, "z9-109") == 0) |
|
- min_cpu = S390_OPCODE_Z9_109; |
|
- else if (strcmp (cpu_string, "z9-ec") == 0) |
|
- min_cpu = S390_OPCODE_Z9_EC; |
|
- else if (strcmp (cpu_string, "z10") == 0) |
|
- min_cpu = S390_OPCODE_Z10; |
|
- else if (strcmp (cpu_string, "z196") == 0) |
|
- min_cpu = S390_OPCODE_Z196; |
|
- else if (strcmp (cpu_string, "zEC12") == 0) |
|
- min_cpu = S390_OPCODE_ZEC12; |
|
- else { |
|
- fprintf (stderr, "Couldn't parse cpu string %s\n", cpu_string); |
|
- exit (1); |
|
- } |
|
+ fprintf (stderr, "Couldn't scan line %s\n", currentLine); |
|
+ exit (1); |
|
+ } |
|
|
|
- str = modes_string; |
|
- mode_bits = 0; |
|
+ if (strcmp (cpu_string, "g5") == 0) |
|
+ min_cpu = S390_OPCODE_G5; |
|
+ else if (strcmp (cpu_string, "g6") == 0) |
|
+ min_cpu = S390_OPCODE_G6; |
|
+ else if (strcmp (cpu_string, "z900") == 0) |
|
+ min_cpu = S390_OPCODE_Z900; |
|
+ else if (strcmp (cpu_string, "z990") == 0) |
|
+ min_cpu = S390_OPCODE_Z990; |
|
+ else if (strcmp (cpu_string, "z9-109") == 0) |
|
+ min_cpu = S390_OPCODE_Z9_109; |
|
+ else if (strcmp (cpu_string, "z9-ec") == 0) |
|
+ min_cpu = S390_OPCODE_Z9_EC; |
|
+ else if (strcmp (cpu_string, "z10") == 0) |
|
+ min_cpu = S390_OPCODE_Z10; |
|
+ else if (strcmp (cpu_string, "z196") == 0) |
|
+ min_cpu = S390_OPCODE_Z196; |
|
+ else if (strcmp (cpu_string, "zEC12") == 0) |
|
+ min_cpu = S390_OPCODE_ZEC12; |
|
+ else if (strcmp (cpu_string, "z13") == 0) |
|
+ min_cpu = S390_OPCODE_Z13; |
|
+ else { |
|
+ fprintf (stderr, "Couldn't parse cpu string %s\n", cpu_string); |
|
+ exit (1); |
|
+ } |
|
+ |
|
+ str = modes_string; |
|
+ mode_bits = 0; |
|
+ do { |
|
+ if (strncmp (str, "esa", 3) == 0 |
|
+ && (str[3] == 0 || str[3] == ',')) { |
|
+ mode_bits |= 1 << S390_OPCODE_ESA; |
|
+ str += 3; |
|
+ } else if (strncmp (str, "zarch", 5) == 0 |
|
+ && (str[5] == 0 || str[5] == ',')) { |
|
+ mode_bits |= 1 << S390_OPCODE_ZARCH; |
|
+ str += 5; |
|
+ } else { |
|
+ fprintf (stderr, "Couldn't parse modes string %s\n", |
|
+ modes_string); |
|
+ exit (1); |
|
+ } |
|
+ if (*str == ',') |
|
+ str++; |
|
+ } while (*str != 0); |
|
+ |
|
+ flag_bits = 0; |
|
+ |
|
+ if (num_matched == 7) |
|
+ { |
|
+ str = flags_string; |
|
do { |
|
- if (strncmp (str, "esa", 3) == 0 |
|
- && (str[3] == 0 || str[3] == ',')) { |
|
- mode_bits |= 1 << S390_OPCODE_ESA; |
|
- str += 3; |
|
- } else if (strncmp (str, "zarch", 5) == 0 |
|
- && (str[5] == 0 || str[5] == ',')) { |
|
- mode_bits |= 1 << S390_OPCODE_ZARCH; |
|
- str += 5; |
|
+ if (strncmp (str, "optparm", 7) == 0 |
|
+ && (str[7] == 0 || str[7] == ',')) { |
|
+ flag_bits |= S390_INSTR_FLAG_OPTPARM; |
|
+ str += 7; |
|
} else { |
|
- fprintf (stderr, "Couldn't parse modes string %s\n", |
|
- modes_string); |
|
+ fprintf (stderr, "Couldn't parse flags string %s\n", |
|
+ flags_string); |
|
exit (1); |
|
} |
|
if (*str == ',') |
|
str++; |
|
} while (*str != 0); |
|
- |
|
- insertExpandedMnemonic (opcode, mnemonic, format, min_cpu, mode_bits); |
|
- } |
|
- else |
|
- { |
|
- fprintf (stderr, "Couldn't scan line %s\n", currentLine); |
|
- exit (1); |
|
} |
|
+ insertExpandedMnemonic (opcode, mnemonic, format, min_cpu, mode_bits, flag_bits); |
|
} |
|
|
|
dumpTable (); |
|
--- a/opcodes/s390-opc.c |
|
+++ b/opcodes/s390-opc.c |
|
@@ -44,197 +44,210 @@ const struct s390_operand s390_operands[] = |
|
|
|
/* General purpose register operands. */ |
|
|
|
-#define R_8 1 /* GPR starting at position 8 */ |
|
+#define R_8 1 /* GPR starting at position 8 */ |
|
{ 4, 8, S390_OPERAND_GPR }, |
|
-#define R_12 2 /* GPR starting at position 12 */ |
|
+#define R_12 2 /* GPR starting at position 12 */ |
|
{ 4, 12, S390_OPERAND_GPR }, |
|
-#define RO_12 3 /* optional GPR starting at position 12 */ |
|
- { 4, 12, S390_OPERAND_GPR | S390_OPERAND_OPTIONAL }, |
|
-#define R_16 4 /* GPR starting at position 16 */ |
|
+#define R_16 3 /* GPR starting at position 16 */ |
|
{ 4, 16, S390_OPERAND_GPR }, |
|
-#define R_20 5 /* GPR starting at position 20 */ |
|
+#define R_20 4 /* GPR starting at position 20 */ |
|
{ 4, 20, S390_OPERAND_GPR }, |
|
-#define R_24 6 /* GPR starting at position 24 */ |
|
+#define R_24 5 /* GPR starting at position 24 */ |
|
{ 4, 24, S390_OPERAND_GPR }, |
|
-#define R_28 7 /* GPR starting at position 28 */ |
|
+#define R_28 6 /* GPR starting at position 28 */ |
|
{ 4, 28, S390_OPERAND_GPR }, |
|
-#define RO_28 8 /* optional GPR starting at position 28 */ |
|
- { 4, 28, (S390_OPERAND_GPR | S390_OPERAND_OPTIONAL) }, |
|
-#define R_32 9 /* GPR starting at position 32 */ |
|
+#define R_32 7 /* GPR starting at position 32 */ |
|
{ 4, 32, S390_OPERAND_GPR }, |
|
|
|
/* General purpose register pair operands. */ |
|
|
|
-#define RE_8 10 /* GPR starting at position 8 */ |
|
+#define RE_8 8 /* GPR starting at position 8 */ |
|
{ 4, 8, S390_OPERAND_GPR | S390_OPERAND_REG_PAIR }, |
|
-#define RE_12 11 /* GPR starting at position 12 */ |
|
+#define RE_12 9 /* GPR starting at position 12 */ |
|
{ 4, 12, S390_OPERAND_GPR | S390_OPERAND_REG_PAIR }, |
|
-#define RE_16 12 /* GPR starting at position 16 */ |
|
+#define RE_16 10 /* GPR starting at position 16 */ |
|
{ 4, 16, S390_OPERAND_GPR | S390_OPERAND_REG_PAIR }, |
|
-#define RE_20 13 /* GPR starting at position 20 */ |
|
+#define RE_20 11 /* GPR starting at position 20 */ |
|
{ 4, 20, S390_OPERAND_GPR | S390_OPERAND_REG_PAIR }, |
|
-#define RE_24 14 /* GPR starting at position 24 */ |
|
+#define RE_24 12 /* GPR starting at position 24 */ |
|
{ 4, 24, S390_OPERAND_GPR | S390_OPERAND_REG_PAIR }, |
|
-#define RE_28 15 /* GPR starting at position 28 */ |
|
+#define RE_28 13 /* GPR starting at position 28 */ |
|
{ 4, 28, S390_OPERAND_GPR | S390_OPERAND_REG_PAIR }, |
|
-#define RE_32 16 /* GPR starting at position 32 */ |
|
+#define RE_32 14 /* GPR starting at position 32 */ |
|
{ 4, 32, S390_OPERAND_GPR | S390_OPERAND_REG_PAIR }, |
|
|
|
- |
|
/* Floating point register operands. */ |
|
|
|
-#define F_8 17 /* FPR starting at position 8 */ |
|
+#define F_8 15 /* FPR starting at position 8 */ |
|
{ 4, 8, S390_OPERAND_FPR }, |
|
-#define F_12 18 /* FPR starting at position 12 */ |
|
+#define F_12 16 /* FPR starting at position 12 */ |
|
{ 4, 12, S390_OPERAND_FPR }, |
|
-#define F_16 19 /* FPR starting at position 16 */ |
|
+#define F_16 17 /* FPR starting at position 16 */ |
|
{ 4, 16, S390_OPERAND_FPR }, |
|
-#define F_20 20 /* FPR starting at position 16 */ |
|
+#define F_20 18 /* FPR starting at position 16 */ |
|
{ 4, 16, S390_OPERAND_FPR }, |
|
-#define F_24 21 /* FPR starting at position 24 */ |
|
+#define F_24 19 /* FPR starting at position 24 */ |
|
{ 4, 24, S390_OPERAND_FPR }, |
|
-#define F_28 22 /* FPR starting at position 28 */ |
|
+#define F_28 20 /* FPR starting at position 28 */ |
|
{ 4, 28, S390_OPERAND_FPR }, |
|
-#define F_32 23 /* FPR starting at position 32 */ |
|
+#define F_32 21 /* FPR starting at position 32 */ |
|
{ 4, 32, S390_OPERAND_FPR }, |
|
|
|
/* Floating point register pair operands. */ |
|
|
|
-#define FE_8 24 /* FPR starting at position 8 */ |
|
+#define FE_8 22 /* FPR starting at position 8 */ |
|
{ 4, 8, S390_OPERAND_FPR | S390_OPERAND_REG_PAIR }, |
|
-#define FE_12 25 /* FPR starting at position 12 */ |
|
+#define FE_12 23 /* FPR starting at position 12 */ |
|
{ 4, 12, S390_OPERAND_FPR | S390_OPERAND_REG_PAIR }, |
|
-#define FE_16 26 /* FPR starting at position 16 */ |
|
+#define FE_16 24 /* FPR starting at position 16 */ |
|
{ 4, 16, S390_OPERAND_FPR | S390_OPERAND_REG_PAIR }, |
|
-#define FE_20 27 /* FPR starting at position 16 */ |
|
+#define FE_20 25 /* FPR starting at position 16 */ |
|
{ 4, 16, S390_OPERAND_FPR | S390_OPERAND_REG_PAIR }, |
|
-#define FE_24 28 /* FPR starting at position 24 */ |
|
+#define FE_24 26 /* FPR starting at position 24 */ |
|
{ 4, 24, S390_OPERAND_FPR | S390_OPERAND_REG_PAIR }, |
|
-#define FE_28 29 /* FPR starting at position 28 */ |
|
+#define FE_28 27 /* FPR starting at position 28 */ |
|
{ 4, 28, S390_OPERAND_FPR | S390_OPERAND_REG_PAIR }, |
|
-#define FE_32 30 /* FPR starting at position 32 */ |
|
+#define FE_32 28 /* FPR starting at position 32 */ |
|
{ 4, 32, S390_OPERAND_FPR | S390_OPERAND_REG_PAIR }, |
|
|
|
+/* Vector register operands. */ |
|
+ |
|
+/* For each of these operands and additional bit in the RXB operand is |
|
+ needed. */ |
|
+ |
|
+#define V_8 29 /* Vector reg. starting at position 8 */ |
|
+ { 4, 8, S390_OPERAND_VR }, |
|
+#define V_12 30 /* Vector reg. starting at position 12 */ |
|
+ { 4, 12, S390_OPERAND_VR }, |
|
+#define V_CP16_12 31 /* Vector reg. starting at position 12 */ |
|
+ { 4, 12, S390_OPERAND_VR | S390_OPERAND_CP16 }, /* with a copy at pos 16 */ |
|
+#define V_16 32 /* Vector reg. starting at position 16 */ |
|
+ { 4, 16, S390_OPERAND_VR }, |
|
+#define V_32 33 /* Vector reg. starting at position 32 */ |
|
+ { 4, 32, S390_OPERAND_VR }, |
|
|
|
/* Access register operands. */ |
|
|
|
-#define A_8 31 /* Access reg. starting at position 8 */ |
|
+#define A_8 34 /* Access reg. starting at position 8 */ |
|
{ 4, 8, S390_OPERAND_AR }, |
|
-#define A_12 32 /* Access reg. starting at position 12 */ |
|
+#define A_12 35 /* Access reg. starting at position 12 */ |
|
{ 4, 12, S390_OPERAND_AR }, |
|
-#define A_24 33 /* Access reg. starting at position 24 */ |
|
+#define A_24 36 /* Access reg. starting at position 24 */ |
|
{ 4, 24, S390_OPERAND_AR }, |
|
-#define A_28 34 /* Access reg. starting at position 28 */ |
|
+#define A_28 37 /* Access reg. starting at position 28 */ |
|
{ 4, 28, S390_OPERAND_AR }, |
|
|
|
/* Control register operands. */ |
|
|
|
-#define C_8 35 /* Control reg. starting at position 8 */ |
|
+#define C_8 38 /* Control reg. starting at position 8 */ |
|
{ 4, 8, S390_OPERAND_CR }, |
|
-#define C_12 36 /* Control reg. starting at position 12 */ |
|
+#define C_12 39 /* Control reg. starting at position 12 */ |
|
{ 4, 12, S390_OPERAND_CR }, |
|
|
|
/* Base register operands. */ |
|
|
|
-#define B_16 37 /* Base register starting at position 16 */ |
|
+#define B_16 40 /* Base register starting at position 16 */ |
|
{ 4, 16, S390_OPERAND_BASE | S390_OPERAND_GPR }, |
|
-#define B_32 38 /* Base register starting at position 32 */ |
|
+#define B_32 41 /* Base register starting at position 32 */ |
|
{ 4, 32, S390_OPERAND_BASE | S390_OPERAND_GPR }, |
|
|
|
-#define X_12 39 /* Index register starting at position 12 */ |
|
+#define X_12 42 /* Index register starting at position 12 */ |
|
{ 4, 12, S390_OPERAND_INDEX | S390_OPERAND_GPR }, |
|
|
|
+#define VX_12 43 /* Vector index register starting at position 12 */ |
|
+ { 4, 12, S390_OPERAND_INDEX | S390_OPERAND_VR }, |
|
+ |
|
/* Address displacement operands. */ |
|
|
|
-#define D_20 40 /* Displacement starting at position 20 */ |
|
+#define D_20 44 /* Displacement starting at position 20 */ |
|
{ 12, 20, S390_OPERAND_DISP }, |
|
-#define DO_20 41 /* optional Displ. starting at position 20 */ |
|
- { 12, 20, S390_OPERAND_DISP | S390_OPERAND_OPTIONAL }, |
|
-#define D_36 42 /* Displacement starting at position 36 */ |
|
+#define D_36 45 /* Displacement starting at position 36 */ |
|
{ 12, 36, S390_OPERAND_DISP }, |
|
-#define D20_20 43 /* 20 bit displacement starting at 20 */ |
|
+#define D20_20 46 /* 20 bit displacement starting at 20 */ |
|
{ 20, 20, S390_OPERAND_DISP | S390_OPERAND_SIGNED }, |
|
|
|
/* Length operands. */ |
|
|
|
-#define L4_8 44 /* 4 bit length starting at position 8 */ |
|
+#define L4_8 47 /* 4 bit length starting at position 8 */ |
|
{ 4, 8, S390_OPERAND_LENGTH }, |
|
-#define L4_12 45 /* 4 bit length starting at position 12 */ |
|
+#define L4_12 48 /* 4 bit length starting at position 12 */ |
|
{ 4, 12, S390_OPERAND_LENGTH }, |
|
-#define L8_8 46 /* 8 bit length starting at position 8 */ |
|
+#define L8_8 49 /* 8 bit length starting at position 8 */ |
|
{ 8, 8, S390_OPERAND_LENGTH }, |
|
|
|
/* Signed immediate operands. */ |
|
|
|
-#define I8_8 47 /* 8 bit signed value starting at 8 */ |
|
+#define I8_8 50 /* 8 bit signed value starting at 8 */ |
|
{ 8, 8, S390_OPERAND_SIGNED }, |
|
-#define I8_32 48 /* 8 bit signed value starting at 32 */ |
|
+#define I8_32 51 /* 8 bit signed value starting at 32 */ |
|
{ 8, 32, S390_OPERAND_SIGNED }, |
|
-#define I12_12 49 /* 12 bit signed value starting at 12 */ |
|
+#define I12_12 52 /* 12 bit signed value starting at 12 */ |
|
{ 12, 12, S390_OPERAND_SIGNED }, |
|
-#define I16_16 50 /* 16 bit signed value starting at 16 */ |
|
+#define I16_16 53 /* 16 bit signed value starting at 16 */ |
|
{ 16, 16, S390_OPERAND_SIGNED }, |
|
-#define I16_32 51 /* 16 bit signed value starting at 32 */ |
|
+#define I16_32 54 /* 16 bit signed value starting at 32 */ |
|
{ 16, 32, S390_OPERAND_SIGNED }, |
|
-#define I24_24 52 /* 24 bit signed value starting at 24 */ |
|
+#define I24_24 55 /* 24 bit signed value starting at 24 */ |
|
{ 24, 24, S390_OPERAND_SIGNED }, |
|
-#define I32_16 53 /* 32 bit signed value starting at 16 */ |
|
+#define I32_16 56 /* 32 bit signed value starting at 16 */ |
|
{ 32, 16, S390_OPERAND_SIGNED }, |
|
|
|
/* Unsigned immediate operands. */ |
|
|
|
-#define U4_8 54 /* 4 bit unsigned value starting at 8 */ |
|
+#define U4_8 57 /* 4 bit unsigned value starting at 8 */ |
|
{ 4, 8, 0 }, |
|
-#define U4_12 55 /* 4 bit unsigned value starting at 12 */ |
|
+#define U4_12 58 /* 4 bit unsigned value starting at 12 */ |
|
{ 4, 12, 0 }, |
|
-#define U4_16 56 /* 4 bit unsigned value starting at 16 */ |
|
+#define U4_16 59 /* 4 bit unsigned value starting at 16 */ |
|
{ 4, 16, 0 }, |
|
-#define U4_20 57 /* 4 bit unsigned value starting at 20 */ |
|
+#define U4_20 60 /* 4 bit unsigned value starting at 20 */ |
|
{ 4, 20, 0 }, |
|
-#define U4_24 58 /* 4 bit unsigned value starting at 24 */ |
|
+#define U4_24 61 /* 4 bit unsigned value starting at 24 */ |
|
{ 4, 24, 0 }, |
|
-#define U4_28 59 /* 4 bit unsigned value starting at 28 */ |
|
+#define U4_OR1_24 62 /* 4 bit unsigned value starting at 24 */ |
|
+ { 4, 24, S390_OPERAND_OR1 }, |
|
+#define U4_OR2_24 63 /* 4 bit unsigned value starting at 24 */ |
|
+ { 4, 24, S390_OPERAND_OR2 }, |
|
+#define U4_OR3_24 64 /* 4 bit unsigned value starting at 24 */ |
|
+ { 4, 24, S390_OPERAND_OR1 | S390_OPERAND_OR2 }, |
|
+#define U4_28 65 /* 4 bit unsigned value starting at 28 */ |
|
{ 4, 28, 0 }, |
|
-#define U4_32 60 /* 4 bit unsigned value starting at 32 */ |
|
+#define U4_OR8_28 66 |
|
+ { 4, 28, S390_OPERAND_OR8 }, |
|
+#define U4_32 67 /* 4 bit unsigned value starting at 32 */ |
|
{ 4, 32, 0 }, |
|
-#define U4_36 61 /* 4 bit unsigned value starting at 36 */ |
|
+#define U4_36 68 /* 4 bit unsigned value starting at 36 */ |
|
{ 4, 36, 0 }, |
|
-#define U8_8 62 /* 8 bit unsigned value starting at 8 */ |
|
+#define U8_8 69 /* 8 bit unsigned value starting at 8 */ |
|
{ 8, 8, 0 }, |
|
-#define U8_16 63 /* 8 bit unsigned value starting at 16 */ |
|
+#define U8_16 70 /* 8 bit unsigned value starting at 16 */ |
|
{ 8, 16, 0 }, |
|
-#define U8_24 64 /* 8 bit unsigned value starting at 24 */ |
|
+#define U8_24 71 /* 8 bit unsigned value starting at 24 */ |
|
{ 8, 24, 0 }, |
|
-#define U8_32 65 /* 8 bit unsigned value starting at 32 */ |
|
+#define U8_32 72 /* 8 bit unsigned value starting at 32 */ |
|
{ 8, 32, 0 }, |
|
-#define U16_16 66 /* 16 bit unsigned value starting at 16 */ |
|
+#define U12_16 73 /* 12 bit unsigned value starting at 16 */ |
|
+ { 12, 16, 0 }, |
|
+#define U16_16 74 /* 16 bit unsigned value starting at 16 */ |
|
{ 16, 16, 0 }, |
|
-#define U16_32 67 /* 16 bit unsigned value starting at 32 */ |
|
+#define U16_32 75 /* 16 bit unsigned value starting at 32 */ |
|
{ 16, 32, 0 }, |
|
-#define U32_16 68 /* 32 bit unsigned value starting at 16 */ |
|
+#define U32_16 76 /* 32 bit unsigned value starting at 16 */ |
|
{ 32, 16, 0 }, |
|
|
|
/* PC-relative address operands. */ |
|
|
|
-#define J12_12 69 /* 12 bit PC relative offset at 12 */ |
|
+#define J12_12 77 /* 12 bit PC relative offset at 12 */ |
|
{ 12, 12, S390_OPERAND_PCREL }, |
|
-#define J16_16 70 /* 16 bit PC relative offset at 16 */ |
|
+#define J16_16 78 /* 16 bit PC relative offset at 16 */ |
|
{ 16, 16, S390_OPERAND_PCREL }, |
|
-#define J16_32 71 /* 16 bit PC relative offset at 32 */ |
|
+#define J16_32 79 /* 24 bit PC relative offset at 24 */ |
|
{ 16, 32, S390_OPERAND_PCREL }, |
|
-#define J24_24 72 /* 24 bit PC relative offset at 24 */ |
|
+#define J24_24 80 /* 24 bit PC relative offset at 24 */ |
|
{ 24, 24, S390_OPERAND_PCREL }, |
|
-#define J32_16 73 /* 32 bit PC relative offset at 16 */ |
|
+#define J32_16 81 /* 32 bit PC relative offset at 16 */ |
|
{ 32, 16, S390_OPERAND_PCREL }, |
|
|
|
- |
|
-/* Conditional mask operands. */ |
|
- |
|
-#define M_16OPT 74 /* 4 bit optional mask starting at 16 */ |
|
- { 4, 16, S390_OPERAND_OPTIONAL }, |
|
-#define M_20OPT 75 /* 4 bit optional mask starting at 20 */ |
|
- { 4, 20, S390_OPERAND_OPTIONAL }, |
|
- |
|
}; |
|
|
|
|
|
@@ -244,7 +257,7 @@ const struct s390_operand s390_operands[] = |
|
#define OP8(x) { x, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
#define OP16(x) { x >> 8, x & 255, 0x00, 0x00, 0x00, 0x00 } |
|
#define OP48(x) { x >> 40, (x >> 32) & 255, (x >> 24) & 255, \ |
|
- (x >> 16) & 255, (x >> 8) & 255, x & 255} |
|
+ (x >> 16) & 255, (x >> 8) & 255, x & 255} |
|
|
|
/* The new format of the INSTR_x_y and MASK_x_y defines is based |
|
on the following rules: |
|
@@ -263,7 +276,6 @@ const struct s390_operand s390_operands[] = |
|
l - length, 4 or 8 bit |
|
p - pc relative |
|
r - general purpose register |
|
- ro - optional register operand |
|
re - gpr extended operand, a valid general purpose register pair |
|
u - unsigned integer, 4, 8, 16 or 32 bit |
|
m - mode field, 4 bit |
|
@@ -274,7 +286,7 @@ const struct s390_operand s390_operands[] = |
|
quite close. |
|
|
|
For example the instruction "mvo" is defined in the PoP as follows: |
|
- |
|
+ |
|
MVO D1(L1,B1),D2(L2,B2) [SS] |
|
|
|
-------------------------------------- |
|
@@ -284,358 +296,443 @@ const struct s390_operand s390_operands[] = |
|
|
|
The instruction format is: INSTR_SS_LLRDRD / MASK_SS_LLRDRD. */ |
|
|
|
-#define INSTR_E 2, { 0,0,0,0,0,0 } /* e.g. pr */ |
|
-#define INSTR_IE_UU 4, { U4_24,U4_28,0,0,0,0 } /* e.g. niai */ |
|
-#define INSTR_MII_UPP 6, { U4_8,J12_12,J24_24 } /* e.g. bprp */ |
|
-#define INSTR_RIE_RRP 6, { R_8,R_12,J16_16,0,0,0 } /* e.g. brxhg */ |
|
-#define INSTR_RIE_RRPU 6, { R_8,R_12,U4_32,J16_16,0,0 } /* e.g. crj */ |
|
-#define INSTR_RIE_RRP0 6, { R_8,R_12,J16_16,0,0,0 } /* e.g. crjne */ |
|
-#define INSTR_RIE_RRI0 6, { R_8,R_12,I16_16,0,0,0 } /* e.g. ahik */ |
|
-#define INSTR_RIE_RUPI 6, { R_8,I8_32,U4_12,J16_16,0,0 } /* e.g. cij */ |
|
-#define INSTR_RIE_R0PI 6, { R_8,I8_32,J16_16,0,0,0 } /* e.g. cijne */ |
|
-#define INSTR_RIE_RUPU 6, { R_8,U8_32,U4_12,J16_16,0,0 } /* e.g. clij */ |
|
-#define INSTR_RIE_R0PU 6, { R_8,U8_32,J16_16,0,0,0 } /* e.g. clijne */ |
|
-#define INSTR_RIE_R0IU 6, { R_8,I16_16,U4_32,0,0,0 } /* e.g. cit */ |
|
-#define INSTR_RIE_R0I0 6, { R_8,I16_16,0,0,0,0 } /* e.g. citne */ |
|
-#define INSTR_RIE_R0UU 6, { R_8,U16_16,U4_32,0,0,0 } /* e.g. clfit */ |
|
-#define INSTR_RIE_R0U0 6, { R_8,U16_16,0,0,0,0 } /* e.g. clfitne */ |
|
-#define INSTR_RIE_RRUUU 6, { R_8,R_12,U8_16,U8_24,U8_32,0 } /* e.g. rnsbg */ |
|
-#define INSTR_RIL_0P 6, { J32_16,0,0,0,0 } /* e.g. jg */ |
|
-#define INSTR_RIL_RP 6, { R_8,J32_16,0,0,0,0 } /* e.g. brasl */ |
|
-#define INSTR_RIL_UP 6, { U4_8,J32_16,0,0,0,0 } /* e.g. brcl */ |
|
-#define INSTR_RIL_RI 6, { R_8,I32_16,0,0,0,0 } /* e.g. afi */ |
|
-#define INSTR_RIL_RU 6, { R_8,U32_16,0,0,0,0 } /* e.g. alfi */ |
|
-#define INSTR_RI_0P 4, { J16_16,0,0,0,0,0 } /* e.g. j */ |
|
-#define INSTR_RI_RI 4, { R_8,I16_16,0,0,0,0 } /* e.g. ahi */ |
|
-#define INSTR_RI_RP 4, { R_8,J16_16,0,0,0,0 } /* e.g. brct */ |
|
-#define INSTR_RI_RU 4, { R_8,U16_16,0,0,0,0 } /* e.g. tml */ |
|
-#define INSTR_RI_UP 4, { U4_8,J16_16,0,0,0,0 } /* e.g. brc */ |
|
-#define INSTR_RIS_RURDI 6, { R_8,I8_32,U4_12,D_20,B_16,0 } /* e.g. cib */ |
|
-#define INSTR_RIS_R0RDI 6, { R_8,I8_32,D_20,B_16,0,0 } /* e.g. cibne */ |
|
-#define INSTR_RIS_RURDU 6, { R_8,U8_32,U4_12,D_20,B_16,0 } /* e.g. clib */ |
|
-#define INSTR_RIS_R0RDU 6, { R_8,U8_32,D_20,B_16,0,0 } /* e.g. clibne*/ |
|
-#define INSTR_RRE_00 4, { 0,0,0,0,0,0 } /* e.g. palb */ |
|
-#define INSTR_RRE_0R 4, { R_28,0,0,0,0,0 } /* e.g. tb */ |
|
-#define INSTR_RRE_AA 4, { A_24,A_28,0,0,0,0 } /* e.g. cpya */ |
|
-#define INSTR_RRE_AR 4, { A_24,R_28,0,0,0,0 } /* e.g. sar */ |
|
-#define INSTR_RRE_F0 4, { F_24,0,0,0,0,0 } /* e.g. sqer */ |
|
-#define INSTR_RRE_FE0 4, { FE_24,0,0,0,0,0 } /* e.g. lzxr */ |
|
-#define INSTR_RRE_FF 4, { F_24,F_28,0,0,0,0 } /* e.g. debr */ |
|
-#define INSTR_RRE_FEF 4, { FE_24,F_28,0,0,0,0 } /* e.g. lxdbr */ |
|
-#define INSTR_RRE_FFE 4, { F_24,FE_28,0,0,0,0 } /* e.g. lexr */ |
|
-#define INSTR_RRE_FEFE 4, { FE_24,FE_28,0,0,0,0 } /* e.g. dxr */ |
|
-#define INSTR_RRE_R0 4, { R_24,0,0,0,0,0 } /* e.g. ipm */ |
|
-#define INSTR_RRE_RA 4, { R_24,A_28,0,0,0,0 } /* e.g. ear */ |
|
-#define INSTR_RRE_RF 4, { R_24,F_28,0,0,0,0 } /* e.g. cefbr */ |
|
-#define INSTR_RRE_RFE 4, { R_24,FE_28,0,0,0,0 } /* e.g. csxtr */ |
|
-#define INSTR_RRE_RR 4, { R_24,R_28,0,0,0,0 } /* e.g. lura */ |
|
-#define INSTR_RRE_RER 4, { RE_24,R_28,0,0,0,0 } /* e.g. tre */ |
|
-#define INSTR_RRE_RERE 4, { RE_24,RE_28,0,0,0,0 } /* e.g. cuse */ |
|
-#define INSTR_RRE_FR 4, { F_24,R_28,0,0,0,0 } /* e.g. ldgr */ |
|
-#define INSTR_RRE_FER 4, { FE_24,R_28,0,0,0,0 } /* e.g. cxfbr */ |
|
-/* Actually efpc and sfpc do not take an optional operand. |
|
- This is just a workaround for existing code e.g. glibc. */ |
|
-#define INSTR_RRE_RR_OPT 4, { R_24,RO_28,0,0,0,0 } /* efpc, sfpc */ |
|
-#define INSTR_RRF_F0FF 4, { F_16,F_24,F_28,0,0,0 } /* e.g. madbr */ |
|
-#define INSTR_RRF_FE0FF 4, { F_16,F_24,F_28,0,0,0 } /* e.g. myr */ |
|
-#define INSTR_RRF_F0FF2 4, { F_24,F_16,F_28,0,0,0 } /* e.g. cpsdr */ |
|
-#define INSTR_RRF_F0FR 4, { F_24,F_16,R_28,0,0,0 } /* e.g. iedtr */ |
|
-#define INSTR_RRF_FE0FER 4, { FE_24,FE_16,R_28,0,0,0 } /* e.g. iextr */ |
|
-#define INSTR_RRF_FUFF 4, { F_24,F_16,F_28,U4_20,0,0 } /* e.g. didbr */ |
|
-#define INSTR_RRF_FEUFEFE 4, { FE_24,FE_16,FE_28,U4_20,0,0 } /* e.g. qaxtr */ |
|
-#define INSTR_RRF_FUFF2 4, { F_24,F_28,F_16,U4_20,0,0 } /* e.g. adtra */ |
|
-#define INSTR_RRF_FEUFEFE2 4, { FE_24,FE_28,FE_16,U4_20,0,0 } /* e.g. axtra */ |
|
-#define INSTR_RRF_RURR 4, { R_24,R_28,R_16,U4_20,0,0 } /* e.g. .insn */ |
|
-#define INSTR_RRF_RURR2 4, { R_24,R_16,R_28,U4_20,0,0 } /* e.g. lptea */ |
|
-#define INSTR_RRF_R0RR 4, { R_24,R_16,R_28,0,0,0 } /* e.g. idte */ |
|
-#define INSTR_RRF_R0RR2 4, { R_24,R_28,R_16,0,0,0 } /* e.g. ark */ |
|
-#define INSTR_RRF_RMRR 4, { R_24,R_16,R_28,M_20OPT,0,0 } /* e.g. crdte */ |
|
-#define INSTR_RRF_U0FF 4, { F_24,U4_16,F_28,0,0,0 } /* e.g. fixr */ |
|
-#define INSTR_RRF_U0FEFE 4, { FE_24,U4_16,FE_28,0,0,0 } /* e.g. fixbr */ |
|
-#define INSTR_RRF_U0RF 4, { R_24,U4_16,F_28,0,0,0 } /* e.g. cfebr */ |
|
-#define INSTR_RRF_U0RFE 4, { R_24,U4_16,FE_28,0,0,0 } /* e.g. cfxbr */ |
|
-#define INSTR_RRF_UUFF 4, { F_24,U4_16,F_28,U4_20,0,0 } /* e.g. fidtr */ |
|
-#define INSTR_RRF_UUFFE 4, { F_24,U4_16,FE_28,U4_20,0,0 } /* e.g. ldxtr */ |
|
-#define INSTR_RRF_UUFEFE 4, { FE_24,U4_16,FE_28,U4_20,0,0 } /* e.g. fixtr */ |
|
-#define INSTR_RRF_0UFF 4, { F_24,F_28,U4_20,0,0,0 } /* e.g. ldetr */ |
|
-#define INSTR_RRF_0UFEF 4, { FE_24,F_28,U4_20,0,0,0 } /* e.g. lxdtr */ |
|
-#define INSTR_RRF_FFRU 4, { F_24,F_16,R_28,U4_20,0,0 } /* e.g. rrdtr */ |
|
-#define INSTR_RRF_FEFERU 4, { FE_24,FE_16,R_28,U4_20,0,0 } /* e.g. rrxtr */ |
|
-#define INSTR_RRF_M0RR 4, { R_24,R_28,M_16OPT,0,0,0 } /* e.g. sske */ |
|
-#define INSTR_RRF_M0RER 4, { RE_24,R_28,M_16OPT,0,0,0 } /* e.g. trte */ |
|
-#define INSTR_RRF_M0RERE 4, { RE_24,RE_28,M_16OPT,0,0,0 } /* e.g. troo */ |
|
-#define INSTR_RRF_U0RR 4, { R_24,R_28,U4_16,0,0,0 } /* e.g. clrt */ |
|
-#define INSTR_RRF_00RR 4, { R_24,R_28,0,0,0,0 } /* e.g. clrtne */ |
|
-#define INSTR_RRF_UUFR 4, { F_24,U4_16,R_28,U4_20,0,0 } /* e.g. cdgtra */ |
|
-#define INSTR_RRF_UUFER 4, { FE_24,U4_16,R_28,U4_20,0,0 } /* e.g. cxfbra */ |
|
-#define INSTR_RRF_UURF 4, { R_24,U4_16,F_28,U4_20,0,0 } /* e.g. cgdtra */ |
|
-#define INSTR_RRF_UURFE 4, { R_24,U4_16,FE_28,U4_20,0,0 } /* e.g. cfxbra */ |
|
-#define INSTR_RR_0R 2, { R_12, 0,0,0,0,0 } /* e.g. br */ |
|
-#define INSTR_RR_0R_OPT 2, { RO_12, 0,0,0,0,0 } /* e.g. nopr */ |
|
-#define INSTR_RR_FF 2, { F_8,F_12,0,0,0,0 } /* e.g. adr */ |
|
-#define INSTR_RR_FEF 2, { FE_8,F_12,0,0,0,0 } /* e.g. mxdr */ |
|
-#define INSTR_RR_FFE 2, { F_8,FE_12,0,0,0,0 } /* e.g. ldxr */ |
|
-#define INSTR_RR_FEFE 2, { FE_8,FE_12,0,0,0,0 } /* e.g. axr */ |
|
-#define INSTR_RR_R0 2, { R_8, 0,0,0,0,0 } /* e.g. spm */ |
|
-#define INSTR_RR_RR 2, { R_8,R_12,0,0,0,0 } /* e.g. lr */ |
|
-#define INSTR_RR_RER 2, { RE_8,R_12,0,0,0,0 } /* e.g. dr */ |
|
-#define INSTR_RR_U0 2, { U8_8, 0,0,0,0,0 } /* e.g. svc */ |
|
-#define INSTR_RR_UR 2, { U4_8,R_12,0,0,0,0 } /* e.g. bcr */ |
|
-#define INSTR_RRR_F0FF 4, { F_24,F_28,F_16,0,0,0 } /* e.g. ddtr */ |
|
-#define INSTR_RRR_FE0FEFE 4, { FE_24,FE_28,FE_16,0,0,0 } /* e.g. axtr */ |
|
-#define INSTR_RRS_RRRDU 6, { R_8,R_12,U4_32,D_20,B_16 } /* e.g. crb */ |
|
-#define INSTR_RRS_RRRD0 6, { R_8,R_12,D_20,B_16,0 } /* e.g. crbne */ |
|
-#define INSTR_RSE_RRRD 6, { R_8,R_12,D_20,B_16,0,0 } /* e.g. lmh */ |
|
-#define INSTR_RSE_RERERD 6, { RE_8,RE_12,D_20,B_16,0,0 } /* e.g. mvclu */ |
|
-#define INSTR_RSE_CCRD 6, { C_8,C_12,D_20,B_16,0,0 } /* e.g. lmh */ |
|
-#define INSTR_RSE_RURD 6, { R_8,U4_12,D_20,B_16,0,0 } /* e.g. icmh */ |
|
-#define INSTR_RSL_R0RD 6, { D_20,L4_8,B_16,0,0,0 } /* e.g. tp */ |
|
-#define INSTR_RSL_LRDFU 6, { F_32,D_20,L8_8,B_16,U4_36,0 } /* e.g. cdzt */ |
|
-#define INSTR_RSL_LRDFEU 6, { FE_32,D_20,L8_8,B_16,U4_36,0 } /* e.g. cxzt */ |
|
-#define INSTR_RSI_RRP 4, { R_8,R_12,J16_16,0,0,0 } /* e.g. brxh */ |
|
-#define INSTR_RSY_RRRD 6, { R_8,R_12,D20_20,B_16,0,0 } /* e.g. stmy */ |
|
-#define INSTR_RSY_RERERD 6, { RE_8,RE_12,D20_20,B_16,0,0 } /* e.g. cdsy */ |
|
-#define INSTR_RSY_RURD 6, { R_8,U4_12,D20_20,B_16,0,0 } /* e.g. icmh */ |
|
-#define INSTR_RSY_RURD2 6, { R_8,D20_20,B_16,U4_12,0,0 } /* e.g. loc */ |
|
-#define INSTR_RSY_R0RD 6, { R_8,D20_20,B_16,0,0,0 } /* e.g. locgt */ |
|
-#define INSTR_RSY_AARD 6, { A_8,A_12,D20_20,B_16,0,0 } /* e.g. lamy */ |
|
-#define INSTR_RSY_CCRD 6, { C_8,C_12,D20_20,B_16,0,0 } /* e.g. stctg */ |
|
-#define INSTR_RS_AARD 4, { A_8,A_12,D_20,B_16,0,0 } /* e.g. lam */ |
|
-#define INSTR_RS_CCRD 4, { C_8,C_12,D_20,B_16,0,0 } /* e.g. lctl */ |
|
-#define INSTR_RS_R0RD 4, { R_8,D_20,B_16,0,0,0 } /* e.g. sll */ |
|
-#define INSTR_RS_RE0RD 4, { RE_8,D_20,B_16,0,0,0 } /* e.g. slda */ |
|
-#define INSTR_RS_RRRD 4, { R_8,R_12,D_20,B_16,0,0 } /* e.g. cs */ |
|
-#define INSTR_RS_RERERD 4, { RE_8,RE_12,D_20,B_16,0,0 } /* e.g. cds */ |
|
-#define INSTR_RS_RURD 4, { R_8,U4_12,D_20,B_16,0,0 } /* e.g. icm */ |
|
-#define INSTR_RXE_FRRD 6, { F_8,D_20,X_12,B_16,0,0 } /* e.g. axbr */ |
|
-#define INSTR_RXE_FERRD 6, { FE_8,D_20,X_12,B_16,0,0 } /* e.g. lxdb */ |
|
-#define INSTR_RXE_RRRD 6, { R_8,D_20,X_12,B_16,0,0 } /* e.g. lg */ |
|
-#define INSTR_RXE_RERRD 6, { RE_8,D_20,X_12,B_16,0,0 } /* e.g. dsg */ |
|
-#define INSTR_RXF_FRRDF 6, { F_32,F_8,D_20,X_12,B_16,0 } /* e.g. madb */ |
|
-#define INSTR_RXF_FRRDFE 6, { FE_32,F_8,D_20,X_12,B_16,0 } /* e.g. my */ |
|
-#define INSTR_RXF_FERRDFE 6, { FE_32,FE_8,D_20,X_12,B_16,0 } /* e.g. slxt */ |
|
-#define INSTR_RXF_RRRDR 6, { R_32,R_8,D_20,X_12,B_16,0 } /* e.g. .insn */ |
|
-#define INSTR_RXY_RRRD 6, { R_8,D20_20,X_12,B_16,0,0 } /* e.g. ly */ |
|
-#define INSTR_RXY_RERRD 6, { RE_8,D20_20,X_12,B_16,0,0 } /* e.g. dsg */ |
|
-#define INSTR_RXY_FRRD 6, { F_8,D20_20,X_12,B_16,0,0 } /* e.g. ley */ |
|
-#define INSTR_RXY_URRD 6, { U4_8,D20_20,X_12,B_16,0,0 } /* e.g. pfd */ |
|
-#define INSTR_RX_0RRD 4, { D_20,X_12,B_16,0,0,0 } /* e.g. be */ |
|
-#define INSTR_RX_0RRD_OPT 4, { DO_20,X_12,B_16,0,0,0 } /* e.g. nop */ |
|
-#define INSTR_RX_FRRD 4, { F_8,D_20,X_12,B_16,0,0 } /* e.g. ae */ |
|
-#define INSTR_RX_FERRD 4, { FE_8,D_20,X_12,B_16,0,0 } /* e.g. mxd */ |
|
-#define INSTR_RX_RRRD 4, { R_8,D_20,X_12,B_16,0,0 } /* e.g. l */ |
|
-#define INSTR_RX_RERRD 4, { RE_8,D_20,X_12,B_16,0,0 } /* e.g. d */ |
|
-#define INSTR_RX_URRD 4, { U4_8,D_20,X_12,B_16,0,0 } /* e.g. bc */ |
|
-#define INSTR_SI_URD 4, { D_20,B_16,U8_8,0,0,0 } /* e.g. cli */ |
|
-#define INSTR_SIY_URD 6, { D20_20,B_16,U8_8,0,0,0 } /* e.g. tmy */ |
|
-#define INSTR_SIY_IRD 6, { D20_20,B_16,I8_8,0,0,0 } /* e.g. asi */ |
|
-#define INSTR_SIL_RDI 6, { D_20,B_16,I16_32,0,0,0 } /* e.g. chhsi */ |
|
-#define INSTR_SIL_RDU 6, { D_20,B_16,U16_32,0,0,0 } /* e.g. clfhsi */ |
|
-#define INSTR_SMI_U0RDP 6, { U4_8,J16_32,D_20,B_16,0,0 } /* e.g. bpp */ |
|
-#define INSTR_SSE_RDRD 6, { D_20,B_16,D_36,B_32,0,0 } /* e.g. mvsdk */ |
|
-#define INSTR_SS_L0RDRD 6, { D_20,L8_8,B_16,D_36,B_32,0 } /* e.g. mvc */ |
|
-#define INSTR_SS_L2RDRD 6, { D_20,B_16,D_36,L8_8,B_32,0 } /* e.g. pka */ |
|
-#define INSTR_SS_LIRDRD 6, { D_20,L4_8,B_16,D_36,B_32,U4_12 } /* e.g. srp */ |
|
-#define INSTR_SS_LLRDRD 6, { D_20,L4_8,B_16,D_36,L4_12,B_32 } /* e.g. pack */ |
|
-#define INSTR_SS_RRRDRD 6, { D_20,R_8,B_16,D_36,B_32,R_12 } /* e.g. mvck */ |
|
-#define INSTR_SS_RRRDRD2 6, { R_8,D_20,B_16,R_12,D_36,B_32 } /* e.g. plo */ |
|
-#define INSTR_SS_RRRDRD3 6, { R_8,R_12,D_20,B_16,D_36,B_32 } /* e.g. lmd */ |
|
-#define INSTR_SSF_RRDRD 6, { D_20,B_16,D_36,B_32,R_8,0 } /* e.g. mvcos */ |
|
-#define INSTR_SSF_RRDRD2 6, { R_8,D_20,B_16,D_36,B_32,0 } |
|
-#define INSTR_SSF_RERDRD2 6, { RE_8,D_20,B_16,D_36,B_32,0 } /* e.g. lpd */ |
|
-#define INSTR_S_00 4, { 0,0,0,0,0,0 } /* e.g. hsch */ |
|
-#define INSTR_S_RD 4, { D_20,B_16,0,0,0,0 } /* e.g. lpsw */ |
|
- |
|
-#define MASK_E { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_IE_UU { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_MII_UPP { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RIE_RRP { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RIE_RRPU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RIE_RRP0 { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
-#define MASK_RIE_RRI0 { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
-#define MASK_RIE_RUPI { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RIE_R0PI { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
-#define MASK_RIE_RUPU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RIE_R0PU { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
-#define MASK_RIE_R0IU { 0xff, 0x0f, 0x00, 0x00, 0x0f, 0xff } |
|
-#define MASK_RIE_R0I0 { 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RIE_R0UU { 0xff, 0x0f, 0x00, 0x00, 0x0f, 0xff } |
|
-#define MASK_RIE_R0U0 { 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RIE_RRUUU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RIL_0P { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RIL_RP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RIL_UP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RIL_RI { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RIL_RU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RI_0P { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RI_RI { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RI_RP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RI_RU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RI_UP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RIS_RURDI { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RIS_R0RDI { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RIS_RURDU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RIS_R0RDU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RRE_00 { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00 } |
|
-#define MASK_RRE_0R { 0xff, 0xff, 0xff, 0xf0, 0x00, 0x00 } |
|
-#define MASK_RRE_AA { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_AR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_F0 { 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00 } |
|
-#define MASK_RRE_FE0 { 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00 } |
|
-#define MASK_RRE_FF { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_FEF { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_FFE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_FEFE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_R0 { 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00 } |
|
-#define MASK_RRE_RA { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_RF { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_RFE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_RR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_RER { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_RERE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_FR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_FER { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRE_RR_OPT { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_F0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_FE0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_F0FF2 { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_F0FR { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_FE0FER { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_FUFF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_FEUFEFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_FUFF2 { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define INSTR_E 2, { 0,0,0,0,0,0 } /* e.g. pr */ |
|
+#define INSTR_IE_UU 4, { U4_24,U4_28,0,0,0,0 } /* e.g. niai */ |
|
+#define INSTR_MII_UPP 6, { U4_8,J12_12,J24_24 } /* e.g. bprp */ |
|
+#define INSTR_RIE_RRP 6, { R_8,R_12,J16_16,0,0,0 } /* e.g. brxhg */ |
|
+#define INSTR_RIE_RRPU 6, { R_8,R_12,U4_32,J16_16,0,0 } /* e.g. crj */ |
|
+#define INSTR_RIE_RRP0 6, { R_8,R_12,J16_16,0,0,0 } /* e.g. cgrjne */ |
|
+#define INSTR_RIE_RRI0 6, { R_8,R_12,I16_16,0,0,0 } /* e.g. ahik */ |
|
+#define INSTR_RIE_RUPI 6, { R_8,I8_32,U4_12,J16_16,0,0 } /* e.g. cij */ |
|
+#define INSTR_RIE_R0PI 6, { R_8,I8_32,J16_16,0,0,0 } /* e.g. cijne */ |
|
+#define INSTR_RIE_RUPU 6, { R_8,U8_32,U4_12,J16_16,0,0 } /* e.g. clij */ |
|
+#define INSTR_RIE_R0PU 6, { R_8,U8_32,J16_16,0,0,0 } /* e.g. clijne */ |
|
+#define INSTR_RIE_R0IU 6, { R_8,I16_16,U4_32,0,0,0 } /* e.g. cit */ |
|
+#define INSTR_RIE_R0I0 6, { R_8,I16_16,0,0,0,0 } /* e.g. citne */ |
|
+#define INSTR_RIE_R0UU 6, { R_8,U16_16,U4_32,0,0,0 } /* e.g. clfit */ |
|
+#define INSTR_RIE_R0U0 6, { R_8,U16_16,0,0,0,0 } /* e.g. clfitne */ |
|
+#define INSTR_RIE_RRUUU 6, { R_8,R_12,U8_16,U8_24,U8_32,0 } /* e.g. rnsbg */ |
|
+#define INSTR_RIL_0P 6, { J32_16,0,0,0,0 } /* e.g. jg */ |
|
+#define INSTR_RIL_RP 6, { R_8,J32_16,0,0,0,0 } /* e.g. brasl */ |
|
+#define INSTR_RIL_UP 6, { U4_8,J32_16,0,0,0,0 } /* e.g. brcl */ |
|
+#define INSTR_RIL_RI 6, { R_8,I32_16,0,0,0,0 } /* e.g. afi */ |
|
+#define INSTR_RIL_RU 6, { R_8,U32_16,0,0,0,0 } /* e.g. alfi */ |
|
+#define INSTR_RI_0P 4, { J16_16,0,0,0,0,0 } /* e.g. j */ |
|
+#define INSTR_RI_RI 4, { R_8,I16_16,0,0,0,0 } /* e.g. ahi */ |
|
+#define INSTR_RI_RP 4, { R_8,J16_16,0,0,0,0 } /* e.g. brct */ |
|
+#define INSTR_RI_RU 4, { R_8,U16_16,0,0,0,0 } /* e.g. tml */ |
|
+#define INSTR_RI_UP 4, { U4_8,J16_16,0,0,0,0 } /* e.g. brc */ |
|
+#define INSTR_RIS_RURDI 6, { R_8,I8_32,U4_12,D_20,B_16,0 } /* e.g. cib */ |
|
+#define INSTR_RIS_R0RDI 6, { R_8,I8_32,D_20,B_16,0,0 } /* e.g. cibne */ |
|
+#define INSTR_RIS_RURDU 6, { R_8,U8_32,U4_12,D_20,B_16,0 } /* e.g. clib */ |
|
+#define INSTR_RIS_R0RDU 6, { R_8,U8_32,D_20,B_16,0,0 } /* e.g. clibne*/ |
|
+#define INSTR_RRE_00 4, { 0,0,0,0,0,0 } /* e.g. palb */ |
|
+#define INSTR_RRE_0R 4, { R_28,0,0,0,0,0 } /* e.g. tb */ |
|
+#define INSTR_RRE_AA 4, { A_24,A_28,0,0,0,0 } /* e.g. cpya */ |
|
+#define INSTR_RRE_AR 4, { A_24,R_28,0,0,0,0 } /* e.g. sar */ |
|
+#define INSTR_RRE_F0 4, { F_24,0,0,0,0,0 } /* e.g. lzer */ |
|
+#define INSTR_RRE_FE0 4, { FE_24,0,0,0,0,0 } /* e.g. lzxr */ |
|
+#define INSTR_RRE_FF 4, { F_24,F_28,0,0,0,0 } /* e.g. debr */ |
|
+#define INSTR_RRE_FEF 4, { FE_24,F_28,0,0,0,0 } /* e.g. lxdbr */ |
|
+#define INSTR_RRE_FFE 4, { F_24,FE_28,0,0,0,0 } /* e.g. lexr */ |
|
+#define INSTR_RRE_FEFE 4, { FE_24,FE_28,0,0,0,0 } /* e.g. dxr */ |
|
+#define INSTR_RRE_R0 4, { R_24,0,0,0,0,0 } /* e.g. ipm */ |
|
+#define INSTR_RRE_RA 4, { R_24,A_28,0,0,0,0 } /* e.g. ear */ |
|
+#define INSTR_RRE_RF 4, { R_24,F_28,0,0,0,0 } /* e.g. lgdr */ |
|
+#define INSTR_RRE_RFE 4, { R_24,FE_28,0,0,0,0 } /* e.g. csxtr */ |
|
+#define INSTR_RRE_RR 4, { R_24,R_28,0,0,0,0 } /* e.g. lura */ |
|
+#define INSTR_RRE_RER 4, { RE_24,R_28,0,0,0,0 } /* e.g. tre */ |
|
+#define INSTR_RRE_RERE 4, { RE_24,RE_28,0,0,0,0 } /* e.g. cuse */ |
|
+#define INSTR_RRE_FR 4, { F_24,R_28,0,0,0,0 } /* e.g. ldgr */ |
|
+#define INSTR_RRE_FER 4, { FE_24,R_28,0,0,0,0 } /* e.g. cxfbr */ |
|
+#define INSTR_RRF_F0FF 4, { F_16,F_24,F_28,0,0,0 } /* e.g. madbr */ |
|
+#define INSTR_RRF_FE0FF 4, { F_16,F_24,F_28,0,0,0 } /* e.g. myr */ |
|
+#define INSTR_RRF_F0FF2 4, { F_24,F_16,F_28,0,0,0 } /* e.g. cpsdr */ |
|
+#define INSTR_RRF_F0FR 4, { F_24,F_16,R_28,0,0,0 } /* e.g. iedtr */ |
|
+#define INSTR_RRF_FE0FER 4, { FE_24,FE_16,R_28,0,0,0 } /* e.g. iextr */ |
|
+#define INSTR_RRF_FUFF 4, { F_24,F_16,F_28,U4_20,0,0 } /* e.g. didbr */ |
|
+#define INSTR_RRF_FEUFEFE 4, { FE_24,FE_16,FE_28,U4_20,0,0 } /* e.g. qaxtr */ |
|
+#define INSTR_RRF_FUFF2 4, { F_24,F_28,F_16,U4_20,0,0 } /* e.g. adtra */ |
|
+#define INSTR_RRF_FEUFEFE2 4, { FE_24,FE_28,FE_16,U4_20,0,0 } /* e.g. axtra */ |
|
+#define INSTR_RRF_RURR 4, { R_24,R_28,R_16,U4_20,0,0 } /* e.g. .insn */ |
|
+#define INSTR_RRF_RURR2 4, { R_24,R_16,R_28,U4_20,0,0 } /* e.g. lptea */ |
|
+#define INSTR_RRF_R0RR 4, { R_24,R_16,R_28,0,0,0 } /* e.g. idte */ |
|
+#define INSTR_RRF_R0RR2 4, { R_24,R_28,R_16,0,0,0 } /* e.g. ark */ |
|
+#define INSTR_RRF_U0FF 4, { F_24,U4_16,F_28,0,0,0 } /* e.g. fidbr */ |
|
+#define INSTR_RRF_U0FEFE 4, { FE_24,U4_16,FE_28,0,0,0 } /* e.g. fixbr */ |
|
+#define INSTR_RRF_U0RF 4, { R_24,U4_16,F_28,0,0,0 } /* e.g. cfebr */ |
|
+#define INSTR_RRF_U0RFE 4, { R_24,U4_16,FE_28,0,0,0 } /* e.g. cfxbr */ |
|
+#define INSTR_RRF_UUFF 4, { F_24,U4_16,F_28,U4_20,0,0 } /* e.g. fidtr */ |
|
+#define INSTR_RRF_UUFFE 4, { F_24,U4_16,FE_28,U4_20,0,0 } /* e.g. ldxtr */ |
|
+#define INSTR_RRF_UUFEFE 4, { FE_24,U4_16,FE_28,U4_20,0,0 } /* e.g. fixtr */ |
|
+#define INSTR_RRF_0UFF 4, { F_24,F_28,U4_20,0,0,0 } /* e.g. ldetr */ |
|
+#define INSTR_RRF_0UFEF 4, { FE_24,F_28,U4_20,0,0,0 } /* e.g. lxdtr */ |
|
+#define INSTR_RRF_FFRU 4, { F_24,F_16,R_28,U4_20,0,0 } /* e.g. rrdtr */ |
|
+#define INSTR_RRF_FEFERU 4, { FE_24,FE_16,R_28,U4_20,0,0 } /* e.g. rrxtr */ |
|
+#define INSTR_RRF_U0RR 4, { R_24,R_28,U4_16,0,0,0 } /* e.g. sske */ |
|
+#define INSTR_RRF_U0RER 4, { RE_24,R_28,U4_16,0,0,0 } /* e.g. trte */ |
|
+#define INSTR_RRF_U0RERE 4, { RE_24,RE_28,U4_16,0,0,0 } /* e.g. troo */ |
|
+#define INSTR_RRF_00RR 4, { R_24,R_28,0,0,0,0 } /* e.g. clrtne */ |
|
+#define INSTR_RRF_UUFR 4, { F_24,U4_16,R_28,U4_20,0,0 } /* e.g. cdgtra */ |
|
+#define INSTR_RRF_UUFER 4, { FE_24,U4_16,R_28,U4_20,0,0 } /* e.g. cxfbra */ |
|
+#define INSTR_RRF_UURF 4, { R_24,U4_16,F_28,U4_20,0,0 } /* e.g. cgdtra */ |
|
+#define INSTR_RRF_UURFE 4, { R_24,U4_16,FE_28,U4_20,0,0 } /* e.g. cfxbra */ |
|
+#define INSTR_RR_0R 2, { R_12, 0,0,0,0,0 } /* e.g. br */ |
|
+#define INSTR_RR_FF 2, { F_8,F_12,0,0,0,0 } /* e.g. adr */ |
|
+#define INSTR_RR_FEF 2, { FE_8,F_12,0,0,0,0 } /* e.g. mxdr */ |
|
+#define INSTR_RR_FFE 2, { F_8,FE_12,0,0,0,0 } /* e.g. ldxr */ |
|
+#define INSTR_RR_FEFE 2, { FE_8,FE_12,0,0,0,0 } /* e.g. axr */ |
|
+#define INSTR_RR_R0 2, { R_8, 0,0,0,0,0 } /* e.g. spm */ |
|
+#define INSTR_RR_RR 2, { R_8,R_12,0,0,0,0 } /* e.g. lr */ |
|
+#define INSTR_RR_RER 2, { RE_8,R_12,0,0,0,0 } /* e.g. dr */ |
|
+#define INSTR_RR_U0 2, { U8_8, 0,0,0,0,0 } /* e.g. svc */ |
|
+#define INSTR_RR_UR 2, { U4_8,R_12,0,0,0,0 } /* e.g. bcr */ |
|
+#define INSTR_RRR_F0FF 4, { F_24,F_28,F_16,0,0,0 } /* e.g. ddtr */ |
|
+#define INSTR_RRR_FE0FEFE 4, { FE_24,FE_28,FE_16,0,0,0 } /* e.g. axtr */ |
|
+#define INSTR_RRS_RRRDU 6, { R_8,R_12,U4_32,D_20,B_16 } /* e.g. crb */ |
|
+#define INSTR_RRS_RRRD0 6, { R_8,R_12,D_20,B_16,0 } /* e.g. crbne */ |
|
+#define INSTR_RSE_RRRD 6, { R_8,R_12,D_20,B_16,0,0 } /* e.g. lmh */ |
|
+#define INSTR_RSE_RERERD 6, { RE_8,RE_12,D_20,B_16,0,0 } /* e.g. mvclu */ |
|
+#define INSTR_RSE_CCRD 6, { C_8,C_12,D_20,B_16,0,0 } /* e.g. stctg */ |
|
+#define INSTR_RSE_RURD 6, { R_8,U4_12,D_20,B_16,0,0 } /* e.g. icmh */ |
|
+#define INSTR_RSL_R0RD 6, { D_20,L4_8,B_16,0,0,0 } /* e.g. tp */ |
|
+#define INSTR_RSL_LRDFU 6, { F_32,D_20,L8_8,B_16,U4_36,0 } /* e.g. cdzt */ |
|
+#define INSTR_RSL_LRDFEU 6, { FE_32,D_20,L8_8,B_16,U4_36,0 } /* e.g. cxzt */ |
|
+#define INSTR_RSI_RRP 4, { R_8,R_12,J16_16,0,0,0 } /* e.g. brxh */ |
|
+#define INSTR_RSY_RRRD 6, { R_8,R_12,D20_20,B_16,0,0 } /* e.g. stmy */ |
|
+#define INSTR_RSY_RERERD 6, { RE_8,RE_12,D20_20,B_16,0,0 } /* e.g. cdsy */ |
|
+#define INSTR_RSY_RURD 6, { R_8,U4_12,D20_20,B_16,0,0 } /* e.g. icmh */ |
|
+#define INSTR_RSY_RURD2 6, { R_8,D20_20,B_16,U4_12,0,0 } /* e.g. loc */ |
|
+#define INSTR_RSY_R0RD 6, { R_8,D20_20,B_16,0,0,0 } /* e.g. locne */ |
|
+#define INSTR_RSY_AARD 6, { A_8,A_12,D20_20,B_16,0,0 } /* e.g. lamy */ |
|
+#define INSTR_RSY_CCRD 6, { C_8,C_12,D20_20,B_16,0,0 } /* e.g. stctg */ |
|
+#define INSTR_RS_AARD 4, { A_8,A_12,D_20,B_16,0,0 } /* e.g. lam */ |
|
+#define INSTR_RS_CCRD 4, { C_8,C_12,D_20,B_16,0,0 } /* e.g. lctl */ |
|
+#define INSTR_RS_R0RD 4, { R_8,D_20,B_16,0,0,0 } /* e.g. sll */ |
|
+#define INSTR_RS_RE0RD 4, { RE_8,D_20,B_16,0,0,0 } /* e.g. slda */ |
|
+#define INSTR_RS_RRRD 4, { R_8,R_12,D_20,B_16,0,0 } /* e.g. cs */ |
|
+#define INSTR_RS_RERERD 4, { RE_8,RE_12,D_20,B_16,0,0 } /* e.g. cds */ |
|
+#define INSTR_RS_RURD 4, { R_8,U4_12,D_20,B_16,0,0 } /* e.g. icm */ |
|
+#define INSTR_RXE_FRRD 6, { F_8,D_20,X_12,B_16,0,0 } /* e.g. adb */ |
|
+#define INSTR_RXE_FERRD 6, { FE_8,D_20,X_12,B_16,0,0 } /* e.g. lxdb */ |
|
+#define INSTR_RXE_RRRD 6, { R_8,D_20,X_12,B_16,0,0 } /* e.g. lg */ |
|
+#define INSTR_RXE_RRRDU 6, { R_8,D_20,X_12,B_16,U4_32,0 } /* e.g. lcbb */ |
|
+#define INSTR_RXE_RERRD 6, { RE_8,D_20,X_12,B_16,0,0 } /* e.g. dsg */ |
|
+#define INSTR_RXF_FRRDF 6, { F_32,F_8,D_20,X_12,B_16,0 } /* e.g. madb */ |
|
+#define INSTR_RXF_FRRDFE 6, { FE_32,F_8,D_20,X_12,B_16,0 } /* e.g. my */ |
|
+#define INSTR_RXF_FERRDFE 6, { FE_32,FE_8,D_20,X_12,B_16,0 } /* e.g. slxt */ |
|
+#define INSTR_RXF_RRRDR 6, { R_32,R_8,D_20,X_12,B_16,0 } /* e.g. .insn */ |
|
+#define INSTR_RXY_RRRD 6, { R_8,D20_20,X_12,B_16,0,0 } /* e.g. ly */ |
|
+#define INSTR_RXY_RERRD 6, { RE_8,D20_20,X_12,B_16,0,0 } /* e.g. dsg */ |
|
+#define INSTR_RXY_FRRD 6, { F_8,D20_20,X_12,B_16,0,0 } /* e.g. ley */ |
|
+#define INSTR_RXY_URRD 6, { U4_8,D20_20,X_12,B_16,0,0 } /* e.g. pfd */ |
|
+#define INSTR_RX_0RRD 4, { D_20,X_12,B_16,0,0,0 } /* e.g. be */ |
|
+#define INSTR_RX_FRRD 4, { F_8,D_20,X_12,B_16,0,0 } /* e.g. ae */ |
|
+#define INSTR_RX_FERRD 4, { FE_8,D_20,X_12,B_16,0,0 } /* e.g. mxd */ |
|
+#define INSTR_RX_RRRD 4, { R_8,D_20,X_12,B_16,0,0 } /* e.g. l */ |
|
+#define INSTR_RX_RERRD 4, { RE_8,D_20,X_12,B_16,0,0 } /* e.g. d */ |
|
+#define INSTR_RX_URRD 4, { U4_8,D_20,X_12,B_16,0,0 } /* e.g. bc */ |
|
+#define INSTR_SI_URD 4, { D_20,B_16,U8_8,0,0,0 } /* e.g. cli */ |
|
+#define INSTR_SIY_URD 6, { D20_20,B_16,U8_8,0,0,0 } /* e.g. tmy */ |
|
+#define INSTR_SIY_IRD 6, { D20_20,B_16,I8_8,0,0,0 } /* e.g. asi */ |
|
+#define INSTR_SIL_RDI 6, { D_20,B_16,I16_32,0,0,0 } /* e.g. chhsi */ |
|
+#define INSTR_SIL_RDU 6, { D_20,B_16,U16_32,0,0,0 } /* e.g. clfhsi */ |
|
+#define INSTR_SMI_U0RDP 6, { U4_8,J16_32,D_20,B_16,0,0 } /* e.g. bpp */ |
|
+#define INSTR_SSE_RDRD 6, { D_20,B_16,D_36,B_32,0,0 } /* e.g. mvcdk */ |
|
+#define INSTR_SS_L0RDRD 6, { D_20,L8_8,B_16,D_36,B_32,0 } /* e.g. mvc */ |
|
+#define INSTR_SS_L2RDRD 6, { D_20,B_16,D_36,L8_8,B_32,0 } /* e.g. pka */ |
|
+#define INSTR_SS_LIRDRD 6, { D_20,L4_8,B_16,D_36,B_32,U4_12 } /* e.g. srp */ |
|
+#define INSTR_SS_LLRDRD 6, { D_20,L4_8,B_16,D_36,L4_12,B_32 } /* e.g. pack */ |
|
+#define INSTR_SS_RRRDRD 6, { D_20,R_8,B_16,D_36,B_32,R_12 } /* e.g. mvck */ |
|
+#define INSTR_SS_RRRDRD2 6, { R_8,D_20,B_16,R_12,D_36,B_32 } /* e.g. plo */ |
|
+#define INSTR_SS_RRRDRD3 6, { R_8,R_12,D_20,B_16,D_36,B_32 } /* e.g. lmd */ |
|
+#define INSTR_SSF_RRDRD 6, { D_20,B_16,D_36,B_32,R_8,0 } /* e.g. mvcos */ |
|
+#define INSTR_SSF_RERDRD2 6, { RE_8,D_20,B_16,D_36,B_32,0 } /* e.g. lpd */ |
|
+#define INSTR_S_00 4, { 0,0,0,0,0,0 } /* e.g. hsch */ |
|
+#define INSTR_S_RD 4, { D_20,B_16,0,0,0,0 } /* e.g. lpsw */ |
|
+#define INSTR_VRV_VVXRDU 6, { V_8,D_20,VX_12,B_16,U4_32,0 } /* e.g. vgef */ |
|
+#define INSTR_VRI_V0U 6, { V_8,U16_16,0,0,0,0 } /* e.g. vgbm */ |
|
+#define INSTR_VRI_V 6, { V_8,0,0,0,0,0 } /* e.g. vzero */ |
|
+#define INSTR_VRI_V0UUU 6, { V_8,U8_16,U8_24,U4_32,0,0 } /* e.g. vgm */ |
|
+#define INSTR_VRI_V0UU 6, { V_8,U8_16,U8_24,0,0,0 } /* e.g. vgmb */ |
|
+#define INSTR_VRI_VVUU 6, { V_8,V_12,U16_16,U4_32,0,0 } /* e.g. vrep */ |
|
+#define INSTR_VRI_VVU 6, { V_8,V_12,U16_16,0,0,0 } /* e.g. vrepb */ |
|
+#define INSTR_VRI_VVU2 6, { V_8,V_12,U12_16,0,0,0 } /* e.g. vftcidb */ |
|
+#define INSTR_VRI_V0IU 6, { V_8,I16_16,U4_32,0,0,0 } /* e.g. vrepi */ |
|
+#define INSTR_VRI_V0I 6, { V_8,I16_16,0,0,0,0 } /* e.g. vrepib */ |
|
+#define INSTR_VRI_VVV0UU 6, { V_8,V_12,V_16,U8_24,U4_32,0 } /* e.g. verim */ |
|
+#define INSTR_VRI_VVV0U 6, { V_8,V_12,V_16,U8_24,0,0 } /* e.g. verimb*/ |
|
+#define INSTR_VRI_VVUUU 6, { V_8,V_12,U12_16,U4_32,U4_28,0 } /* e.g. vftci */ |
|
+#define INSTR_VRX_VRRD 6, { V_8,D_20,X_12,B_16,0,0 } /* e.g. vl */ |
|
+#define INSTR_VRX_VV 6, { V_8,V_12,0,0,0,0 } /* e.g. vlr */ |
|
+#define INSTR_VRX_VRRDU 6, { V_8,D_20,X_12,B_16,U4_32,0 } /* e.g. vlrep */ |
|
+#define INSTR_VRS_RVRDU 6, { R_8,V_12,D_20,B_16,U4_32,0 } /* e.g. vlgv */ |
|
+#define INSTR_VRS_RVRD 6, { R_8,V_12,D_20,B_16,0,0 } /* e.g. vlgvb */ |
|
+#define INSTR_VRS_VVRDU 6, { V_8,V_12,D_20,B_16,U4_32,0 } /* e.g. verll */ |
|
+#define INSTR_VRS_VVRD 6, { V_8,V_12,D_20,B_16,0,0 } /* e.g. vlm */ |
|
+#define INSTR_VRS_VRRDU 6, { V_8,R_12,D_20,B_16,U4_32,0 } /* e.g. vlvg */ |
|
+#define INSTR_VRS_VRRD 6, { V_8,R_12,D_20,B_16,0,0 } /* e.g. vlvgb */ |
|
+#define INSTR_VRR_VRR 6, { V_8,R_12,R_16,0,0,0 } /* e.g. vlvgp */ |
|
+#define INSTR_VRR_VVV0U 6, { V_8,V_12,V_16,U4_32,0,0 } /* e.g. vmrh */ |
|
+#define INSTR_VRR_VVV0U0 6, { V_8,V_12,V_16,U4_24,0,0 } /* e.g. vfaeb */ |
|
+#define INSTR_VRR_VVV0U1 6, { V_8,V_12,V_16,U4_OR1_24,0,0 } /* e.g. vfaebs*/ |
|
+#define INSTR_VRR_VVV0U2 6, { V_8,V_12,V_16,U4_OR2_24,0,0 } /* e.g. vfaezb*/ |
|
+#define INSTR_VRR_VVV0U3 6, { V_8,V_12,V_16,U4_OR3_24,0,0 } /* e.g. vfaezbs*/ |
|
+#define INSTR_VRR_VVV 6, { V_8,V_12,V_16,0,0,0 } /* e.g. vmrhb */ |
|
+#define INSTR_VRR_VVV2 6, { V_8,V_CP16_12,0,0,0,0 } /* e.g. vnot */ |
|
+#define INSTR_VRR_VV0U 6, { V_8,V_12,U4_32,0,0,0 } /* e.g. vseg */ |
|
+#define INSTR_VRR_VV0U2 6, { V_8,V_12,U4_24,0,0,0 } /* e.g. vistrb*/ |
|
+#define INSTR_VRR_VV0UU 6, { V_8,V_12,U4_28,U4_24,0,0 } /* e.g. vcdgb */ |
|
+#define INSTR_VRR_VV0UU8 6, { V_8,V_12,U4_OR8_28,U4_24,0,0 } /* e.g. wcdgb */ |
|
+#define INSTR_VRR_VV 6, { V_8,V_12,0,0,0,0 } /* e.g. vsegb */ |
|
+#define INSTR_VRR_VVVUU0V 6, { V_8,V_12,V_16,V_32,U4_20,U4_24 } /* e.g. vstrc */ |
|
+#define INSTR_VRR_VVVU0V 6, { V_8,V_12,V_16,V_32,U4_20,0 } /* e.g. vac */ |
|
+#define INSTR_VRR_VVVU0VB 6, { V_8,V_12,V_16,V_32,U4_24,0 } /* e.g. vstrcb*/ |
|
+#define INSTR_VRR_VVVU0VB1 6, { V_8,V_12,V_16,V_32,U4_OR1_24,0 } /* e.g. vstrcbs*/ |
|
+#define INSTR_VRR_VVVU0VB2 6, { V_8,V_12,V_16,V_32,U4_OR2_24,0 } /* e.g. vstrczb*/ |
|
+#define INSTR_VRR_VVVU0VB3 6, { V_8,V_12,V_16,V_32,U4_OR3_24,0 } /* e.g. vstrczbs*/ |
|
+#define INSTR_VRR_VVV0V 6, { V_8,V_12,V_16,V_32,0,0 } /* e.g. vacq */ |
|
+#define INSTR_VRR_VVV0U0U 6, { V_8,V_12,V_16,U4_32,U4_24,0 } /* e.g. vfae */ |
|
+#define INSTR_VRR_VVVV 6, { V_8,V_12,V_16,V_32,0,0 } /* e.g. vfmadb*/ |
|
+#define INSTR_VRR_VVV0UUU 6, { V_8,V_12,V_16,U4_32,U4_28,U4_24 }/* e.g. vfch */ |
|
+#define INSTR_VRR_VVV0UU 6, { V_8,V_12,V_16,U4_32,U4_28,0 } /* e.g. vfa */ |
|
+#define INSTR_VRR_VV0UUU 6, { V_8,V_12,U4_32,U4_28,U4_24,0 } /* e.g. vcdg */ |
|
+#define INSTR_VRR_VVVU0UV 6, { V_8,V_12,V_16,V_32,U4_28,U4_20 } /* e.g. vfma */ |
|
+#define INSTR_VRR_VV0U0U 6, { V_8,V_12,U4_32,U4_24,0,0 } /* e.g. vistr */ |
|
+ |
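The leading number in each INSTR_ macro is the instruction length in bytes; the array lists operand-table entries whose names encode the field type and its starting bit position (V_8 is a vector register field at bit 8, D20_20 a 20-bit displacement starting at bit 20, U4_32 a 4-bit unsigned immediate at bit 32, and so on). Purely as an illustration of that naming, and not the actual opcodes/s390-dis.c code, extracting such a field amounts to a big-endian bit extraction:

/* Hedged sketch: extract an operand field of `bits' width starting at
   big-endian bit position `shift' from up to six instruction bytes.
   The real extraction in opcodes/s390-dis.c additionally handles
   signed fields and the split 20-bit displacements.  */
static unsigned int
extract_field (const unsigned char code[6], unsigned int shift,
               unsigned int bits)
{
  unsigned int val = 0, i;

  for (i = 0; i < bits; i++)
    {
      unsigned int bit = shift + i;
      val = (val << 1) | ((code[bit / 8] >> (7 - bit % 8)) & 1);
    }
  return val;
}
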
|
+#define MASK_E { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_IE_UU { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_MII_UPP { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RIE_RRP { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RIE_RRPU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RIE_RRP0 { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_RIE_RRI0 { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_RIE_RUPI { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RIE_R0PI { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_RIE_RUPU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RIE_R0PU { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_RIE_R0IU { 0xff, 0x0f, 0x00, 0x00, 0x0f, 0xff } |
|
+#define MASK_RIE_R0I0 { 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RIE_R0UU { 0xff, 0x0f, 0x00, 0x00, 0x0f, 0xff } |
|
+#define MASK_RIE_R0U0 { 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RIE_RRUUU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RIL_0P { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RIL_RP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RIL_UP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RIL_RI { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RIL_RU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RI_0P { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RI_RI { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RI_RP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RI_RU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RI_UP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RIS_RURDI { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RIS_R0RDI { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RIS_RURDU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RIS_R0RDU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RRE_00 { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00 } |
|
+#define MASK_RRE_0R { 0xff, 0xff, 0xff, 0xf0, 0x00, 0x00 } |
|
+#define MASK_RRE_AA { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_AR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_F0 { 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00 } |
|
+#define MASK_RRE_FE0 { 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00 } |
|
+#define MASK_RRE_FF { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_FEF { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_FFE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_FEFE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_R0 { 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00 } |
|
+#define MASK_RRE_RA { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_RF { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_RFE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_RR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_RER { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_RERE { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_FR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRE_FER { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_F0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_FE0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_F0FF2 { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_F0FR { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_FE0FER { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_FUFF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_FEUFEFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_FUFF2 { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
#define MASK_RRF_FEUFEFE2 { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_RURR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_RURR2 { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_R0RR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_R0RR2 { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_RMRR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_U0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_U0FEFE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_U0RF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_U0RFE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_UUFF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_UUFFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_UUFEFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_0UFF { 0xff, 0xff, 0xf0, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_0UFEF { 0xff, 0xff, 0xf0, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_FFRU { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_FEFERU { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_M0RR { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_M0RER { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_M0RERE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_U0RR { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_00RR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_UUFR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_UUFER { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_UURF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRF_UURFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RR_0R { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RR_0R_OPT { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RR_FF { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RR_FEF { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RR_FFE { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RR_FEFE { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RR_R0 { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RR_RR { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RR_RER { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RR_U0 { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RR_UR { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRR_F0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRR_FE0FEFE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
-#define MASK_RRS_RRRDU { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } |
|
-#define MASK_RRS_RRRD0 { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RSE_RRRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RSE_RERERD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RSE_CCRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RSE_RURD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RSL_R0RD { 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RSL_LRDFU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RSL_LRDFEU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RSI_RRP { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RS_AARD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RS_CCRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RS_R0RD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RS_RE0RD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RS_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RS_RERERD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RS_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RSY_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RSY_RERERD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RSY_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RSY_RURD2 { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RSY_R0RD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RSY_AARD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RSY_CCRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RXE_FRRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RXE_FERRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RXE_RRRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RXE_RERRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
-#define MASK_RXF_FRRDF { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } |
|
-#define MASK_RXF_FRRDFE { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } |
|
-#define MASK_RXF_FERRDFE { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } |
|
-#define MASK_RXF_RRRDR { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } |
|
-#define MASK_RXY_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RXY_RERRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RXY_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RXY_URRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_RX_0RRD { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RX_0RRD_OPT { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RX_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RX_FERRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RX_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RX_RERRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_RX_URRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SI_URD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SIY_URD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_SIY_IRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
-#define MASK_SIL_RDI { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SIL_RDU { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SMI_U0RDP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SSE_RDRD { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SS_L0RDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SS_L2RDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SS_LIRDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SS_LLRDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SS_RRRDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SS_RRRDRD2 { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SS_RRRDRD3 { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SSF_RRDRD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SSF_RRDRD2 { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_SSF_RERDRD2 { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
-#define MASK_S_00 { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00 } |
|
-#define MASK_S_RD { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
- |
|
+#define MASK_RRF_RURR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_RURR2 { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_R0RR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_R0RR2 { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_U0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_U0FEFE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_U0RF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_U0RFE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_UUFF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_UUFFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_UUFEFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_0UFF { 0xff, 0xff, 0xf0, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_0UFEF { 0xff, 0xff, 0xf0, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_FFRU { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_FEFERU { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_U0RR { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_U0RER { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_U0RERE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_00RR { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_UUFR { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_UUFER { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_UURF { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRF_UURFE { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RR_0R { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RR_FF { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RR_FEF { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RR_FFE { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RR_FEFE { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RR_R0 { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RR_RR { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RR_RER { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RR_U0 { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RR_UR { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRR_F0FF { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRR_FE0FEFE { 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00 } |
|
+#define MASK_RRS_RRRDU { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } |
|
+#define MASK_RRS_RRRD0 { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RSE_RRRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RSE_RERERD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RSE_CCRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RSE_RURD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RSL_R0RD { 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RSL_LRDFU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RSL_LRDFEU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RSI_RRP { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RS_AARD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RS_CCRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RS_R0RD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RS_RE0RD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RS_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RS_RERERD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RS_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RSY_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RSY_RERERD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RSY_RURD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RSY_RURD2 { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RSY_R0RD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RSY_AARD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RSY_CCRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RXE_FRRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RXE_FERRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RXE_RRRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RXE_RRRDU { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } |
|
+#define MASK_RXE_RERRD { 0xff, 0x00, 0x00, 0x00, 0xff, 0xff } |
|
+#define MASK_RXF_FRRDF { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } |
|
+#define MASK_RXF_FRRDFE { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } |
|
+#define MASK_RXF_FERRDFE { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } |
|
+#define MASK_RXF_RRRDR { 0xff, 0x00, 0x00, 0x00, 0x0f, 0xff } |
|
+#define MASK_RXY_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RXY_RERRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RXY_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RXY_URRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_RX_0RRD { 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RX_FRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RX_FERRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RX_RRRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RX_RERRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_RX_URRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SI_URD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SIY_URD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_SIY_IRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_SIL_RDI { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SIL_RDU { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SMI_U0RDP { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SSE_RDRD { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SS_L0RDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SS_L2RDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SS_LIRDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SS_LLRDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SS_RRRDRD { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SS_RRRDRD2 { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SS_RRRDRD3 { 0xff, 0x00, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SSF_RRDRD { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_SSF_RERDRD2 { 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_S_00 { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00 } |
|
+#define MASK_S_RD { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } |
|
+#define MASK_VRV_VVXRDU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_VRI_V0U { 0xff, 0x0f, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_VRI_V { 0xff, 0x0f, 0xff, 0xff, 0xf0, 0xff } |
|
+#define MASK_VRI_V0UUU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_VRI_V0UU { 0xff, 0x0f, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_VRI_VVUU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_VRI_VVU { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_VRI_VVU2 { 0xff, 0x00, 0x00, 0x0f, 0xf0, 0xff } |
|
+#define MASK_VRI_V0IU { 0xff, 0x0f, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_VRI_V0I { 0xff, 0x0f, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_VRI_VVV0UU { 0xff, 0x00, 0x0f, 0x00, 0x00, 0xff } |
|
+#define MASK_VRI_VVV0U { 0xff, 0x00, 0x0f, 0x00, 0xf0, 0xff } |
|
+#define MASK_VRI_VVUUU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_VRX_VRRD { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_VRX_VV { 0xff, 0x00, 0xff, 0xff, 0xf0, 0xff } |
|
+#define MASK_VRX_VRRDU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_VRS_RVRDU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_VRS_RVRD { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_VRS_VVRDU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_VRS_VVRD { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_VRS_VRRDU { 0xff, 0x00, 0x00, 0x00, 0x00, 0xff } |
|
+#define MASK_VRS_VRRD { 0xff, 0x00, 0x00, 0x00, 0xf0, 0xff } |
|
+#define MASK_VRR_VRR { 0xff, 0x00, 0x0f, 0xff, 0xf0, 0xff } |
|
+#define MASK_VRR_VVV0U { 0xff, 0x00, 0x0f, 0xff, 0x00, 0xff } |
|
+#define MASK_VRR_VVV0U0 { 0xff, 0x00, 0x0f, 0x0f, 0xf0, 0xff } |
|
+#define MASK_VRR_VVV0U1 { 0xff, 0x00, 0x0f, 0x1f, 0xf0, 0xff } |
|
+#define MASK_VRR_VVV0U2 { 0xff, 0x00, 0x0f, 0x2f, 0xf0, 0xff } |
|
+#define MASK_VRR_VVV0U3 { 0xff, 0x00, 0x0f, 0x3f, 0xf0, 0xff } |
|
+#define MASK_VRR_VVV { 0xff, 0x00, 0x0f, 0xff, 0xf0, 0xff } |
|
+#define MASK_VRR_VVV2 { 0xff, 0x00, 0x0f, 0xff, 0xf0, 0xff } |
|
+#define MASK_VRR_VVV0V { 0xff, 0x00, 0x0f, 0xff, 0x00, 0xff } |
|
+#define MASK_VRR_VV0U { 0xff, 0x00, 0xff, 0xff, 0x00, 0xff } |
|
+#define MASK_VRR_VV0U2 { 0xff, 0x00, 0xff, 0x0f, 0xf0, 0xff } |
|
+#define MASK_VRR_VV0UU { 0xff, 0x00, 0xff, 0x00, 0xf0, 0xff } |
|
+#define MASK_VRR_VV0UU8 { 0xff, 0x00, 0xff, 0x08, 0xf0, 0xff } |
|
+#define MASK_VRR_VV { 0xff, 0x00, 0xff, 0xff, 0xf0, 0xff } |
|
+#define MASK_VRR_VVVUU0V { 0xff, 0x00, 0x00, 0x0f, 0x00, 0xff } |
|
+#define MASK_VRR_VVVU0V { 0xff, 0x00, 0x00, 0xff, 0x00, 0xff } |
|
+#define MASK_VRR_VVVU0VB { 0xff, 0x00, 0x0f, 0x0f, 0x00, 0xff } |
|
+#define MASK_VRR_VVVU0VB1 { 0xff, 0x00, 0x0f, 0x1f, 0x00, 0xff } |
|
+#define MASK_VRR_VVVU0VB2 { 0xff, 0x00, 0x0f, 0x2f, 0x00, 0xff } |
|
+#define MASK_VRR_VVVU0VB3 { 0xff, 0x00, 0x0f, 0x3f, 0x00, 0xff } |
|
+#define MASK_VRR_VVV0U0U { 0xff, 0x00, 0x0f, 0x0f, 0x00, 0xff } |
|
+#define MASK_VRR_VVVV { 0xff, 0x00, 0x0f, 0xff, 0x00, 0xff } |
|
+#define MASK_VRR_VVV0UUU { 0xff, 0x00, 0x0f, 0x00, 0x00, 0xff } |
|
+#define MASK_VRR_VVV0UU { 0xff, 0x00, 0x0f, 0xf0, 0x00, 0xff } |
|
+#define MASK_VRR_VV0UUU { 0xff, 0x00, 0xff, 0x00, 0x00, 0xff } |
|
+#define MASK_VRR_VVVU0UV { 0xff, 0x00, 0x00, 0xf0, 0x00, 0xff } |
|
+#define MASK_VRR_VV0U0U { 0xff, 0x00, 0xff, 0x0f, 0x00, 0xff } |
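Each MASK_ array is the disassembler-side counterpart of the corresponding INSTR_ format: bits set to one (0xff, 0xf0, 0x0f bytes and nibbles) must equal the opcode pattern, while zero bits belong to operand fields. A minimal sketch of that matching test, assuming the declarations from include/opcode/s390.h and a six-byte code buffer (the real loop lives in opcodes/s390-dis.c):

#include "opcode/s390.h"

/* Sketch only: report whether the encoded bytes match a table entry.
   Bits that are zero in mask[] are operand fields and are ignored.  */
static int
opcode_matches (const unsigned char code[6], const struct s390_opcode *op)
{
  int i;

  for (i = 0; i < 6; i++)
    if ((code[i] & op->mask[i]) != op->opcode[i])
      return 0;
  return 1;
}
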
|
|
|
/* The opcode formats table (blueprints for .insn pseudo mnemonic). */ |
|
|
|
const struct s390_opcode s390_opformats[] = |
|
{ |
|
- { "e", OP8(0x00LL), MASK_E, INSTR_E, 3, 0 }, |
|
- { "ri", OP8(0x00LL), MASK_RI_RI, INSTR_RI_RI, 3, 0 }, |
|
- { "rie", OP8(0x00LL), MASK_RIE_RRP, INSTR_RIE_RRP, 3, 0 }, |
|
- { "ril", OP8(0x00LL), MASK_RIL_RP, INSTR_RIL_RP, 3, 0 }, |
|
- { "rilu", OP8(0x00LL), MASK_RIL_RU, INSTR_RIL_RU, 3, 0 }, |
|
- { "ris", OP8(0x00LL), MASK_RIS_RURDI, INSTR_RIS_RURDI,3, 6 }, |
|
- { "rr", OP8(0x00LL), MASK_RR_RR, INSTR_RR_RR, 3, 0 }, |
|
- { "rre", OP8(0x00LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0 }, |
|
- { "rrf", OP8(0x00LL), MASK_RRF_RURR, INSTR_RRF_RURR, 3, 0 }, |
|
- { "rrs", OP8(0x00LL), MASK_RRS_RRRDU, INSTR_RRS_RRRDU,3, 6 }, |
|
- { "rs", OP8(0x00LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0 }, |
|
- { "rse", OP8(0x00LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 3, 0 }, |
|
- { "rsi", OP8(0x00LL), MASK_RSI_RRP, INSTR_RSI_RRP, 3, 0 }, |
|
- { "rsy", OP8(0x00LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 3 }, |
|
- { "rx", OP8(0x00LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0 }, |
|
- { "rxe", OP8(0x00LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 0 }, |
|
- { "rxf", OP8(0x00LL), MASK_RXF_RRRDR, INSTR_RXF_RRRDR,3, 0 }, |
|
- { "rxy", OP8(0x00LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3 }, |
|
- { "s", OP8(0x00LL), MASK_S_RD, INSTR_S_RD, 3, 0 }, |
|
- { "si", OP8(0x00LL), MASK_SI_URD, INSTR_SI_URD, 3, 0 }, |
|
- { "siy", OP8(0x00LL), MASK_SIY_URD, INSTR_SIY_URD, 3, 3 }, |
|
- { "sil", OP8(0x00LL), MASK_SIL_RDI, INSTR_SIL_RDI, 3, 6 }, |
|
- { "ss", OP8(0x00LL), MASK_SS_RRRDRD, INSTR_SS_RRRDRD,3, 0 }, |
|
- { "sse", OP8(0x00LL), MASK_SSE_RDRD, INSTR_SSE_RDRD, 3, 0 }, |
|
- { "ssf", OP8(0x00LL), MASK_SSF_RRDRD, INSTR_SSF_RRDRD,3, 0 }, |
|
+ { "e", OP8(0x00LL), MASK_E, INSTR_E, 3, 0 ,0 }, |
|
+ { "ri", OP8(0x00LL), MASK_RI_RI, INSTR_RI_RI, 3, 0 ,0 }, |
|
+ { "rie", OP8(0x00LL), MASK_RIE_RRP, INSTR_RIE_RRP, 3, 0 ,0 }, |
|
+ { "ril", OP8(0x00LL), MASK_RIL_RP, INSTR_RIL_RP, 3, 0 ,0 }, |
|
+ { "rilu", OP8(0x00LL), MASK_RIL_RU, INSTR_RIL_RU, 3, 0 ,0 }, |
|
+ { "ris", OP8(0x00LL), MASK_RIS_RURDI, INSTR_RIS_RURDI,3, 6 ,0 }, |
|
+ { "rr", OP8(0x00LL), MASK_RR_RR, INSTR_RR_RR, 3, 0 ,0 }, |
|
+ { "rre", OP8(0x00LL), MASK_RRE_RR, INSTR_RRE_RR, 3, 0 ,0 }, |
|
+ { "rrf", OP8(0x00LL), MASK_RRF_RURR, INSTR_RRF_RURR, 3, 0 ,0 }, |
|
+ { "rrs", OP8(0x00LL), MASK_RRS_RRRDU, INSTR_RRS_RRRDU,3, 6 ,0 }, |
|
+ { "rs", OP8(0x00LL), MASK_RS_RRRD, INSTR_RS_RRRD, 3, 0 ,0 }, |
|
+ { "rse", OP8(0x00LL), MASK_RSE_RRRD, INSTR_RSE_RRRD, 3, 0 ,0 }, |
|
+ { "rsi", OP8(0x00LL), MASK_RSI_RRP, INSTR_RSI_RRP, 3, 0 ,0 }, |
|
+ { "rsy", OP8(0x00LL), MASK_RSY_RRRD, INSTR_RSY_RRRD, 3, 3 ,0 }, |
|
+ { "rx", OP8(0x00LL), MASK_RX_RRRD, INSTR_RX_RRRD, 3, 0 ,0 }, |
|
+ { "rxe", OP8(0x00LL), MASK_RXE_RRRD, INSTR_RXE_RRRD, 3, 0 ,0 }, |
|
+ { "rxf", OP8(0x00LL), MASK_RXF_RRRDR, INSTR_RXF_RRRDR,3, 0 ,0 }, |
|
+ { "rxy", OP8(0x00LL), MASK_RXY_RRRD, INSTR_RXY_RRRD, 3, 3 ,0 }, |
|
+ { "s", OP8(0x00LL), MASK_S_RD, INSTR_S_RD, 3, 0 ,0 }, |
|
+ { "si", OP8(0x00LL), MASK_SI_URD, INSTR_SI_URD, 3, 0 ,0 }, |
|
+ { "siy", OP8(0x00LL), MASK_SIY_URD, INSTR_SIY_URD, 3, 3 ,0 }, |
|
+ { "sil", OP8(0x00LL), MASK_SIL_RDI, INSTR_SIL_RDI, 3, 6 ,0 }, |
|
+ { "ss", OP8(0x00LL), MASK_SS_RRRDRD, INSTR_SS_RRRDRD,3, 0 ,0 }, |
|
+ { "sse", OP8(0x00LL), MASK_SSE_RDRD, INSTR_SSE_RDRD, 3, 0 ,0 }, |
|
+ { "ssf", OP8(0x00LL), MASK_SSF_RRDRD, INSTR_SSF_RRDRD,3, 0 ,0 }, |
|
}; |
|
|
|
const int s390_num_opformats = |
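The s390_opformats table is what the assembler's .insn pseudo mnemonic consults: the format name given as the first .insn operand selects one of these blueprint entries. A hedged sketch of that lookup (the real code is in gas/config/tc-s390.c), assuming the extern declarations for s390_opformats and s390_num_opformats from include/opcode/s390.h:

#include <string.h>
#include "opcode/s390.h"

/* Sketch only: find the .insn blueprint entry for a format name
   such as "rxy" or "rrf"; returns NULL if the name is unknown.  */
static const struct s390_opcode *
find_opformat (const char *name)
{
  int i;

  for (i = 0; i < s390_num_opformats; i++)
    if (strcmp (s390_opformats[i].name, name) == 0)
      return &s390_opformats[i];
  return NULL;
}
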
|
--- a/opcodes/s390-opc.txt |
|
+++ b/opcodes/s390-opc.txt |
|
@@ -262,10 +262,10 @@ a700 tmlh RI_RU "test under mask low high" g5 esa,zarch |
|
a700 tmh RI_RU "test under mask high" g5 esa,zarch |
|
a701 tmll RI_RU "test under mask low low" g5 esa,zarch |
|
a701 tml RI_RU "test under mask low" g5 esa,zarch |
|
-0700 nopr RR_0R_OPT "no operation" g5 esa,zarch |
|
+0700 nopr RR_0R "no operation" g5 esa,zarch optparm |
|
0700 b*8r RR_0R "conditional branch" g5 esa,zarch |
|
07f0 br RR_0R "unconditional branch" g5 esa,zarch |
|
-4700 nop RX_0RRD_OPT "no operation" g5 esa,zarch |
|
+4700 nop RX_0RRD "no operation" g5 esa,zarch optparm |
|
4700 b*8 RX_0RRD "conditional branch" g5 esa,zarch |
|
47f0 b RX_0RRD "unconditional branch" g5 esa,zarch |
|
a704 j*8 RI_0P "conditional jump" g5 esa,zarch |
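The optparm flag replaces the former *_OPT format variants: the last operand may be omitted when assembling, and the disassembler suppresses it when its encoded value is zero (which is why nopr/nop with a zero operand no longer show one in the objdump listings). A minimal sketch of that print decision, assuming the S390_INSTR_FLAG_OPTPARM define and a flags member added to struct s390_opcode by this patch:

#include "opcode/s390.h"

/* Sketch only: decide whether a trailing, optional operand should be
   printed.  E.g. "nopr %r0" disassembles as plain "nopr".  */
static int
print_optional_operand_p (const struct s390_opcode *op, unsigned int value)
{
  if ((op->flags & S390_INSTR_FLAG_OPTPARM) && value == 0)
    return 0;
  return 1;
}
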
|
@@ -298,7 +298,7 @@ b30d debr RRE_FF "divide short bfp" g5 esa,zarch |
|
ed000000000d deb RXE_FRRD "divide short bfp" g5 esa,zarch |
|
b35b didbr RRF_FUFF "divide to integer long bfp" g5 esa,zarch |
|
b353 diebr RRF_FUFF "divide to integer short bfp" g5 esa,zarch |
|
-b38c efpc RRE_RR_OPT "extract fpc" g5 esa,zarch |
|
+b38c efpc RRE_RR "extract fpc" g5 esa,zarch optparm |
|
b342 ltxbr RRE_FEFE "load and test extended bfp" g5 esa,zarch |
|
b312 ltdbr RRE_FF "load and test long bfp" g5 esa,zarch |
|
b302 ltebr RRE_FF "load and test short bfp" g5 esa,zarch |
|
@@ -341,7 +341,7 @@ b31f msdbr RRF_F0FF "multiply and subtract long bfp" g5 esa,zarch |
|
ed000000001f msdb RXF_FRRDF "multiply and subtract long bfp" g5 esa,zarch |
|
b30f msebr RRF_F0FF "multiply and subtract short bfp" g5 esa,zarch |
|
ed000000000f mseb RXF_FRRDF "multiply and subtract short bfp" g5 esa,zarch |
|
-b384 sfpc RRE_RR_OPT "set fpc" g5 esa,zarch |
|
+b384 sfpc RRE_RR "set fpc" g5 esa,zarch optparm |
|
b299 srnm S_RD "set rounding mode" g5 esa,zarch |
|
b316 sqxbr RRE_FEFE "square root extended bfp" g5 esa,zarch |
|
b315 sqdbr RRE_FF "square root long bfp" g5 esa,zarch |
|
@@ -765,21 +765,21 @@ c800 mvcos SSF_RRDRD "move with optional specifications" z9-109 zarch |
|
# z9-109 load page-table-entry address instruction |
|
b9aa lptea RRF_RURR2 "load page-table-entry address" z9-109 zarch |
|
# z9-109 conditional sske facility, sske instruction entered twice |
|
-b22b sske RRF_M0RR "set storage key extended" z9-109 zarch |
|
+b22b sske RRF_U0RR "set storage key extended" z9-109 zarch optparm |
|
# z9-109 etf2-enhancement facility, instructions entered twice |
|
-b993 troo RRF_M0RERE "translate one to one" z9-109 esa,zarch |
|
-b992 trot RRF_M0RERE "translate one to two" z9-109 esa,zarch |
|
-b991 trto RRF_M0RERE "translate two to one" z9-109 esa,zarch |
|
-b990 trtt RRF_M0RERE "translate two to two" z9-109 esa,zarch |
|
+b993 troo RRF_U0RERE "translate one to one" z9-109 esa,zarch optparm |
|
+b992 trot RRF_U0RERE "translate one to two" z9-109 esa,zarch optparm |
|
+b991 trto RRF_U0RERE "translate two to one" z9-109 esa,zarch optparm |
|
+b990 trtt RRF_U0RERE "translate two to two" z9-109 esa,zarch optparm |
|
# z9-109 etf3-enhancement facility, some instructions entered twice |
|
-b9b1 cu24 RRF_M0RERE "convert utf-16 to utf-32" z9-109 zarch |
|
-b2a6 cu21 RRF_M0RERE "convert utf-16 to utf-8" z9-109 zarch |
|
-b2a6 cuutf RRF_M0RERE "convert unicode to utf-8" z9-109 zarch |
|
+b9b1 cu24 RRF_U0RERE "convert utf-16 to utf-32" z9-109 zarch optparm |
|
+b2a6 cu21 RRF_U0RERE "convert utf-16 to utf-8" z9-109 zarch optparm |
|
+b2a6 cuutf RRF_U0RERE "convert unicode to utf-8" z9-109 zarch optparm |
|
b9b3 cu42 RRE_RERE "convert utf-32 to utf-16" z9-109 zarch |
|
b9b2 cu41 RRE_RERE "convert utf-32 to utf-8" z9-109 zarch |
|
-b2a7 cu12 RRF_M0RERE "convert utf-8 to utf-16" z9-109 zarch |
|
-b2a7 cutfu RRF_M0RERE "convert utf-8 to unicode" z9-109 zarch |
|
-b9b0 cu14 RRF_M0RERE "convert utf-8 to utf-32" z9-109 zarch |
|
+b2a7 cu12 RRF_U0RERE "convert utf-8 to utf-16" z9-109 zarch optparm |
|
+b2a7 cutfu RRF_U0RERE "convert utf-8 to unicode" z9-109 zarch optparm |
|
+b9b0 cu14 RRF_U0RERE "convert utf-8 to utf-32" z9-109 zarch optparm |
|
b9be srstu RRE_RR "search string unicode" z9-109 zarch |
|
d0 trtr SS_L0RDRD "translate and test reverse" z9-109 zarch |

|
# z9-109 unnormalized hfp multiply & multiply and add |
|
@@ -963,8 +963,8 @@ c600 exrl RIL_RP "execute relative long" z10 zarch |
|
af00 mc SI_URD "monitor call" z10 zarch |
|
b9a2 ptf RRE_R0 "perform topology function" z10 zarch |
|
b9af pfmf RRE_RR "perform frame management function" z10 zarch |
|
-b9bf trte RRF_M0RER "translate and test extended" z10 zarch |
|
-b9bd trtre RRF_M0RER "translate and test reverse extended" z10 zarch |
|
+b9bf trte RRF_U0RER "translate and test extended" z10 zarch optparm |
|
+b9bd trtre RRF_U0RER "translate and test reverse extended" z10 zarch optparm |
|
b2ed ecpga RRE_RR "extract coprocessor-group address" z10 zarch |
|
b2e4 ecctr RRE_RR "extract cpu counter" z10 zarch |
|
b2e5 epctr RRE_RR "extract peripheral counter" z10 zarch |
|
@@ -1128,7 +1128,7 @@ c7 bpp SMI_U0RDP "branch prediction preload" zEC12 zarch |
|
c5 bprp MII_UPP "branch prediction relative preload" zEC12 zarch |
|
b2e8 ppa RRF_U0RR "perform processor assist" zEC12 zarch |
|
b2fa niai IE_UU "next instruction access intent" zEC12 zarch |
|
-b98f crdte RRF_RMRR "compare and replace DAT table entry" zEC12 zarch |
|
+b98f crdte RRF_RURR2 "compare and replace DAT table entry" zEC12 zarch optparm |
|
e3000000009f lat RXY_RRRD "load and trap 32 bit" zEC12 zarch |
|
e30000000085 lgat RXY_RRRD "load and trap 64 bit" zEC12 zarch |
|
e300000000c8 lfhat RXY_RRRD "load high and trap" zEC12 zarch |
|
@@ -1143,3 +1143,516 @@ ed00000000aa cdzt RSL_LRDFU "convert from zoned long" zEC12 zarch |
|
ed00000000ab cxzt RSL_LRDFEU "convert from zoned extended" zEC12 zarch |
|
ed00000000a8 czdt RSL_LRDFU "convert to zoned long" zEC12 zarch |
|
ed00000000a9 czxt RSL_LRDFEU "convert to zoned extended" zEC12 zarch |
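Each s390-opc.txt line below supplies the opcode value, the mnemonic, the format name, a quoted description, the earliest CPU level, the execution modes, and now an optional flags column such as "optparm" (parsed by opcodes/s390-mkopc.c per the ChangeLog). A rough sketch of splitting such a line, for illustration only and not the actual mkopc parser:

#include <stdio.h>

/* Sketch only: split one s390-opc.txt line into its columns.  Returns
   6 for ordinary entries, 7 when a flag such as "optparm" follows.  */
static int
split_opc_line (const char *line, char opcode[16], char mnemonic[16],
                char format[16], char descr[80], char cpu[16],
                char modes[16], char flags[16])
{
  return sscanf (line, "%15s %15s %15s \"%79[^\"]\" %15s %15s %15s",
                 opcode, mnemonic, format, descr, cpu, modes, flags);
}
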
|
+ |
|
+# The new instructions of IBM z13 |
|
+ |
|
+e70000000027 lcbb RXE_RRRDU "load count to block boundary" z13 zarch |
|
+ |
|
+# Chapter 21 |
|
+e70000000013 vgef VRV_VVXRDU "vector gather element 4 byte elements" z13 zarch |
|
+e70000000012 vgeg VRV_VVXRDU "vector gather element 8 byte elements" z13 zarch |
|
+e70000000044 vgbm VRI_V0U "vector generate byte mask" z13 zarch |
|
+e70000000044 vzero VRI_V "vector set to zero" z13 zarch |
|
+e700ffff0044 vone VRI_V "vector set to ones" z13 zarch |
|
+e70000000046 vgm VRI_V0UUU "vector generate mask" z13 zarch |
|
+e70000000046 vgmb VRI_V0UU "vector generate mask byte" z13 zarch |
|
+e70000001046 vgmh VRI_V0UU "vector generate mask halfword" z13 zarch |
|
+e70000002046 vgmf VRI_V0UU "vector generate mask word" z13 zarch |
|
+e70000003046 vgmg VRI_V0UU "vector generate mask double word" z13 zarch |
|
+e70000000006 vl VRX_VRRD "vector memory load" z13 zarch |
|
+e70000000056 vlr VRX_VV "vector register load" z13 zarch |
|
+e70000000005 vlrep VRX_VRRDU "vector load and replicate" z13 zarch |
|
+e70000000005 vlrepb VRX_VRRD "vector load and replicate byte elements" z13 zarch |
|
+e70000001005 vlreph VRX_VRRD "vector load and replicate halfword elements" z13 zarch |
|
+e70000002005 vlrepf VRX_VRRD "vector load and replicate word elements" z13 zarch |
|
+e70000003005 vlrepg VRX_VRRD "vector load and replicate double word elements" z13 zarch |
|
+e70000000000 vleb VRX_VRRDU "vector load byte element" z13 zarch |
|
+e70000000001 vleh VRX_VRRDU "vector load halfword element" z13 zarch |
|
+e70000000003 vlef VRX_VRRDU "vector load word element" z13 zarch |
|
+e70000000002 vleg VRX_VRRDU "vector load double word element" z13 zarch |
|
+e70000000040 vleib VRI_V0IU "vector load byte element immediate" z13 zarch |
|
+e70000000041 vleih VRI_V0IU "vector load halfword element immediate" z13 zarch |
|
+e70000000043 vleif VRI_V0IU "vector load word element immediate" z13 zarch |
|
+e70000000042 vleig VRI_V0IU "vector load double word element immediate" z13 zarch |
|
+e70000000021 vlgv VRS_RVRDU "vector load gr from vr element" z13 zarch |
|
+e70000000021 vlgvb VRS_RVRD "vector load gr from vr byte element" z13 zarch |
|
+e70000001021 vlgvh VRS_RVRD "vector load gr from vr halfword element" z13 zarch |
|
+e70000002021 vlgvf VRS_RVRD "vector load gr from vr word element" z13 zarch |
|
+e70000003021 vlgvg VRS_RVRD "vector load gr from vr double word element" z13 zarch |
|
+e70000000004 vllez VRX_VRRDU "vector load logical element and zero" z13 zarch |
|
+e70000000004 vllezb VRX_VRRD "vector load logical byte element and zero" z13 zarch |
|
+e70000001004 vllezh VRX_VRRD "vector load logical halfword element and zero" z13 zarch |
|
+e70000002004 vllezf VRX_VRRD "vector load logical word element and zero" z13 zarch |
|
+e70000003004 vllezg VRX_VRRD "vector load logical double word element and zero" z13 zarch |
|
+e70000000036 vlm VRS_VVRD "vector load multiple" z13 zarch |
|
+e70000000007 vlbb VRX_VRRDU "vector load to block boundary" z13 zarch |
|
+e70000000022 vlvg VRS_VRRDU "vector load VR element from GR" z13 zarch |
|
+e70000000022 vlvgb VRS_VRRD "vector load VR byte element from GR" z13 zarch |
|
+e70000001022 vlvgh VRS_VRRD "vector load VR halfword element from GR" z13 zarch |
|
+e70000002022 vlvgf VRS_VRRD "vector load VR word element from GR" z13 zarch |
|
+e70000003022 vlvgg VRS_VRRD "vector load VR double word element from GR" z13 zarch |
|
+e70000000062 vlvgp VRR_VRR "vector load VR from GRs disjoint" z13 zarch |
|
+e70000000037 vll VRS_VRRD "vector load with length" z13 zarch |
|
+e70000000061 vmrh VRR_VVV0U "vector merge high" z13 zarch |
|
+e70000000061 vmrhb VRR_VVV "vector merge high byte" z13 zarch |
|
+e70000001061 vmrhh VRR_VVV "vector merge high halfword" z13 zarch |
|
+e70000002061 vmrhf VRR_VVV "vector merge high word" z13 zarch |
|
+e70000003061 vmrhg VRR_VVV "vector merge high double word" z13 zarch |
|
+e70000000060 vmrl VRR_VVV0U "vector merge low" z13 zarch |
|
+e70000000060 vmrlb VRR_VVV "vector merge low byte" z13 zarch |
|
+e70000001060 vmrlh VRR_VVV "vector merge low halfword" z13 zarch |
|
+e70000002060 vmrlf VRR_VVV "vector merge low word" z13 zarch |
|
+e70000003060 vmrlg VRR_VVV "vector merge low double word" z13 zarch |
|
+e70000000094 vpk VRR_VVV0U "vector pack" z13 zarch |
|
+e70000001094 vpkh VRR_VVV "vector pack halfword" z13 zarch |
|
+e70000002094 vpkf VRR_VVV "vector pack word" z13 zarch |
|
+e70000003094 vpkg VRR_VVV "vector pack double word" z13 zarch |
|
+e70000000097 vpks VRR_VVV0U0U "vector pack saturate" z13 zarch |
|
+e70000001097 vpksh VRR_VVV "vector pack saturate halfword" z13 zarch |
|
+e70000002097 vpksf VRR_VVV "vector pack saturate word" z13 zarch |
|
+e70000003097 vpksg VRR_VVV "vector pack saturate double word" z13 zarch |
|
+e70000101097 vpkshs VRR_VVV "vector pack saturate halfword" z13 zarch |
|
+e70000102097 vpksfs VRR_VVV "vector pack saturate word" z13 zarch |
|
+e70000103097 vpksgs VRR_VVV "vector pack saturate double word" z13 zarch |
|
+e70000000095 vpkls VRR_VVV0U0U "vector pack logical saturate" z13 zarch |
|
+e70000001095 vpklsh VRR_VVV "vector pack logical saturate halfword" z13 zarch |
|
+e70000002095 vpklsf VRR_VVV "vector pack logical saturate word" z13 zarch |
|
+e70000003095 vpklsg VRR_VVV "vector pack logical saturate double word" z13 zarch |
|
+e70000101095 vpklshs VRR_VVV "vector pack logical saturate halfword" z13 zarch |
|
+e70000102095 vpklsfs VRR_VVV "vector pack logical saturate word" z13 zarch |
|
+e70000103095 vpklsgs VRR_VVV "vector pack logical saturate double word" z13 zarch |
|
+e7000000008c vperm VRR_VVV0V "vector permute" z13 zarch |
|
+e70000000084 vpdi VRR_VVV0U "vector permute double word immediate" z13 zarch |
|
+e7000000004d vrep VRI_VVUU "vector replicate" z13 zarch |
|
+e7000000004d vrepb VRI_VVU "vector replicate byte" z13 zarch |
|
+e7000000104d vreph VRI_VVU "vector replicate halfword" z13 zarch |
|
+e7000000204d vrepf VRI_VVU "vector replicate word" z13 zarch |
|
+e7000000304d vrepg VRI_VVU "vector replicate double word" z13 zarch |
|
+e70000000045 vrepi VRI_V0IU "vector replicate immediate" z13 zarch |
|
+e70000000045 vrepib VRI_V0I "vector replicate immediate byte" z13 zarch |
|
+e70000001045 vrepih VRI_V0I "vector replicate immediate halfword" z13 zarch |
|
+e70000002045 vrepif VRI_V0I "vector replicate immediate word" z13 zarch |
|
+e70000003045 vrepig VRI_V0I "vector replicate immediate double word" z13 zarch |
|
+e7000000001b vscef VRV_VVXRDU "vector scatter element 4 byte" z13 zarch |
|
+e7000000001a vsceg VRV_VVXRDU "vector scatter element 8 byte" z13 zarch |
|
+e7000000008d vsel VRR_VVV0V "vector select" z13 zarch |
|
+e7000000005f vseg VRR_VV0U "vector sign extend to double word" z13 zarch |
|
+e7000000005f vsegb VRR_VV "vector sign extend byte to double word" z13 zarch |
|
+e7000000105f vsegh VRR_VV "vector sign extend halfword to double word" z13 zarch |
|
+e7000000205f vsegf VRR_VV "vector sign extend word to double word" z13 zarch |
|
+e7000000000e vst VRX_VRRD "vector store" z13 zarch |
|
+e70000000008 vsteb VRX_VRRDU "vector store byte element" z13 zarch |
|
+e70000000009 vsteh VRX_VRRDU "vector store halfword element" z13 zarch |
|
+e7000000000b vstef VRX_VRRDU "vector store word element" z13 zarch |
|
+e7000000000a vsteg VRX_VRRDU "vector store double word element" z13 zarch |
|
+e7000000003e vstm VRS_VVRD "vector store multiple" z13 zarch |
|
+e7000000003f vstl VRS_VRRD "vector store with length" z13 zarch |
|
+e700000000d7 vuph VRR_VV0U "vector unpack high" z13 zarch |
|
+e700000000d7 vuphb VRR_VV "vector unpack high byte" z13 zarch |
|
+e700000010d7 vuphh VRR_VV "vector unpack high halfword" z13 zarch |
|
+e700000020d7 vuphf VRR_VV "vector unpack high word" z13 zarch |
|
+e700000000d5 vuplh VRR_VV0U "vector unpack logical high" z13 zarch |
|
+e700000000d5 vuplhb VRR_VV "vector unpack logical high byte" z13 zarch |
|
+e700000010d5 vuplhh VRR_VV "vector unpack logical high halfword" z13 zarch |
|
+e700000020d5 vuplhf VRR_VV "vector unpack logical high word" z13 zarch |
|
+e700000000d6 vupl VRR_VV0U "vector unpack low" z13 zarch |
|
+e700000000d6 vuplb VRR_VV "vector unpack low byte" z13 zarch |
|
+e700000010d6 vuplhw VRR_VV "vector unpack low halfword" z13 zarch |
|
+e700000020d6 vuplf VRR_VV "vector unpack low word" z13 zarch |
|
+e700000000d4 vupll VRR_VV0U "vector unpack logical low" z13 zarch |
|
+e700000000d4 vupllb VRR_VV "vector unpack logical low byte" z13 zarch |
|
+e700000010d4 vupllh VRR_VV "vector unpack logical low halfword" z13 zarch |
|
+e700000020d4 vupllf VRR_VV "vector unpack logical low word" z13 zarch |
|
+ |
|
+# Chapter 22 |
|
+e700000000f3 va VRR_VVV0U "vector add" z13 zarch |
|
+e700000000f3 vab VRR_VVV "vector add byte" z13 zarch |
|
+e700000010f3 vah VRR_VVV "vector add halfword" z13 zarch |
|
+e700000020f3 vaf VRR_VVV "vector add word" z13 zarch |
|
+e700000030f3 vag VRR_VVV "vector add double word" z13 zarch |
|
+e700000040f3 vaq VRR_VVV "vector add quad word" z13 zarch |
|
+e700000000f1 vacc VRR_VVV0U "vector add compute carry" z13 zarch |
|
+e700000000f1 vaccb VRR_VVV "vector add compute carry byte" z13 zarch |
|
+e700000010f1 vacch VRR_VVV "vector add compute carry halfword" z13 zarch |
|
+e700000020f1 vaccf VRR_VVV "vector add compute carry word" z13 zarch |
|
+e700000030f1 vaccg VRR_VVV "vector add compute carry doubleword" z13 zarch |
|
+e700000040f1 vaccq VRR_VVV "vector add compute carry quadword" z13 zarch |
|
+e700000000bb vac VRR_VVVU0V "vector add with carry" z13 zarch |
|
+e700040000bb vacq VRR_VVV0V "vector add with carry quadword" z13 zarch |
|
+e700000000b9 vaccc VRR_VVVU0V "vector add with carry compute carry" z13 zarch |
|
+e700040000b9 vacccq VRR_VVV0V "vector add with carry compute carry quadword" z13 zarch |
|
+e70000000068 vn VRR_VVV "vector and" z13 zarch |
|
+e70000000069 vnc VRR_VVV "vector and with complement" z13 zarch |
|
+e700000000f2 vavg VRR_VVV0U "vector average" z13 zarch |
|
+e700000000f2 vavgb VRR_VVV "vector average byte" z13 zarch |
|
+e700000010f2 vavgh VRR_VVV "vector average half word" z13 zarch |
|
+e700000020f2 vavgf VRR_VVV "vector average word" z13 zarch |
|
+e700000030f2 vavgg VRR_VVV "vector average double word" z13 zarch |
|
+e700000000f0 vavgl VRR_VVV0U "vector average logical" z13 zarch |
|
+e700000000f0 vavglb VRR_VVV "vector average logical byte" z13 zarch |
|
+e700000010f0 vavglh VRR_VVV "vector average logical half word" z13 zarch |
|
+e700000020f0 vavglf VRR_VVV "vector average logical word" z13 zarch |
|
+e700000030f0 vavglg VRR_VVV "vector average logical double word" z13 zarch |
|
+e70000000066 vcksm VRR_VVV "vector checksum" z13 zarch |
|
+e700000000db vec VRR_VV0U "vector element compare" z13 zarch |
|
+e700000000db vecb VRR_VV "vector element compare byte" z13 zarch |
|
+e700000010db vech VRR_VV "vector element compare half word" z13 zarch |
|
+e700000020db vecf VRR_VV "vector element compare word" z13 zarch |
|
+e700000030db vecg VRR_VV "vector element compare double word" z13 zarch |
|
+e700000000d9 vecl VRR_VV0U "vector element compare logical" z13 zarch |
|
+e700000000d9 veclb VRR_VV "vector element compare logical byte" z13 zarch |
|
+e700000010d9 veclh VRR_VV "vector element compare logical half word" z13 zarch |
|
+e700000020d9 veclf VRR_VV "vector element compare logical word" z13 zarch |
|
+e700000030d9 veclg VRR_VV "vector element compare logical double word" z13 zarch |
|
+e700000000f8 vceq VRR_VVV0U0U "vector compare equal" z13 zarch |
|
+e700000000f8 vceqb VRR_VVV "vector compare equal byte" z13 zarch |
|
+e700000010f8 vceqh VRR_VVV "vector compare equal half word" z13 zarch |
|
+e700000020f8 vceqf VRR_VVV "vector compare equal word" z13 zarch |
|
+e700000030f8 vceqg VRR_VVV "vector compare equal double word" z13 zarch |
|
+e700001000f8 vceqbs VRR_VVV "vector compare equal byte" z13 zarch |
|
+e700001010f8 vceqhs VRR_VVV "vector compare equal half word" z13 zarch |
|
+e700001020f8 vceqfs VRR_VVV "vector compare equal word" z13 zarch |
|
+e700001030f8 vceqgs VRR_VVV "vector compare equal double word" z13 zarch |
|
+e700000000fb vch VRR_VVV0U0U "vector compare high" z13 zarch |
|
+e700000000fb vchb VRR_VVV "vector compare high byte" z13 zarch |
|
+e700000010fb vchh VRR_VVV "vector compare high half word" z13 zarch |
|
+e700000020fb vchf VRR_VVV "vector compare high word" z13 zarch |
|
+e700000030fb vchg VRR_VVV "vector compare high double word" z13 zarch |
|
+e700001000fb vchbs VRR_VVV "vector compare high byte" z13 zarch |
|
+e700001010fb vchhs VRR_VVV "vector compare high half word" z13 zarch |
|
+e700001020fb vchfs VRR_VVV "vector compare high word" z13 zarch |
|
+e700001030fb vchgs VRR_VVV "vector compare high double word" z13 zarch |
|
+e700000000f9 vchl VRR_VVV0U0U "vector compare high logical" z13 zarch |
|
+e700000000f9 vchlb VRR_VVV "vector compare high logical byte" z13 zarch |
|
+e700000010f9 vchlh VRR_VVV "vector compare high logical half word" z13 zarch |
|
+e700000020f9 vchlf VRR_VVV "vector compare high logical word" z13 zarch |
|
+e700000030f9 vchlg VRR_VVV "vector compare high logical double word" z13 zarch |
|
+e700001000f9 vchlbs VRR_VVV "vector compare high logical byte" z13 zarch |
|
+e700001010f9 vchlhs VRR_VVV "vector compare high logical half word" z13 zarch |
|
+e700001020f9 vchlfs VRR_VVV "vector compare high logical word" z13 zarch |
|
+e700001030f9 vchlgs VRR_VVV "vector compare high logical double word" z13 zarch |
|
+e70000000053 vclz VRR_VV0U "vector count leading zeros" z13 zarch |
|
+e70000000053 vclzb VRR_VV "vector count leading zeros byte" z13 zarch |
|
+e70000001053 vclzh VRR_VV "vector count leading zeros halfword" z13 zarch |
|
+e70000002053 vclzf VRR_VV "vector count leading zeros word" z13 zarch |
|
+e70000003053 vclzg VRR_VV "vector count leading zeros doubleword" z13 zarch |
|
+e70000000052 vctz VRR_VV0U "vector count trailing zeros" z13 zarch |
|
+e70000000052 vctzb VRR_VV "vector count trailing zeros byte" z13 zarch |
|
+e70000001052 vctzh VRR_VV "vector count trailing zeros halfword" z13 zarch |
|
+e70000002052 vctzf VRR_VV "vector count trailing zeros word" z13 zarch |
|
+e70000003052 vctzg VRR_VV "vector count trailing zeros doubleword" z13 zarch |
|
+e7000000006d vx VRR_VVV "vector exclusive or" z13 zarch |
|
+e700000000b4 vgfm VRR_VVV0U "vector galois field multiply sum" z13 zarch |
|
+e700000000b4 vgfmb VRR_VVV "vector galois field multiply sum byte" z13 zarch |
|
+e700000010b4 vgfmh VRR_VVV "vector galois field multiply sum halfword" z13 zarch |
|
+e700000020b4 vgfmf VRR_VVV "vector galois field multiply sum word" z13 zarch |
|
+e700000030b4 vgfmg VRR_VVV "vector galois field multiply sum doubleword" z13 zarch |
|
+e700000000bc vgfma VRR_VVVU0V "vector galois field multiply sum and accumulate" z13 zarch |
|
+e700000000bc vgfmab VRR_VVV0V "vector galois field multiply sum and accumulate byte" z13 zarch |
|
+e700010000bc vgfmah VRR_VVV0V "vector galois field multiply sum and accumulate halfword" z13 zarch |
|
+e700020000bc vgfmaf VRR_VVV0V "vector galois field multiply sum and accumulate word" z13 zarch |
|
+e700030000bc vgfmag VRR_VVV0V "vector galois field multiply sum and accumulate doubleword" z13 zarch |
|
+e700000000de vlc VRR_VV0U "vector load complement" z13 zarch |
|
+e700000000de vlcb VRR_VV "vector load complement byte" z13 zarch |
|
+e700000010de vlch VRR_VV "vector load complement halfword" z13 zarch |
|
+e700000020de vlcf VRR_VV "vector load complement word" z13 zarch |
|
+e700000030de vlcg VRR_VV "vector load complement doubleword" z13 zarch |
|
+e700000000df vlp VRR_VV0U "vector load positive" z13 zarch |
|
+e700000000df vlpb VRR_VV "vector load positive byte" z13 zarch |
|
+e700000010df vlph VRR_VV "vector load positive halfword" z13 zarch |
|
+e700000020df vlpf VRR_VV "vector load positive word" z13 zarch |
|
+e700000030df vlpg VRR_VV "vector load positive doubleword" z13 zarch |
|
+e700000000ff vmx VRR_VVV0U "vector maximum" z13 zarch |
|
+e700000000ff vmxb VRR_VVV "vector maximum byte" z13 zarch |
|
+e700000010ff vmxh VRR_VVV "vector maximum halfword" z13 zarch |
|
+e700000020ff vmxf VRR_VVV "vector maximum word" z13 zarch |
|
+e700000030ff vmxg VRR_VVV "vector maximum doubleword" z13 zarch |
|
+e700000000fd vmxl VRR_VVV0U "vector maximum logical" z13 zarch |
|
+e700000000fd vmxlb VRR_VVV "vector maximum logical byte" z13 zarch |
|
+e700000010fd vmxlh VRR_VVV "vector maximum logical halfword" z13 zarch |
|
+e700000020fd vmxlf VRR_VVV "vector maximum logical word" z13 zarch |
|
+e700000030fd vmxlg VRR_VVV "vector maximum logical doubleword" z13 zarch |
|
+e700000000fe vmn VRR_VVV0U "vector minimum" z13 zarch |
|
+e700000000fe vmnb VRR_VVV "vector minimum byte" z13 zarch |
|
+e700000010fe vmnh VRR_VVV "vector minimum halfword" z13 zarch |
|
+e700000020fe vmnf VRR_VVV "vector minimum word" z13 zarch |
|
+e700000030fe vmng VRR_VVV "vector minimum doubleword" z13 zarch |
|
+e700000000fc vmnl VRR_VVV0U "vector minimum logical" z13 zarch |
|
+e700000000fc vmnlb VRR_VVV "vector minimum logical byte" z13 zarch |
|
+e700000010fc vmnlh VRR_VVV "vector minimum logical halfword" z13 zarch |
|
+e700000020fc vmnlf VRR_VVV "vector minimum logical word" z13 zarch |
|
+e700000030fc vmnlg VRR_VVV "vector minimum logical doubleword" z13 zarch |
|
+e700000000aa vmal VRR_VVVU0V "vector multiply and add low" z13 zarch |
|
+e700000000aa vmalb VRR_VVV0V "vector multiply and add low byte" z13 zarch |
|
+e700010000aa vmalhw VRR_VVV0V "vector multiply and add low halfword" z13 zarch |
|
+e700020000aa vmalf VRR_VVV0V "vector multiply and add low word" z13 zarch |
|
+e700000000ab vmah VRR_VVVU0V "vector multiply and add high" z13 zarch |
|
+e700000000ab vmahb VRR_VVV0V "vector multiply and add high byte" z13 zarch |
|
+e700010000ab vmahh VRR_VVV0V "vector multiply and add high halfword" z13 zarch |
|
+e700020000ab vmahf VRR_VVV0V "vector multiply and add high word" z13 zarch |
|
+e700000000a9 vmalh VRR_VVVU0V "vector multiply and add logical high" z13 zarch |
|
+e700000000a9 vmalhb VRR_VVV0V "vector multiply and add logical high byte" z13 zarch |
|
+e700010000a9 vmalhh VRR_VVV0V "vector multiply and add logical high halfword" z13 zarch |
|
+e700020000a9 vmalhf VRR_VVV0V "vector multiply and add logical high word" z13 zarch |
|
+e700000000ae vmae VRR_VVVU0V "vector multiply and add even" z13 zarch |
|
+e700000000ae vmaeb VRR_VVV0V "vector multiply and add even byte" z13 zarch |
|
+e700010000ae vmaeh VRR_VVV0V "vector multiply and add even halfword" z13 zarch |
|
+e700020000ae vmaef VRR_VVV0V "vector multiply and add even word" z13 zarch |
|
+e700000000ac vmale VRR_VVVU0V "vector multiply and add logical even" z13 zarch |
|
+e700000000ac vmaleb VRR_VVV0V "vector multiply and add logical even byte" z13 zarch |
|
+e700010000ac vmaleh VRR_VVV0V "vector multiply and add logical even halfword" z13 zarch |
|
+e700020000ac vmalef VRR_VVV0V "vector multiply and add logical even word" z13 zarch |
|
+e700000000af vmao VRR_VVVU0V "vector multiply and add odd" z13 zarch |
|
+e700000000af vmaob VRR_VVV0V "vector multiply and add odd byte" z13 zarch |
|
+e700010000af vmaoh VRR_VVV0V "vector multiply and add odd halfword" z13 zarch |
|
+e700020000af vmaof VRR_VVV0V "vector multiply and add odd word" z13 zarch |
|
+e700000000ad vmalo VRR_VVVU0V "vector multiply and add logical odd" z13 zarch |
|
+e700000000ad vmalob VRR_VVV0V "vector multiply and add logical odd byte" z13 zarch |
|
+e700010000ad vmaloh VRR_VVV0V "vector multiply and add logical odd halfword" z13 zarch |
|
+e700020000ad vmalof VRR_VVV0V "vector multiply and add logical odd word" z13 zarch |
|
+e700000000a3 vmh VRR_VVV0U "vector multiply high" z13 zarch |
|
+e700000000a3 vmhb VRR_VVV "vector multiply high byte" z13 zarch |
|
+e700000010a3 vmhh VRR_VVV "vector multiply high halfword" z13 zarch |
|
+e700000020a3 vmhf VRR_VVV "vector multiply high word" z13 zarch |
|
+e700000000a1 vmlh VRR_VVV0U "vector multiply logical high" z13 zarch |
|
+e700000000a1 vmlhb VRR_VVV "vector multiply logical high byte" z13 zarch |
|
+e700000010a1 vmlhh VRR_VVV "vector multiply logical high halfword" z13 zarch |
|
+e700000020a1 vmlhf VRR_VVV "vector multiply logical high word" z13 zarch |
|
+e700000000a2 vml VRR_VVV0U "vector multiply low" z13 zarch |
|
+e700000000a2 vmlb VRR_VVV "vector multiply low byte" z13 zarch |
|
+e700000010a2 vmlhw VRR_VVV "vector multiply low halfword" z13 zarch |
|
+e700000020a2 vmlf VRR_VVV "vector multiply low word" z13 zarch |
|
+e700000000a6 vme VRR_VVV0U "vector multiply even" z13 zarch |
|
+e700000000a6 vmeb VRR_VVV "vector multiply even byte" z13 zarch |
|
+e700000010a6 vmeh VRR_VVV "vector multiply even halfword" z13 zarch |
|
+e700000020a6 vmef VRR_VVV "vector multiply even word" z13 zarch |
|
+e700000000a4 vmle VRR_VVV0U "vector multiply logical even" z13 zarch |
|
+e700000000a4 vmleb VRR_VVV "vector multiply logical even byte" z13 zarch |
|
+e700000010a4 vmleh VRR_VVV "vector multiply logical even halfword" z13 zarch |
|
+e700000020a4 vmlef VRR_VVV "vector multiply logical even word" z13 zarch |
|
+e700000000a7 vmo VRR_VVV0U "vector multiply odd" z13 zarch |
|
+e700000000a7 vmob VRR_VVV "vector multiply odd byte" z13 zarch |
|
+e700000010a7 vmoh VRR_VVV "vector multiply odd halfword" z13 zarch |
|
+e700000020a7 vmof VRR_VVV "vector multiply odd word" z13 zarch |
|
+e700000000a5 vmlo VRR_VVV0U "vector multiply logical odd" z13 zarch |
|
+e700000000a5 vmlob VRR_VVV "vector multiply logical odd byte" z13 zarch |
|
+e700000010a5 vmloh VRR_VVV "vector multiply logical odd halfword" z13 zarch |
|
+e700000020a5 vmlof VRR_VVV "vector multiply logical odd word" z13 zarch |
|
+e7000000006b vno VRR_VVV "vector nor" z13 zarch |
|
+e7000000006b vnot VRR_VVV2 "vector not" z13 zarch |
|
+e7000000006a vo VRR_VVV "vector or" z13 zarch |
|
+e70000000050 vpopct VRR_VV0U "vector population count" z13 zarch |
|
+e70000000073 verllv VRR_VVV0U "vector element rotate left logical reg" z13 zarch |
|
+e70000000073 verllvb VRR_VVV "vector element rotate left logical reg byte" z13 zarch |
|
+e70000001073 verllvh VRR_VVV "vector element rotate left logical reg halfword" z13 zarch |
|
+e70000002073 verllvf VRR_VVV "vector element rotate left logical reg word" z13 zarch |
|
+e70000003073 verllvg VRR_VVV "vector element rotate left logical reg doubleword" z13 zarch |
|
+e70000000033 verll VRS_VVRDU "vector element rotate left logical mem" z13 zarch |
|
+e70000000033 verllb VRS_VVRD "vector element rotate left logical mem byte" z13 zarch |
|
+e70000001033 verllh VRS_VVRD "vector element rotate left logical mem halfword" z13 zarch |
|
+e70000002033 verllf VRS_VVRD "vector element rotate left logical mem word" z13 zarch |
|
+e70000003033 verllg VRS_VVRD "vector element rotate left logical mem doubleword" z13 zarch |
|
+e70000000072 verim VRI_VVV0UU "vector element rotate and insert under mask" z13 zarch |
|
+e70000000072 verimb VRI_VVV0U "vector element rotate and insert under mask byte" z13 zarch |
|
+e70000001072 verimh VRI_VVV0U "vector element rotate and insert under mask halfword" z13 zarch |
|
+e70000002072 verimf VRI_VVV0U "vector element rotate and insert under mask word" z13 zarch |
|
+e70000003072 verimg VRI_VVV0U "vector element rotate and insert under mask doubleword" z13 zarch |
|
+e70000000070 veslv VRR_VVV0U "vector element shift left reg" z13 zarch |
|
+e70000000070 veslvb VRR_VVV "vector element shift left reg byte" z13 zarch |
|
+e70000001070 veslvh VRR_VVV "vector element shift left reg halfword" z13 zarch |
|
+e70000002070 veslvf VRR_VVV "vector element shift left reg word" z13 zarch |
|
+e70000003070 veslvg VRR_VVV "vector element shift left reg doubleword" z13 zarch |
|
+e70000000030 vesl VRS_VVRDU "vector element shift left mem" z13 zarch |
|
+e70000000030 veslb VRS_VVRD "vector element shift left mem byte" z13 zarch |
|
+e70000001030 veslh VRS_VVRD "vector element shift left mem halfword" z13 zarch |
|
+e70000002030 veslf VRS_VVRD "vector element shift left mem word" z13 zarch |
|
+e70000003030 veslg VRS_VVRD "vector element shift left mem doubleword" z13 zarch |
|
+e7000000007a vesrav VRR_VVV0U "vector element shift right arithmetic reg" z13 zarch |
|
+e7000000007a vesravb VRR_VVV "vector element shift right arithmetic reg byte" z13 zarch |
|
+e7000000107a vesravh VRR_VVV "vector element shift right arithmetic reg halfword" z13 zarch |
|
+e7000000207a vesravf VRR_VVV "vector element shift right arithmetic reg word" z13 zarch |
|
+e7000000307a vesravg VRR_VVV "vector element shift right arithmetic reg doubleword" z13 zarch |
|
+e7000000003a vesra VRS_VVRDU "vector element shift right arithmetic mem" z13 zarch |
|
+e7000000003a vesrab VRS_VVRD "vector element shift right arithmetic mem byte" z13 zarch |
|
+e7000000103a vesrah VRS_VVRD "vector element shift right arithmetic mem halfword" z13 zarch |
|
+e7000000203a vesraf VRS_VVRD "vector element shift right arithmetic mem word" z13 zarch |
|
+e7000000303a vesrag VRS_VVRD "vector element shift right arithmetic mem doubleword" z13 zarch |
|
+e70000000078 vesrlv VRR_VVV0U "vector element shift right logical reg" z13 zarch |
|
+e70000000078 vesrlvb VRR_VVV "vector element shift right logical reg byte" z13 zarch |
|
+e70000001078 vesrlvh VRR_VVV "vector element shift right logical reg halfword" z13 zarch |
|
+e70000002078 vesrlvf VRR_VVV "vector element shift right logical reg word" z13 zarch |
|
+e70000003078 vesrlvg VRR_VVV "vector element shift right logical reg doubleword" z13 zarch |
|
+e70000000038 vesrl VRS_VVRDU "vector element shift right logical mem" z13 zarch |
|
+e70000000038 vesrlb VRS_VVRD "vector element shift right logical mem byte" z13 zarch |
|
+e70000001038 vesrlh VRS_VVRD "vector element shift right logical mem halfword" z13 zarch |
|
+e70000002038 vesrlf VRS_VVRD "vector element shift right logical mem word" z13 zarch |
|
+e70000003038 vesrlg VRS_VVRD "vector element shift right logical mem doubleword" z13 zarch |
|
+e70000000074 vsl VRR_VVV "vector shift left" z13 zarch |
|
+e70000000075 vslb VRR_VVV "vector shift left by byte" z13 zarch |
|
+e70000000077 vsldb VRI_VVV0U "vector shift left double by byte" z13 zarch |
|
+e7000000007e vsra VRR_VVV "vector shift right arithmetic" z13 zarch |
|
+e7000000007f vsrab VRR_VVV "vector shift right arithmetic by byte" z13 zarch |
|
+e7000000007c vsrl VRR_VVV "vector shift right logical" z13 zarch |
|
+e7000000007d vsrlb VRR_VVV "vector shift right logical by byte" z13 zarch |
|
+e700000000f7 vs VRR_VVV0U "vector subtract" z13 zarch |
|
+e700000000f7 vsb VRR_VVV "vector subtract byte" z13 zarch |
|
+e700000010f7 vsh VRR_VVV "vector subtract halfword" z13 zarch |
|
+e700000020f7 vsf VRR_VVV "vector subtract word" z13 zarch |
|
+e700000030f7 vsg VRR_VVV "vector subtract doubleword" z13 zarch |
|
+e700000040f7 vsq VRR_VVV "vector subtract quadword" z13 zarch |
|
+e700000000f5 vscbi VRR_VVV0U "vector subtract compute borrow indication" z13 zarch |
|
+e700000000f5 vscbib VRR_VVV "vector subtract compute borrow indication byte" z13 zarch |
|
+e700000010f5 vscbih VRR_VVV "vector subtract compute borrow indication halfword" z13 zarch |
|
+e700000020f5 vscbif VRR_VVV "vector subtract compute borrow indication word" z13 zarch |
|
+e700000030f5 vscbig VRR_VVV "vector subtract compute borrow indication doubleword" z13 zarch |
|
+e700000040f5 vscbiq VRR_VVV "vector subtract compute borrow indication quadword" z13 zarch |
|
+e700000000bf vsbi VRR_VVVU0V "vector subtract with borrow indication" z13 zarch |
|
+e700040000bf vsbiq VRR_VVV0V "vector subtract with borrow indication quadword" z13 zarch |
|
+e700000000bd vsbcbi VRR_VVVU0V "vector subtract with borrow compute borrow indication" z13 zarch |
|
+e700040000bd vsbcbiq VRR_VVV0V "vector subtract with borrow compute borrow indication quadword" z13 zarch |
|
+e70000000065 vsumg VRR_VVV0U "vector sum across doubleword" z13 zarch |
|
+e70000001065 vsumgh VRR_VVV "vector sum across doubleword - halfword" z13 zarch |
|
+e70000002065 vsumgf VRR_VVV "vector sum across doubleword - word" z13 zarch |
|
+e70000000067 vsumq VRR_VVV0U "vector sum across quadword" z13 zarch |
|
+e70000002067 vsumqf VRR_VVV "vector sum across quadword - word elements" z13 zarch |
|
+e70000003067 vsumqg VRR_VVV "vector sum across quadword - doubleword elements" z13 zarch |
|
+e70000000064 vsum VRR_VVV0U "vector sum across word" z13 zarch |
|
+e70000000064 vsumb VRR_VVV "vector sum across word - byte elements" z13 zarch |
|
+e70000001064 vsumh VRR_VVV "vector sum across word - halfword elements" z13 zarch |
|
+e700000000d8 vtm VRR_VV "vector test under mask" z13 zarch |
|
+ |
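
The integer entries above follow a common pattern: the base mnemonic (e.g. vmlh, format VRR_VVV0U) takes the element-size mask as an explicit last operand, while the b/h/f/g variants hard-code that mask (0/1/2/3) in the opcode pattern and drop the operand. As a rough illustration only, assuming the usual gas %v register notation, the two lines below should assemble to the same e7...a1 instruction:

    vmlh  %v1,%v2,%v3,0    # element-size mask given explicitly (0 = byte)
    vmlhb %v1,%v2,%v3      # extended mnemonic with the mask pre-encoded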
|
+# Chapter 23 - Vector String Instructions |
|
+e70000000082 vfae VRR_VVV0U0U "vector find any element equal" z13 zarch optparm |
|
+e70000000082 vfaeb VRR_VVV0U0 "vector find any element equal byte" z13 zarch optparm |
|
+e70000001082 vfaeh VRR_VVV0U0 "vector find any element equal halfword" z13 zarch optparm |
|
+e70000002082 vfaef VRR_VVV0U0 "vector find any element equal word" z13 zarch optparm |
|
+e70000100082 vfaebs VRR_VVV0U1 "vector find any element equal" z13 zarch optparm |
|
+e70000101082 vfaehs VRR_VVV0U1 "vector find any element equal" z13 zarch optparm |
|
+e70000102082 vfaefs VRR_VVV0U1 "vector find any element equal" z13 zarch optparm |
|
+e70000200082 vfaezb VRR_VVV0U2 "vector find any element equal" z13 zarch optparm |
|
+e70000201082 vfaezh VRR_VVV0U2 "vector find any element equal" z13 zarch optparm |
|
+e70000202082 vfaezf VRR_VVV0U2 "vector find any element equal" z13 zarch optparm |
|
+e70000300082 vfaezbs VRR_VVV0U3 "vector find any element equal" z13 zarch optparm |
|
+e70000301082 vfaezhs VRR_VVV0U3 "vector find any element equal" z13 zarch optparm |
|
+e70000302082 vfaezfs VRR_VVV0U3 "vector find any element equal" z13 zarch optparm |
|
+e70000000080 vfee VRR_VVV0U0U "vector find element equal" z13 zarch optparm |
|
+e70000000080 vfeeb VRR_VVV0U0 "vector find element equal byte" z13 zarch optparm |
|
+e70000001080 vfeeh VRR_VVV0U0 "vector find element equal halfword" z13 zarch optparm |
|
+e70000002080 vfeef VRR_VVV0U0 "vector find element equal word" z13 zarch optparm |
|
+e70000100080 vfeebs VRR_VVV "vector find element equal byte" z13 zarch |
|
+e70000101080 vfeehs VRR_VVV "vector find element equal halfword" z13 zarch |
|
+e70000102080 vfeefs VRR_VVV "vector find element equal word" z13 zarch |
|
+e70000200080 vfeezb VRR_VVV "vector find element equal byte" z13 zarch |
|
+e70000201080 vfeezh VRR_VVV "vector find element equal halfword" z13 zarch |
|
+e70000202080 vfeezf VRR_VVV "vector find element equal word" z13 zarch |
|
+e70000300080 vfeezbs VRR_VVV "vector find element equal byte" z13 zarch |
|
+e70000301080 vfeezhs VRR_VVV "vector find element equal halfword" z13 zarch |
|
+e70000302080 vfeezfs VRR_VVV "vector find element equal word" z13 zarch |
|
+e70000000081 vfene VRR_VVV0U0U "vector find element not equal" z13 zarch optparm |
|
+e70000000081 vfeneb VRR_VVV0U0 "vector find element not equal byte" z13 zarch optparm |
|
+e70000001081 vfeneh VRR_VVV0U0 "vector find element not equal halfword" z13 zarch optparm |
|
+e70000002081 vfenef VRR_VVV0U0 "vector find element not equal word" z13 zarch optparm |
|
+e70000100081 vfenebs VRR_VVV "vector find element not equal byte" z13 zarch |
|
+e70000101081 vfenehs VRR_VVV "vector find element not equal halfword" z13 zarch |
|
+e70000102081 vfenefs VRR_VVV "vector find element not equal word" z13 zarch |
|
+e70000200081 vfenezb VRR_VVV "vector find element not equal byte" z13 zarch |
|
+e70000201081 vfenezh VRR_VVV "vector find element not equal halfword" z13 zarch |
|
+e70000202081 vfenezf VRR_VVV "vector find element not equal word" z13 zarch |
|
+e70000300081 vfenezbs VRR_VVV "vector find element not equal byte" z13 zarch |
|
+e70000301081 vfenezhs VRR_VVV "vector find element not equal halfword" z13 zarch |
|
+e70000302081 vfenezfs VRR_VVV "vector find element not equal word" z13 zarch |
|
+e7000000005c vistr VRR_VV0U0U "vector isolate string" z13 zarch optparm |
|
+e7000000005c vistrb VRR_VV0U2 "vector isolate string byte" z13 zarch optparm |
|
+e7000000105c vistrh VRR_VV0U2 "vector isolate string halfword" z13 zarch optparm |
|
+e7000000205c vistrf VRR_VV0U2 "vector isolate string word" z13 zarch optparm |
|
+e7000010005c vistrbs VRR_VV "vector isolate string byte" z13 zarch |
|
+e7000010105c vistrhs VRR_VV "vector isolate string halfword" z13 zarch |
|
+e7000010205c vistrfs VRR_VV "vector isolate string word" z13 zarch |
|
+e7000000008a vstrc VRR_VVVUU0V "vector string range compare" z13 zarch optparm |
|
+e7000000008a vstrcb VRR_VVVU0VB "vector string range compare byte" z13 zarch optparm |
|
+e7000100008a vstrch VRR_VVVU0VB "vector string range compare halfword" z13 zarch optparm |
|
+e7000200008a vstrcf VRR_VVVU0VB "vector string range compare word" z13 zarch optparm |
|
+e7000010008a vstrcbs VRR_VVVU0VB1 "vector string range compare byte" z13 zarch optparm |
|
+e7000110008a vstrchs VRR_VVVU0VB1 "vector string range compare halfword" z13 zarch optparm |
|
+e7000210008a vstrcfs VRR_VVVU0VB1 "vector string range compare word" z13 zarch optparm |
|
+e7000020008a vstrczb VRR_VVVU0VB2 "vector string range compare byte" z13 zarch optparm |
|
+e7000120008a vstrczh VRR_VVVU0VB2 "vector string range compare halfword" z13 zarch optparm |
|
+e7000220008a vstrczf VRR_VVVU0VB2 "vector string range compare word" z13 zarch optparm |
|
+e7000030008a vstrczbs VRR_VVVU0VB3 "vector string range compare byte" z13 zarch optparm |
|
+e7000130008a vstrczhs VRR_VVVU0VB3 "vector string range compare halfword" z13 zarch optparm |
|
+e7000230008a vstrczfs VRR_VVVU0VB3 "vector string range compare word" z13 zarch optparm |
|
+ |
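
The string instructions above carry the optparm flag, so their trailing mask operand may be omitted and is then assembled as zero. A minimal sketch, again assuming gas %v register notation; the second form should correspond to the vfaebs entry above, which pre-encodes that mask as 1:

    vfaeb %v1,%v2,%v3      # optional mask omitted, treated as 0
    vfaeb %v1,%v2,%v3,1    # optional mask given explicitly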
|
+# Chapter 24 - Vector Floating-Point Instructions
|
+e700000000e3 vfa VRR_VVV0UU "vector fp add" z13 zarch |
|
+e700000030e3 vfadb VRR_VVV "vector fp add" z13 zarch |
|
+e700000830e3 wfadb VRR_VVV "vector fp add" z13 zarch |
|
+e700000000cb wfc VRR_VV0UU "vector fp compare scalar" z13 zarch |
|
+e700000030cb wfcdb VRR_VV "vector fp compare scalar" z13 zarch |
|
+e700000000ca wfk VRR_VV0UU "vector fp compare and signal scalar" z13 zarch |
|
+e700000030ca wfkdb VRR_VV "vector fp compare and signal scalar" z13 zarch |
|
+e700000000e8 vfce VRR_VVV0UUU "vector fp compare equal" z13 zarch
|
+e700000030e8 vfcedb VRR_VVV "vector fp compare equal" z13 zarch |
|
+e700001030e8 vfcedbs VRR_VVV "vector fp compare equal" z13 zarch |
|
+e700000830e8 wfcedb VRR_VVV "vector fp compare equal" z13 zarch |
|
+e700001830e8 wfcedbs VRR_VVV "vector fp compare equal" z13 zarch |
|
+e700000000eb vfch VRR_VVV0UUU "vector fp compare high" z13 zarch |
|
+e700000030eb vfchdb VRR_VVV "vector fp compare high" z13 zarch |
|
+e700001030eb vfchdbs VRR_VVV "vector fp compare high" z13 zarch |
|
+e700000830eb wfchdb VRR_VVV "vector fp compare high" z13 zarch |
|
+e700001830eb wfchdbs VRR_VVV "vector fp compare high" z13 zarch |
|
+e700000000ea vfche VRR_VVV0UUU "vector fp compare high or equal" z13 zarch |
|
+e700000030ea vfchedb VRR_VVV "vector fp compare high or equal" z13 zarch |
|
+e700001030ea vfchedbs VRR_VVV "vector fp compare high or equal" z13 zarch |
|
+e700000830ea wfchedb VRR_VVV "vector fp compare high or equal" z13 zarch |
|
+e700001830ea wfchedbs VRR_VVV "vector fp compare high or equal" z13 zarch |
|
+e700000000c3 vcdg VRR_VV0UUU "vector fp convert from fixed 64 bit" z13 zarch |
|
+e700000030c3 vcdgb VRR_VV0UU "vector fp convert from fixed 64 bit" z13 zarch |
|
+e700000830c3 wcdgb VRR_VV0UU8 "vector fp convert from fixed 64 bit" z13 zarch |
|
+e700000000c1 vcdlg VRR_VV0UUU "vector fp convert from logical 64 bit" z13 zarch |
|
+e700000030c1 vcdlgb VRR_VV0UU "vector fp convert from logical 64 bit" z13 zarch |
|
+e700000830c1 wcdlgb VRR_VV0UU8 "vector fp convert from logical 64 bit" z13 zarch |
|
+e700000000c2 vcgd VRR_VV0UUU "vector fp convert to fixed 64 bit" z13 zarch |
|
+e700000030c2 vcgdb VRR_VV0UU "vector fp convert to fixed 64 bit" z13 zarch |
|
+e700000830c2 wcgdb VRR_VV0UU8 "vector fp convert to fixed 64 bit" z13 zarch |
|
+e700000000c0 vclgd VRR_VV0UUU "vector fp convert to logical 64 bit" z13 zarch |
|
+e700000030c0 vclgdb VRR_VV0UU "vector fp convert to logical 64 bit" z13 zarch |
|
+e700000830c0 wclgdb VRR_VV0UU8 "vector fp convert to logical 64 bit" z13 zarch |
|
+e700000000e5 vfd VRR_VVV0UU "vector fp divide" z13 zarch |
|
+e700000030e5 vfddb VRR_VVV "vector fp divide" z13 zarch |
|
+e700000830e5 wfddb VRR_VVV "vector fp divide" z13 zarch |
|
+e700000000c7 vfi VRR_VV0UUU "vector load fp integer" z13 zarch |
|
+e700000030c7 vfidb VRR_VV0UU "vector load fp integer" z13 zarch |
|
+e700000830c7 wfidb VRR_VV0UU8 "vector load fp integer" z13 zarch |
|
+e700000000c4 vlde VRR_VV0UU "vector fp load lengthened" z13 zarch |
|
+e700000020c4 vldeb VRR_VV "vector fp load lengthened" z13 zarch |
|
+e700000820c4 wldeb VRR_VV "vector fp load lengthened" z13 zarch |
|
+e700000000c5 vled VRR_VV0UUU "vector fp load rounded" z13 zarch |
|
+e700000030c5 vledb VRR_VV0UU "vector fp load rounded" z13 zarch |
|
+e700000830c5 wledb VRR_VV0UU8 "vector fp load rounded" z13 zarch |
|
+e700000000e7 vfm VRR_VVV0UU "vector fp multiply" z13 zarch |
|
+e700000030e7 vfmdb VRR_VVV "vector fp multiply" z13 zarch |
|
+e700000830e7 wfmdb VRR_VVV "vector fp multiply" z13 zarch |
|
+e7000000008f vfma VRR_VVVU0UV "vector fp multiply and add" z13 zarch |
|
+e7000300008f vfmadb VRR_VVVV "vector fp multiply and add" z13 zarch |
|
+e7000308008f wfmadb VRR_VVVV "vector fp multiply and add" z13 zarch |
|
+e7000000008e vfms VRR_VVVU0UV "vector fp multiply and subtract" z13 zarch |
|
+e7000300008e vfmsdb VRR_VVVV "vector fp multiply and subtract" z13 zarch |
|
+e7000308008e wfmsdb VRR_VVVV "vector fp multiply and subtract" z13 zarch |
|
+e700000000cc vfpso VRR_VV0UUU "vector fp perform sign operation" z13 zarch |
|
+e700000030cc vfpsodb VRR_VV0U2 "vector fp perform sign operation" z13 zarch |
|
+e700000830cc wfpsodb VRR_VV0U2 "vector fp perform sign operation" z13 zarch |
|
+e700000030cc vflcdb VRR_VV "vector fp perform sign operation" z13 zarch |
|
+e700000830cc wflcdb VRR_VV "vector fp perform sign operation" z13 zarch |
|
+e700001030cc vflndb VRR_VV "vector fp perform sign operation" z13 zarch |
|
+e700001830cc wflndb VRR_VV "vector fp perform sign operation" z13 zarch |
|
+e700002030cc vflpdb VRR_VV "vector fp perform sign operation" z13 zarch |
|
+e700002830cc wflpdb VRR_VV "vector fp perform sign operation" z13 zarch |
|
+e700000000ce vfsq VRR_VV0UU "vector fp square root" z13 zarch |
|
+e700000030ce vfsqdb VRR_VV "vector fp square root" z13 zarch |
|
+e700000830ce wfsqdb VRR_VV "vector fp square root" z13 zarch |
|
+e700000000e2 vfs VRR_VVV0UU "vector fp subtract" z13 zarch |
|
+e700000030e2 vfsdb VRR_VVV "vector fp subtract" z13 zarch |
|
+e700000830e2 wfsdb VRR_VVV "vector fp subtract" z13 zarch |
|
+e7000000004a vftci VRI_VVUUU "vector fp test data class immediate" z13 zarch |
|
+e7000000304a vftcidb VRI_VVU2 "vector fp test data class immediate" z13 zarch |
|
+e7000008304a wftcidb VRI_VVU2 "vector fp test data class immediate" z13 zarch
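
In the floating-point entries above, the ...db mnemonics are the long (64-bit) forms, with element-size mask 3 baked into the opcode (the 30 nibble), and the w-prefixed mnemonics additionally set the bit shown as 08 in the pattern, restricting the operation to element 0. A rough sketch, assuming gas %v register notation; the generic-form line is only meant to mirror the masks visible in the table:

    vfadb %v1,%v2,%v3        # operates on both doubleword elements
    wfadb %v1,%v2,%v3        # scalar form, element 0 only
    vfa   %v1,%v2,%v3,3,0    # generic mnemonic with explicit masks, should match vfadb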
|
|
|