From c0218c03294d692613596cdf361badaa63a700d6 Mon Sep 17 00:00:00 2001 From: Ulrich Hecht Date: Tue, 24 Aug 2010 13:23:23 +0200 Subject: [PATCH 15/17] S/390 support --- configure | 2 + cpu-all.h | 2 +- cpu-defs.h | 8 + cpu-exec.c | 14 +- default-configs/s390x-linux-user.mak | 1 + disas.c | 3 + linux-user/elfload.c | 18 + linux-user/main.c | 89 ++ linux-user/s390x/syscall.h | 25 + linux-user/s390x/syscall_nr.h | 348 +++++ linux-user/s390x/target_signal.h | 26 + linux-user/s390x/termbits.h | 283 ++++ linux-user/signal.c | 314 ++++ linux-user/syscall.c | 144 ++- linux-user/syscall_defs.h | 56 +- s390x.ld | 194 +++ scripts/qemu-binfmt-conf.sh | 5 +- target-s390x/cpu.h | 29 +- target-s390x/exec.h | 8 + target-s390x/helper.c | 25 +- target-s390x/helpers.h | 127 ++ target-s390x/op_helper.c | 1607 +++++++++++++++++++ target-s390x/translate.c | 2795 ++++++++++++++++++++++++++++++++++ tcg/tcg-op.h | 12 + tcg/tcg-opc.h | 2 + tcg/tcg.c | 6 + 26 files changed, 6102 insertions(+), 41 deletions(-) create mode 100644 default-configs/s390x-linux-user.mak create mode 100644 linux-user/s390x/syscall.h create mode 100644 linux-user/s390x/syscall_nr.h create mode 100644 linux-user/s390x/target_signal.h create mode 100644 linux-user/s390x/termbits.h create mode 100644 s390x.ld create mode 100644 target-s390x/helpers.h Index: qemu-0.14.1/configure =================================================================== --- qemu-0.14.1.orig/configure +++ qemu-0.14.1/configure @@ -1020,6 +1020,7 @@ sh4eb-linux-user \ sparc-linux-user \ sparc64-linux-user \ sparc32plus-linux-user \ +s390x-linux-user \ " fi # the following are Darwin specific @@ -3008,6 +3009,7 @@ case "$target_arch2" in target_phys_bits=64 ;; s390x) + target_nptl="yes" target_phys_bits=64 ;; *) Index: qemu-0.14.1/cpu-all.h =================================================================== --- qemu-0.14.1.orig/cpu-all.h +++ qemu-0.14.1/cpu-all.h @@ -138,7 +138,7 @@ typedef union { uint64_t ll; } CPU_DoubleU; -#ifdef TARGET_SPARC +#if defined(TARGET_SPARC) || defined(TARGET_S390X) typedef union { float128 q; #if defined(HOST_WORDS_BIGENDIAN) \ Index: qemu-0.14.1/cpu-defs.h =================================================================== --- qemu-0.14.1.orig/cpu-defs.h +++ qemu-0.14.1/cpu-defs.h @@ -148,6 +148,13 @@ typedef struct CPUWatchpoint { } CPUWatchpoint; #define CPU_TEMP_BUF_NLONGS 128 + +#ifdef CONFIG_USER_ONLY +#define MULTITHREAD uint32_t multithreaded; +#else +#define MULTITHREAD +#endif + #define CPU_COMMON \ struct TranslationBlock *current_tb; /* currently executing TB */ \ /* soft mmu support */ \ @@ -160,6 +167,7 @@ typedef struct CPUWatchpoint { memory was accessed */ \ uint32_t halted; /* Nonzero if the CPU is in suspend state */ \ uint32_t interrupt_request; \ + MULTITHREAD /* needs locking when accessing TBs */ \ volatile sig_atomic_t exit_request; \ CPU_COMMON_TLB \ struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \ Index: qemu-0.14.1/cpu-exec.c =================================================================== --- qemu-0.14.1.orig/cpu-exec.c +++ qemu-0.14.1/cpu-exec.c @@ -229,6 +229,9 @@ int cpu_exec(CPUState *env1) TranslationBlock *tb; uint8_t *tc_ptr; unsigned long next_tb; +#ifdef CONFIG_USER_ONLY + uint32_t multithreaded; +#endif if (cpu_halted(env1) == EXCP_HALTED) return EXCP_HALTED; @@ -573,7 +576,11 @@ int cpu_exec(CPUState *env1) #endif } #endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */ - spin_lock(&tb_lock); +#ifdef CONFIG_USER_ONLY + multithreaded = env->multithreaded; + if (multithreaded) +#endif + 
spin_lock(&tb_lock); tb = tb_find_fast(); /* Note: we do it here to avoid a gcc bug on Mac OS X when doing it in tb_find_slow */ @@ -595,7 +602,10 @@ int cpu_exec(CPUState *env1) if (next_tb != 0 && tb->page_addr[1] == -1) { tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb); } - spin_unlock(&tb_lock); +#ifdef CONFIG_USER_ONLY + if (multithreaded) +#endif + spin_unlock(&tb_lock); /* cpu_interrupt might be called while translating the TB, but before it is linked into a potentially Index: qemu-0.14.1/default-configs/s390x-linux-user.mak =================================================================== --- /dev/null +++ qemu-0.14.1/default-configs/s390x-linux-user.mak @@ -0,0 +1 @@ +# Default configuration for s390x-linux-user Index: qemu-0.14.1/disas.c =================================================================== --- qemu-0.14.1.orig/disas.c +++ qemu-0.14.1/disas.c @@ -215,6 +215,9 @@ void target_disas(FILE *out, target_ulon disasm_info.mach = bfd_mach_cris_v32; print_insn = print_insn_crisv32; } +#elif defined(TARGET_S390X) + disasm_info.mach = bfd_mach_s390_64; + print_insn = print_insn_s390; #elif defined(TARGET_MICROBLAZE) disasm_info.mach = bfd_arch_microblaze; print_insn = print_insn_microblaze; Index: qemu-0.14.1/linux-user/elfload.c =================================================================== --- qemu-0.14.1.orig/linux-user/elfload.c +++ qemu-0.14.1/linux-user/elfload.c @@ -793,6 +793,24 @@ static inline void init_thread(struct ta #endif /* TARGET_ALPHA */ +#ifdef TARGET_S390X + +#define ELF_START_MMAP (0x20000000000ULL) + +#define elf_check_arch(x) ( (x) == ELF_ARCH ) + +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2MSB +#define ELF_ARCH EM_S390 + +static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) +{ + regs->psw.addr = infop->entry; + regs->gprs[15] = infop->start_stack; +} + +#endif /* TARGET_S390X */ + #ifndef ELF_PLATFORM #define ELF_PLATFORM (NULL) #endif Index: qemu-0.14.1/linux-user/main.c =================================================================== --- qemu-0.14.1.orig/linux-user/main.c +++ qemu-0.14.1/linux-user/main.c @@ -2624,6 +2624,86 @@ void cpu_loop (CPUState *env) } #endif /* TARGET_ALPHA */ +#ifdef TARGET_S390X +void cpu_loop(CPUS390XState *env) +{ + int trapnr; + target_siginfo_t info; + + while (1) { + trapnr = cpu_s390x_exec (env); + + if ((trapnr & 0xffff0000) == EXCP_EXECUTE_SVC) { + int n = trapnr & 0xffff; + env->regs[2] = do_syscall(env, n, + env->regs[2], + env->regs[3], + env->regs[4], + env->regs[5], + env->regs[6], + env->regs[7]); + } + else switch (trapnr) { + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_DEBUG: + { + int sig; + + sig = gdb_handlesig (env, TARGET_SIGTRAP); + if (sig) { + info.si_signo = sig; + info.si_errno = 0; + info.si_code = TARGET_TRAP_BRKPT; + queue_signal(env, info.si_signo, &info); + } + } + break; + case EXCP_SVC: + { + int n = ldub(env->psw.addr - 1); + if (!n) n = env->regs[1]; /* syscalls > 255 */ + env->regs[2] = do_syscall(env, n, + env->regs[2], + env->regs[3], + env->regs[4], + env->regs[5], + env->regs[6], + env->regs[7]); + } + break; + case EXCP_ADDR: + { + info.si_signo = SIGSEGV; + info.si_errno = 0; + /* XXX: check env->error_code */ + info.si_code = TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->__excp_addr; + queue_signal(env, info.si_signo, &info); + } + break; + case EXCP_SPEC: + { + fprintf(stderr,"specification exception insn 0x%08x%04x\n", ldl(env->psw.addr), 
lduw(env->psw.addr + 4)); + info.si_signo = SIGILL; + info.si_errno = 0; + info.si_code = TARGET_ILL_ILLOPC; + info._sifields._sigfault._addr = env->__excp_addr; + queue_signal(env, info.si_signo, &info); + } + break; + default: + printf ("Unhandled trap: 0x%x\n", trapnr); + cpu_dump_state(env, stderr, fprintf, 0); + exit (1); + } + process_pending_signals (env); + } +} + +#endif /* TARGET_S390X */ + static void usage(void) { printf("qemu-" TARGET_ARCH " version " QEMU_VERSION QEMU_PKGVERSION ", Copyright (c) 2003-2008 Fabrice Bellard\n" @@ -3354,6 +3434,15 @@ int main(int argc, char **argv, char **e env->regs[15] = regs->acr; env->pc = regs->erp; } +#elif defined(TARGET_S390X) + { + int i; + for (i = 0; i < 16; i++) { + env->regs[i] = regs->gprs[i]; + } + env->psw.mask = regs->psw.mask; + env->psw.addr = regs->psw.addr; + } #else #error unsupported target CPU #endif Index: qemu-0.14.1/linux-user/s390x/syscall.h =================================================================== --- /dev/null +++ qemu-0.14.1/linux-user/s390x/syscall.h @@ -0,0 +1,25 @@ +/* this typedef defines how a Program Status Word looks like */ +typedef struct +{ + abi_ulong mask; + abi_ulong addr; +} __attribute__ ((aligned(8))) target_psw_t; + +/* + * The pt_regs struct defines the way the registers are stored on + * the stack during a system call. + */ + +#define TARGET_NUM_GPRS 16 + +struct target_pt_regs +{ + abi_ulong args[1]; + target_psw_t psw; + abi_ulong gprs[TARGET_NUM_GPRS]; + abi_ulong orig_gpr2; + unsigned short ilc; + unsigned short trap; +}; + +#define UNAME_MACHINE "s390x" Index: qemu-0.14.1/linux-user/s390x/syscall_nr.h =================================================================== --- /dev/null +++ qemu-0.14.1/linux-user/s390x/syscall_nr.h @@ -0,0 +1,348 @@ +/* + * This file contains the system call numbers. 
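+ * Numbering follows the Linux kernel's asm-s390 unistd.h; syscalls that exist
+ * only on 31-bit or only on 64-bit s390x are split out at the bottom of this file.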
+ */ + +#define TARGET_NR_exit 1 +#define TARGET_NR_fork 2 +#define TARGET_NR_read 3 +#define TARGET_NR_write 4 +#define TARGET_NR_open 5 +#define TARGET_NR_close 6 +#define TARGET_NR_restart_syscall 7 +#define TARGET_NR_creat 8 +#define TARGET_NR_link 9 +#define TARGET_NR_unlink 10 +#define TARGET_NR_execve 11 +#define TARGET_NR_chdir 12 +#define TARGET_NR_mknod 14 +#define TARGET_NR_chmod 15 +#define TARGET_NR_lseek 19 +#define TARGET_NR_getpid 20 +#define TARGET_NR_mount 21 +#define TARGET_NR_umount 22 +#define TARGET_NR_ptrace 26 +#define TARGET_NR_alarm 27 +#define TARGET_NR_pause 29 +#define TARGET_NR_utime 30 +#define TARGET_NR_access 33 +#define TARGET_NR_nice 34 +#define TARGET_NR_sync 36 +#define TARGET_NR_kill 37 +#define TARGET_NR_rename 38 +#define TARGET_NR_mkdir 39 +#define TARGET_NR_rmdir 40 +#define TARGET_NR_dup 41 +#define TARGET_NR_pipe 42 +#define TARGET_NR_times 43 +#define TARGET_NR_brk 45 +#define TARGET_NR_signal 48 +#define TARGET_NR_acct 51 +#define TARGET_NR_umount2 52 +#define TARGET_NR_ioctl 54 +#define TARGET_NR_fcntl 55 +#define TARGET_NR_setpgid 57 +#define TARGET_NR_umask 60 +#define TARGET_NR_chroot 61 +#define TARGET_NR_ustat 62 +#define TARGET_NR_dup2 63 +#define TARGET_NR_getppid 64 +#define TARGET_NR_getpgrp 65 +#define TARGET_NR_setsid 66 +#define TARGET_NR_sigaction 67 +#define TARGET_NR_sigsuspend 72 +#define TARGET_NR_sigpending 73 +#define TARGET_NR_sethostname 74 +#define TARGET_NR_setrlimit 75 +#define TARGET_NR_getrusage 77 +#define TARGET_NR_gettimeofday 78 +#define TARGET_NR_settimeofday 79 +#define TARGET_NR_symlink 83 +#define TARGET_NR_readlink 85 +#define TARGET_NR_uselib 86 +#define TARGET_NR_swapon 87 +#define TARGET_NR_reboot 88 +#define TARGET_NR_readdir 89 +#define TARGET_NR_mmap 90 +#define TARGET_NR_munmap 91 +#define TARGET_NR_truncate 92 +#define TARGET_NR_ftruncate 93 +#define TARGET_NR_fchmod 94 +#define TARGET_NR_getpriority 96 +#define TARGET_NR_setpriority 97 +#define TARGET_NR_statfs 99 +#define TARGET_NR_fstatfs 100 +#define TARGET_NR_socketcall 102 +#define TARGET_NR_syslog 103 +#define TARGET_NR_setitimer 104 +#define TARGET_NR_getitimer 105 +#define TARGET_NR_stat 106 +#define TARGET_NR_lstat 107 +#define TARGET_NR_fstat 108 +#define TARGET_NR_lookup_dcookie 110 +#define TARGET_NR_vhangup 111 +#define TARGET_NR_idle 112 +#define TARGET_NR_wait4 114 +#define TARGET_NR_swapoff 115 +#define TARGET_NR_sysinfo 116 +#define TARGET_NR_ipc 117 +#define TARGET_NR_fsync 118 +#define TARGET_NR_sigreturn 119 +#define TARGET_NR_clone 120 +#define TARGET_NR_setdomainname 121 +#define TARGET_NR_uname 122 +#define TARGET_NR_adjtimex 124 +#define TARGET_NR_mprotect 125 +#define TARGET_NR_sigprocmask 126 +#define TARGET_NR_create_module 127 +#define TARGET_NR_init_module 128 +#define TARGET_NR_delete_module 129 +#define TARGET_NR_get_kernel_syms 130 +#define TARGET_NR_quotactl 131 +#define TARGET_NR_getpgid 132 +#define TARGET_NR_fchdir 133 +#define TARGET_NR_bdflush 134 +#define TARGET_NR_sysfs 135 +#define TARGET_NR_personality 136 +#define TARGET_NR_afs_syscall 137 /* Syscall for Andrew File System */ +#define TARGET_NR_getdents 141 +#define TARGET_NR_flock 143 +#define TARGET_NR_msync 144 +#define TARGET_NR_readv 145 +#define TARGET_NR_writev 146 +#define TARGET_NR_getsid 147 +#define TARGET_NR_fdatasync 148 +#define TARGET_NR__sysctl 149 +#define TARGET_NR_mlock 150 +#define TARGET_NR_munlock 151 +#define TARGET_NR_mlockall 152 +#define TARGET_NR_munlockall 153 +#define TARGET_NR_sched_setparam 154 +#define TARGET_NR_sched_getparam 
155 +#define TARGET_NR_sched_setscheduler 156 +#define TARGET_NR_sched_getscheduler 157 +#define TARGET_NR_sched_yield 158 +#define TARGET_NR_sched_get_priority_max 159 +#define TARGET_NR_sched_get_priority_min 160 +#define TARGET_NR_sched_rr_get_interval 161 +#define TARGET_NR_nanosleep 162 +#define TARGET_NR_mremap 163 +#define TARGET_NR_query_module 167 +#define TARGET_NR_poll 168 +#define TARGET_NR_nfsservctl 169 +#define TARGET_NR_prctl 172 +#define TARGET_NR_rt_sigreturn 173 +#define TARGET_NR_rt_sigaction 174 +#define TARGET_NR_rt_sigprocmask 175 +#define TARGET_NR_rt_sigpending 176 +#define TARGET_NR_rt_sigtimedwait 177 +#define TARGET_NR_rt_sigqueueinfo 178 +#define TARGET_NR_rt_sigsuspend 179 +#define TARGET_NR_pread64 180 +#define TARGET_NR_pwrite64 181 +#define TARGET_NR_getcwd 183 +#define TARGET_NR_capget 184 +#define TARGET_NR_capset 185 +#define TARGET_NR_sigaltstack 186 +#define TARGET_NR_sendfile 187 +#define TARGET_NR_getpmsg 188 +#define TARGET_NR_putpmsg 189 +#define TARGET_NR_vfork 190 +#define TARGET_NR_pivot_root 217 +#define TARGET_NR_mincore 218 +#define TARGET_NR_madvise 219 +#define TARGET_NR_getdents64 220 +#define TARGET_NR_readahead 222 +#define TARGET_NR_setxattr 224 +#define TARGET_NR_lsetxattr 225 +#define TARGET_NR_fsetxattr 226 +#define TARGET_NR_getxattr 227 +#define TARGET_NR_lgetxattr 228 +#define TARGET_NR_fgetxattr 229 +#define TARGET_NR_listxattr 230 +#define TARGET_NR_llistxattr 231 +#define TARGET_NR_flistxattr 232 +#define TARGET_NR_removexattr 233 +#define TARGET_NR_lremovexattr 234 +#define TARGET_NR_fremovexattr 235 +#define TARGET_NR_gettid 236 +#define TARGET_NR_tkill 237 +#define TARGET_NR_futex 238 +#define TARGET_NR_sched_setaffinity 239 +#define TARGET_NR_sched_getaffinity 240 +#define TARGET_NR_tgkill 241 +/* Number 242 is reserved for tux */ +#define TARGET_NR_io_setup 243 +#define TARGET_NR_io_destroy 244 +#define TARGET_NR_io_getevents 245 +#define TARGET_NR_io_submit 246 +#define TARGET_NR_io_cancel 247 +#define TARGET_NR_exit_group 248 +#define TARGET_NR_epoll_create 249 +#define TARGET_NR_epoll_ctl 250 +#define TARGET_NR_epoll_wait 251 +#define TARGET_NR_set_tid_address 252 +#define TARGET_NR_fadvise64 253 +#define TARGET_NR_timer_create 254 +#define TARGET_NR_timer_settime (TARGET_NR_timer_create+1) +#define TARGET_NR_timer_gettime (TARGET_NR_timer_create+2) +#define TARGET_NR_timer_getoverrun (TARGET_NR_timer_create+3) +#define TARGET_NR_timer_delete (TARGET_NR_timer_create+4) +#define TARGET_NR_clock_settime (TARGET_NR_timer_create+5) +#define TARGET_NR_clock_gettime (TARGET_NR_timer_create+6) +#define TARGET_NR_clock_getres (TARGET_NR_timer_create+7) +#define TARGET_NR_clock_nanosleep (TARGET_NR_timer_create+8) +/* Number 263 is reserved for vserver */ +#define TARGET_NR_statfs64 265 +#define TARGET_NR_fstatfs64 266 +#define TARGET_NR_remap_file_pages 267 +/* Number 268 is reserved for new sys_mbind */ +/* Number 269 is reserved for new sys_get_mempolicy */ +/* Number 270 is reserved for new sys_set_mempolicy */ +#define TARGET_NR_mq_open 271 +#define TARGET_NR_mq_unlink 272 +#define TARGET_NR_mq_timedsend 273 +#define TARGET_NR_mq_timedreceive 274 +#define TARGET_NR_mq_notify 275 +#define TARGET_NR_mq_getsetattr 276 +#define TARGET_NR_kexec_load 277 +#define TARGET_NR_add_key 278 +#define TARGET_NR_request_key 279 +#define TARGET_NR_keyctl 280 +#define TARGET_NR_waitid 281 +#define TARGET_NR_ioprio_set 282 +#define TARGET_NR_ioprio_get 283 +#define TARGET_NR_inotify_init 284 +#define TARGET_NR_inotify_add_watch 285 +#define 
TARGET_NR_inotify_rm_watch 286 +/* Number 287 is reserved for new sys_migrate_pages */ +#define TARGET_NR_openat 288 +#define TARGET_NR_mkdirat 289 +#define TARGET_NR_mknodat 290 +#define TARGET_NR_fchownat 291 +#define TARGET_NR_futimesat 292 +#define TARGET_NR_unlinkat 294 +#define TARGET_NR_renameat 295 +#define TARGET_NR_linkat 296 +#define TARGET_NR_symlinkat 297 +#define TARGET_NR_readlinkat 298 +#define TARGET_NR_fchmodat 299 +#define TARGET_NR_faccessat 300 +#define TARGET_NR_pselect6 301 +#define TARGET_NR_ppoll 302 +#define TARGET_NR_unshare 303 +#define TARGET_NR_set_robust_list 304 +#define TARGET_NR_get_robust_list 305 +#define TARGET_NR_splice 306 +#define TARGET_NR_sync_file_range 307 +#define TARGET_NR_tee 308 +#define TARGET_NR_vmsplice 309 +/* Number 310 is reserved for new sys_move_pages */ +#define TARGET_NR_getcpu 311 +#define TARGET_NR_epoll_pwait 312 +#define TARGET_NR_utimes 313 +#define TARGET_NR_fallocate 314 +#define TARGET_NR_utimensat 315 +#define TARGET_NR_signalfd 316 +#define TARGET_NR_timerfd 317 +#define TARGET_NR_eventfd 318 +#define TARGET_NR_timerfd_create 319 +#define TARGET_NR_timerfd_settime 320 +#define TARGET_NR_timerfd_gettime 321 +#define TARGET_NR_signalfd4 322 +#define TARGET_NR_eventfd2 323 +#define TARGET_NR_inotify_init1 324 +#define TARGET_NR_pipe2 325 +#define TARGET_NR_dup3 326 +#define TARGET_NR_epoll_create1 327 +#define NR_syscalls 328 + +/* + * There are some system calls that are not present on 64 bit, some + * have a different name although they do the same (e.g. TARGET_NR_chown32 + * is TARGET_NR_chown on 64 bit). + */ +#ifndef TARGET_S390X + +#define TARGET_NR_time 13 +#define TARGET_NR_lchown 16 +#define TARGET_NR_setuid 23 +#define TARGET_NR_getuid 24 +#define TARGET_NR_stime 25 +#define TARGET_NR_setgid 46 +#define TARGET_NR_getgid 47 +#define TARGET_NR_geteuid 49 +#define TARGET_NR_getegid 50 +#define TARGET_NR_setreuid 70 +#define TARGET_NR_setregid 71 +#define TARGET_NR_getrlimit 76 +#define TARGET_NR_getgroups 80 +#define TARGET_NR_setgroups 81 +#define TARGET_NR_fchown 95 +#define TARGET_NR_ioperm 101 +#define TARGET_NR_setfsuid 138 +#define TARGET_NR_setfsgid 139 +#define TARGET_NR__llseek 140 +#define TARGET_NR__newselect 142 +#define TARGET_NR_setresuid 164 +#define TARGET_NR_getresuid 165 +#define TARGET_NR_setresgid 170 +#define TARGET_NR_getresgid 171 +#define TARGET_NR_chown 182 +#define TARGET_NR_ugetrlimit 191 /* SuS compliant getrlimit */ +#define TARGET_NR_mmap2 192 +#define TARGET_NR_truncate64 193 +#define TARGET_NR_ftruncate64 194 +#define TARGET_NR_stat64 195 +#define TARGET_NR_lstat64 196 +#define TARGET_NR_fstat64 197 +#define TARGET_NR_lchown32 198 +#define TARGET_NR_getuid32 199 +#define TARGET_NR_getgid32 200 +#define TARGET_NR_geteuid32 201 +#define TARGET_NR_getegid32 202 +#define TARGET_NR_setreuid32 203 +#define TARGET_NR_setregid32 204 +#define TARGET_NR_getgroups32 205 +#define TARGET_NR_setgroups32 206 +#define TARGET_NR_fchown32 207 +#define TARGET_NR_setresuid32 208 +#define TARGET_NR_getresuid32 209 +#define TARGET_NR_setresgid32 210 +#define TARGET_NR_getresgid32 211 +#define TARGET_NR_chown32 212 +#define TARGET_NR_setuid32 213 +#define TARGET_NR_setgid32 214 +#define TARGET_NR_setfsuid32 215 +#define TARGET_NR_setfsgid32 216 +#define TARGET_NR_fcntl64 221 +#define TARGET_NR_sendfile64 223 +#define TARGET_NR_fadvise64_64 264 +#define TARGET_NR_fstatat64 293 + +#else + +#define TARGET_NR_select 142 +#define TARGET_NR_getrlimit 191 /* SuS compliant getrlimit */ +#define TARGET_NR_lchown 198 
+#define TARGET_NR_getuid 199 +#define TARGET_NR_getgid 200 +#define TARGET_NR_geteuid 201 +#define TARGET_NR_getegid 202 +#define TARGET_NR_setreuid 203 +#define TARGET_NR_setregid 204 +#define TARGET_NR_getgroups 205 +#define TARGET_NR_setgroups 206 +#define TARGET_NR_fchown 207 +#define TARGET_NR_setresuid 208 +#define TARGET_NR_getresuid 209 +#define TARGET_NR_setresgid 210 +#define TARGET_NR_getresgid 211 +#define TARGET_NR_chown 212 +#define TARGET_NR_setuid 213 +#define TARGET_NR_setgid 214 +#define TARGET_NR_setfsuid 215 +#define TARGET_NR_setfsgid 216 +#define TARGET_NR_newfstatat 293 + +#endif + Index: qemu-0.14.1/linux-user/s390x/target_signal.h =================================================================== --- /dev/null +++ qemu-0.14.1/linux-user/s390x/target_signal.h @@ -0,0 +1,26 @@ +#ifndef TARGET_SIGNAL_H +#define TARGET_SIGNAL_H + +#include "cpu.h" + +typedef struct target_sigaltstack { + abi_ulong ss_sp; + int ss_flags; + abi_ulong ss_size; +} target_stack_t; + +/* + * sigaltstack controls + */ +#define TARGET_SS_ONSTACK 1 +#define TARGET_SS_DISABLE 2 + +#define TARGET_MINSIGSTKSZ 2048 +#define TARGET_SIGSTKSZ 8192 + +static inline abi_ulong get_sp_from_cpustate(CPUS390XState *state) +{ + return state->regs[15]; +} + +#endif /* TARGET_SIGNAL_H */ Index: qemu-0.14.1/linux-user/s390x/termbits.h =================================================================== --- /dev/null +++ qemu-0.14.1/linux-user/s390x/termbits.h @@ -0,0 +1,283 @@ +/* + * include/asm-s390/termbits.h + * + * S390 version + * + * Derived from "include/asm-i386/termbits.h" + */ + +#define TARGET_NCCS 19 +struct target_termios { + unsigned int c_iflag; /* input mode flags */ + unsigned int c_oflag; /* output mode flags */ + unsigned int c_cflag; /* control mode flags */ + unsigned int c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[TARGET_NCCS]; /* control characters */ +}; + +struct target_termios2 { + unsigned int c_iflag; /* input mode flags */ + unsigned int c_oflag; /* output mode flags */ + unsigned int c_cflag; /* control mode flags */ + unsigned int c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[TARGET_NCCS]; /* control characters */ + unsigned int c_ispeed; /* input speed */ + unsigned int c_ospeed; /* output speed */ +}; + +struct target_ktermios { + unsigned int c_iflag; /* input mode flags */ + unsigned int c_oflag; /* output mode flags */ + unsigned int c_cflag; /* control mode flags */ + unsigned int c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[TARGET_NCCS]; /* control characters */ + unsigned int c_ispeed; /* input speed */ + unsigned int c_ospeed; /* output speed */ +}; + +/* c_cc characters */ +#define TARGET_VINTR 0 +#define TARGET_VQUIT 1 +#define TARGET_VERASE 2 +#define TARGET_VKILL 3 +#define TARGET_VEOF 4 +#define TARGET_VTIME 5 +#define TARGET_VMIN 6 +#define TARGET_VSWTC 7 +#define TARGET_VSTART 8 +#define TARGET_VSTOP 9 +#define TARGET_VSUSP 10 +#define TARGET_VEOL 11 +#define TARGET_VREPRINT 12 +#define TARGET_VDISCARD 13 +#define TARGET_VWERASE 14 +#define TARGET_VLNEXT 15 +#define TARGET_VEOL2 16 + +/* c_iflag bits */ +#define TARGET_IGNBRK 0000001 +#define TARGET_BRKINT 0000002 +#define TARGET_IGNPAR 0000004 +#define TARGET_PARMRK 0000010 +#define TARGET_INPCK 0000020 +#define TARGET_ISTRIP 0000040 +#define TARGET_INLCR 0000100 +#define TARGET_IGNCR 0000200 +#define TARGET_ICRNL 0000400 +#define TARGET_IUCLC 0001000 
+#define TARGET_IXON 0002000 +#define TARGET_IXANY 0004000 +#define TARGET_IXOFF 0010000 +#define TARGET_IMAXBEL 0020000 +#define TARGET_IUTF8 0040000 + +/* c_oflag bits */ +#define TARGET_OPOST 0000001 +#define TARGET_OLCUC 0000002 +#define TARGET_ONLCR 0000004 +#define TARGET_OCRNL 0000010 +#define TARGET_ONOCR 0000020 +#define TARGET_ONLRET 0000040 +#define TARGET_OFILL 0000100 +#define TARGET_OFDEL 0000200 +#define TARGET_NLDLY 0000400 +#define TARGET_NL0 0000000 +#define TARGET_NL1 0000400 +#define TARGET_CRDLY 0003000 +#define TARGET_CR0 0000000 +#define TARGET_CR1 0001000 +#define TARGET_CR2 0002000 +#define TARGET_CR3 0003000 +#define TARGET_TABDLY 0014000 +#define TARGET_TAB0 0000000 +#define TARGET_TAB1 0004000 +#define TARGET_TAB2 0010000 +#define TARGET_TAB3 0014000 +#define TARGET_XTABS 0014000 +#define TARGET_BSDLY 0020000 +#define TARGET_BS0 0000000 +#define TARGET_BS1 0020000 +#define TARGET_VTDLY 0040000 +#define TARGET_VT0 0000000 +#define TARGET_VT1 0040000 +#define TARGET_FFDLY 0100000 +#define TARGET_FF0 0000000 +#define TARGET_FF1 0100000 + +/* c_cflag bit meaning */ +#define TARGET_CBAUD 0010017 +#define TARGET_B0 0000000 /* hang up */ +#define TARGET_B50 0000001 +#define TARGET_B75 0000002 +#define TARGET_B110 0000003 +#define TARGET_B134 0000004 +#define TARGET_B150 0000005 +#define TARGET_B200 0000006 +#define TARGET_B300 0000007 +#define TARGET_B600 0000010 +#define TARGET_B1200 0000011 +#define TARGET_B1800 0000012 +#define TARGET_B2400 0000013 +#define TARGET_B4800 0000014 +#define TARGET_B9600 0000015 +#define TARGET_B19200 0000016 +#define TARGET_B38400 0000017 +#define TARGET_EXTA B19200 +#define TARGET_EXTB B38400 +#define TARGET_CSIZE 0000060 +#define TARGET_CS5 0000000 +#define TARGET_CS6 0000020 +#define TARGET_CS7 0000040 +#define TARGET_CS8 0000060 +#define TARGET_CSTOPB 0000100 +#define TARGET_CREAD 0000200 +#define TARGET_PARENB 0000400 +#define TARGET_PARODD 0001000 +#define TARGET_HUPCL 0002000 +#define TARGET_CLOCAL 0004000 +#define TARGET_CBAUDEX 0010000 +#define TARGET_BOTHER 0010000 +#define TARGET_B57600 0010001 +#define TARGET_B115200 0010002 +#define TARGET_B230400 0010003 +#define TARGET_B460800 0010004 +#define TARGET_B500000 0010005 +#define TARGET_B576000 0010006 +#define TARGET_B921600 0010007 +#define TARGET_B1000000 0010010 +#define TARGET_B1152000 0010011 +#define TARGET_B1500000 0010012 +#define TARGET_B2000000 0010013 +#define TARGET_B2500000 0010014 +#define TARGET_B3000000 0010015 +#define TARGET_B3500000 0010016 +#define TARGET_B4000000 0010017 +#define TARGET_CIBAUD 002003600000 /* input baud rate */ +#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */ +#define TARGET_CRTSCTS 020000000000 /* flow control */ + +#define TARGET_IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ + +/* c_lflag bits */ +#define TARGET_ISIG 0000001 +#define TARGET_ICANON 0000002 +#define TARGET_XCASE 0000004 +#define TARGET_ECHO 0000010 +#define TARGET_ECHOE 0000020 +#define TARGET_ECHOK 0000040 +#define TARGET_ECHONL 0000100 +#define TARGET_NOFLSH 0000200 +#define TARGET_TOSTOP 0000400 +#define TARGET_ECHOCTL 0001000 +#define TARGET_ECHOPRT 0002000 +#define TARGET_ECHOKE 0004000 +#define TARGET_FLUSHO 0010000 +#define TARGET_PENDIN 0040000 +#define TARGET_IEXTEN 0100000 + +/* tcflow() and TCXONC use these */ +#define TARGET_TCOOFF 0 +#define TARGET_TCOON 1 +#define TARGET_TCIOFF 2 +#define TARGET_TCION 3 + +/* tcflush() and TCFLSH use these */ +#define TARGET_TCIFLUSH 0 +#define TARGET_TCOFLUSH 1 +#define TARGET_TCIOFLUSH 2 + +/* tcsetattr 
uses these */ +#define TARGET_TCSANOW 0 +#define TARGET_TCSADRAIN 1 +#define TARGET_TCSAFLUSH 2 + +/* + * include/asm-s390/ioctls.h + * + * S390 version + * + * Derived from "include/asm-i386/ioctls.h" + */ + +/* 0x54 is just a magic number to make these relatively unique ('T') */ + +#define TARGET_TCGETS 0x5401 +#define TARGET_TCSETS 0x5402 +#define TARGET_TCSETSW 0x5403 +#define TARGET_TCSETSF 0x5404 +#define TARGET_TCGETA 0x5405 +#define TARGET_TCSETA 0x5406 +#define TARGET_TCSETAW 0x5407 +#define TARGET_TCSETAF 0x5408 +#define TARGET_TCSBRK 0x5409 +#define TARGET_TCXONC 0x540A +#define TARGET_TCFLSH 0x540B +#define TARGET_TIOCEXCL 0x540C +#define TARGET_TIOCNXCL 0x540D +#define TARGET_TIOCSCTTY 0x540E +#define TARGET_TIOCGPGRP 0x540F +#define TARGET_TIOCSPGRP 0x5410 +#define TARGET_TIOCOUTQ 0x5411 +#define TARGET_TIOCSTI 0x5412 +#define TARGET_TIOCGWINSZ 0x5413 +#define TARGET_TIOCSWINSZ 0x5414 +#define TARGET_TIOCMGET 0x5415 +#define TARGET_TIOCMBIS 0x5416 +#define TARGET_TIOCMBIC 0x5417 +#define TARGET_TIOCMSET 0x5418 +#define TARGET_TIOCGSOFTCAR 0x5419 +#define TARGET_TIOCSSOFTCAR 0x541A +#define TARGET_FIONREAD 0x541B +#define TARGET_TIOCINQ FIONREAD +#define TARGET_TIOCLINUX 0x541C +#define TARGET_TIOCCONS 0x541D +#define TARGET_TIOCGSERIAL 0x541E +#define TARGET_TIOCSSERIAL 0x541F +#define TARGET_TIOCPKT 0x5420 +#define TARGET_FIONBIO 0x5421 +#define TARGET_TIOCNOTTY 0x5422 +#define TARGET_TIOCSETD 0x5423 +#define TARGET_TIOCGETD 0x5424 +#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */ +#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */ +#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TARGET_TCGETS2 _IOR('T',0x2A, struct termios2) +#define TARGET_TCSETS2 _IOW('T',0x2B, struct termios2) +#define TARGET_TCSETSW2 _IOW('T',0x2C, struct termios2) +#define TARGET_TCSETSF2 _IOW('T',0x2D, struct termios2) +#define TARGET_TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TARGET_TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TARGET_TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ + +#define TARGET_FIONCLEX 0x5450 /* these numbers need to be adjusted. 
*/ +#define TARGET_FIOCLEX 0x5451 +#define TARGET_FIOASYNC 0x5452 +#define TARGET_TIOCSERCONFIG 0x5453 +#define TARGET_TIOCSERGWILD 0x5454 +#define TARGET_TIOCSERSWILD 0x5455 +#define TARGET_TIOCGLCKTRMIOS 0x5456 +#define TARGET_TIOCSLCKTRMIOS 0x5457 +#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */ +#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TARGET_TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TARGET_TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ +#define TARGET_FIOQSIZE 0x545E + +/* Used for packet mode */ +#define TARGET_TIOCPKT_DATA 0 +#define TARGET_TIOCPKT_FLUSHREAD 1 +#define TARGET_TIOCPKT_FLUSHWRITE 2 +#define TARGET_TIOCPKT_STOP 4 +#define TARGET_TIOCPKT_START 8 +#define TARGET_TIOCPKT_NOSTOP 16 +#define TARGET_TIOCPKT_DOSTOP 32 + +#define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */ + Index: qemu-0.14.1/linux-user/signal.c =================================================================== --- qemu-0.14.1.orig/linux-user/signal.c +++ qemu-0.14.1/linux-user/signal.c @@ -3614,6 +3614,320 @@ long do_rt_sigreturn(CPUState *env) return -TARGET_ENOSYS; } +#elif defined(TARGET_S390X) + +#define __NUM_GPRS 16 +#define __NUM_FPRS 16 +#define __NUM_ACRS 16 + +#define S390_SYSCALL_SIZE 2 +#define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ + +#define _SIGCONTEXT_NSIG 64 +#define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ +#define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) +#define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) +#define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ +#define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) + +typedef struct +{ + target_psw_t psw; + target_ulong gprs[__NUM_GPRS]; + unsigned int acrs[__NUM_ACRS]; +} target_s390_regs_common; + +typedef struct +{ + unsigned int fpc; + double fprs[__NUM_FPRS]; +} target_s390_fp_regs; + +typedef struct +{ + target_s390_regs_common regs; + target_s390_fp_regs fpregs; +} target_sigregs; + +struct target_sigcontext +{ + target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; + target_sigregs *sregs; +}; + +typedef struct +{ + uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; + struct target_sigcontext sc; + target_sigregs sregs; + int signo; + uint8_t retcode[S390_SYSCALL_SIZE]; +} sigframe; + +struct target_ucontext { + target_ulong uc_flags; + struct target_ucontext *uc_link; + target_stack_t uc_stack; + target_sigregs uc_mcontext; + target_sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +typedef struct +{ + uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; + uint8_t retcode[S390_SYSCALL_SIZE]; + struct target_siginfo info; + struct target_ucontext uc; +} rt_sigframe; + +static inline abi_ulong +get_sigframe(struct target_sigaction *ka, CPUState *env, size_t frame_size) +{ + abi_ulong sp; + + /* Default to using normal stack */ + sp = env->regs[15]; + + /* This is the X/Open sanctioned signal stack switching. */ + if (ka->sa_flags & TARGET_SA_ONSTACK) { + if (! sas_ss_flags(sp)) + sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; + } + + /* This is the legacy signal stack switching. 
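+        (in the kernel this path also requires !user_mode(regs); that check has no
+        equivalent under user-mode emulation, so the branch below is disabled with a
+        constant 0, see the FIXME.)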
*/ + else if (/* FIXME !user_mode(regs) */ 0 && + !(ka->sa_flags & TARGET_SA_RESTORER) && + ka->sa_restorer) { + sp = (abi_ulong) ka->sa_restorer; + } + + return (sp - frame_size) & -8ul; +} + +static void save_sigregs(CPUState *env, target_sigregs *sregs) +{ + int i; + //save_access_regs(current->thread.acrs); FIXME + + /* Copy a 'clean' PSW mask to the user to avoid leaking + information about whether PER is currently on. */ + __put_user(env->psw.mask, &sregs->regs.psw.mask); + __put_user(env->psw.addr, &sregs->regs.psw.addr); + for (i = 0; i < 16; i++) + __put_user(env->regs[i], &sregs->regs.gprs[i]); + for (i = 0; i < 16; i++) + __put_user(env->aregs[i], &sregs->regs.acrs[i]); + /* + * We have to store the fp registers to current->thread.fp_regs + * to merge them with the emulated registers. + */ + //save_fp_regs(¤t->thread.fp_regs); FIXME + for (i = 0; i < 16; i++) + __put_user(env->fregs[i].ll, &sregs->fpregs.fprs[i]); +} + +static void setup_frame(int sig, struct target_sigaction *ka, + target_sigset_t *set, CPUState *env) +{ + sigframe *frame; + abi_ulong frame_addr; + + frame_addr = get_sigframe(ka, env, sizeof *frame); + qemu_log("%s: frame_addr 0x%lx\n", __FUNCTION__, frame_addr); + if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) + goto give_sigsegv; + + qemu_log("%s: 1\n", __FUNCTION__); + if (__put_user(set->sig[0], &frame->sc.oldmask[0])) + goto give_sigsegv; + + save_sigregs(env, &frame->sregs); + + __put_user((abi_ulong)&frame->sregs, (abi_ulong *)&frame->sc.sregs); + + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + if (ka->sa_flags & TARGET_SA_RESTORER) { + env->regs[14] = (unsigned long) + ka->sa_restorer | PSW_ADDR_AMODE; + } else { + env->regs[14] = (unsigned long) + frame->retcode | PSW_ADDR_AMODE; + if (__put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, + (uint16_t *)(frame->retcode))) + goto give_sigsegv; + } + + /* Set up backchain. */ + if (__put_user(env->regs[15], (abi_ulong *) frame)) + goto give_sigsegv; + + /* Set up registers for signal handler */ + env->regs[15] = (target_ulong) frame; + env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; + + env->regs[2] = sig; //map_signal(sig); + env->regs[3] = (target_ulong) &frame->sc; + + /* We forgot to include these in the sigcontext. + To avoid breaking binary compatibility, they are passed as args. */ + env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; + env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; + + /* Place signal number on stack to allow backtrace from handler. */ + if (__put_user(env->regs[2], (int *) &frame->signo)) + goto give_sigsegv; + unlock_user_struct(frame, frame_addr, 1); + return; + +give_sigsegv: + qemu_log("%s: give_sigsegv\n", __FUNCTION__); + unlock_user_struct(frame, frame_addr, 1); + force_sig(TARGET_SIGSEGV); +} + +static void setup_rt_frame(int sig, struct target_sigaction *ka, + target_siginfo_t *info, + target_sigset_t *set, CPUState *env) +{ + int i; + rt_sigframe *frame; + abi_ulong frame_addr; + + frame_addr = get_sigframe(ka, env, sizeof *frame); + qemu_log("%s: frame_addr 0x%lx\n", __FUNCTION__, frame_addr); + if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) + goto give_sigsegv; + + qemu_log("%s: 1\n", __FUNCTION__); + if (copy_siginfo_to_user(&frame->info, info)) + goto give_sigsegv; + + /* Create the ucontext. 
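+        (uc_flags, uc_link and uc_stack first, then the saved register set in
+        uc_mcontext via save_sigregs(), and finally the signal mask.)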
*/ + __put_user(0, &frame->uc.uc_flags); + __put_user((abi_ulong)NULL, (abi_ulong*)&frame->uc.uc_link); + __put_user(target_sigaltstack_used.ss_sp, &frame->uc.uc_stack.ss_sp); + __put_user(sas_ss_flags(get_sp_from_cpustate(env)), + &frame->uc.uc_stack.ss_flags); + __put_user(target_sigaltstack_used.ss_size, &frame->uc.uc_stack.ss_size); + save_sigregs(env, &frame->uc.uc_mcontext); + for(i = 0; i < TARGET_NSIG_WORDS; i++) { + __put_user((abi_ulong)set->sig[i], (abi_ulong*)&frame->uc.uc_sigmask.sig[i]); + } + + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + if (ka->sa_flags & TARGET_SA_RESTORER) { + env->regs[14] = (unsigned long) + ka->sa_restorer | PSW_ADDR_AMODE; + } else { + env->regs[14] = (unsigned long) + frame->retcode | PSW_ADDR_AMODE; + if (__put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, + (uint16_t *)(frame->retcode))) + goto give_sigsegv; + } + + /* Set up backchain. */ + if (__put_user(env->regs[15], (abi_ulong *) frame)) + goto give_sigsegv; + + /* Set up registers for signal handler */ + env->regs[15] = (target_ulong) frame; + env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; + + env->regs[2] = sig; //map_signal(sig); + env->regs[3] = (target_ulong) &frame->info; + env->regs[4] = (target_ulong) &frame->uc; + return; + +give_sigsegv: + qemu_log("%s: give_sigsegv\n", __FUNCTION__); + unlock_user_struct(frame, frame_addr, 1); + force_sig(TARGET_SIGSEGV); +} + +static int +restore_sigregs(CPUState *env, target_sigregs *sc) +{ + int err = 0; + int i; + + for (i = 0; i < 16; i++) { + err |= __get_user(env->regs[i], &sc->regs.gprs[i]); + } + + err |= __get_user(env->psw.mask, &sc->regs.psw.mask); + qemu_log("%s: sc->regs.psw.addr 0x%lx env->psw.addr 0x%lx\n", __FUNCTION__, sc->regs.psw.addr, env->psw.addr); + err |= __get_user(env->psw.addr, &sc->regs.psw.addr); + /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ + + for (i = 0; i < 16; i++) { + err |= __get_user(env->aregs[i], &sc->regs.acrs[i]); + } + for (i = 0; i < 16; i++) { + err |= __get_user(env->fregs[i].ll, &sc->fpregs.fprs[i]); + } + + return err; +} + +long do_sigreturn(CPUState *env) +{ + sigframe *frame; + abi_ulong frame_addr = env->regs[15]; + qemu_log("%s: frame_addr 0x%lx\n", __FUNCTION__, frame_addr); + target_sigset_t target_set; + sigset_t set; + + if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) + goto badframe; + if (__get_user(target_set.sig[0], &frame->sc.oldmask[0])) + goto badframe; + + target_to_host_sigset_internal(&set, &target_set); + sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */ + + if (restore_sigregs(env, &frame->sregs)) + goto badframe; + + unlock_user_struct(frame, frame_addr, 0); + return env->regs[2]; + +badframe: + unlock_user_struct(frame, frame_addr, 0); + force_sig(TARGET_SIGSEGV); + return 0; +} + +long do_rt_sigreturn(CPUState *env) +{ + rt_sigframe *frame; + abi_ulong frame_addr = env->regs[15]; + qemu_log("%s: frame_addr 0x%lx\n", __FUNCTION__, frame_addr); + sigset_t set; + + if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) + goto badframe; + target_to_host_sigset(&set, &frame->uc.uc_sigmask); + + sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? 
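+        (the kernel strips SIGKILL and SIGSTOP from the restored mask via _BLOCKABLE;
+        here the set is installed unmodified.)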
*/ + + if (restore_sigregs(env, &frame->uc.uc_mcontext)) + goto badframe; + + if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.uc_stack), 0, + get_sp_from_cpustate(env)) == -EFAULT) + goto badframe; + unlock_user_struct(frame, frame_addr, 0); + return env->regs[2]; + +badframe: + unlock_user_struct(frame, frame_addr, 0); + force_sig(TARGET_SIGSEGV); + return 0; +} + #elif defined(TARGET_PPC) && !defined(TARGET_PPC64) /* FIXME: Many of the structures are defined for both PPC and PPC64, but Index: qemu-0.14.1/linux-user/syscall.c =================================================================== --- qemu-0.14.1.orig/linux-user/syscall.c +++ qemu-0.14.1/linux-user/syscall.c @@ -194,7 +194,7 @@ static type name (type1 arg1,type2 arg2, #define __NR_sys_inotify_add_watch __NR_inotify_add_watch #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch -#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) +#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || defined(__s390x__) #define __NR__llseek __NR_lseek #endif @@ -323,7 +323,7 @@ static int sys_fchmodat(int dirfd, const return (fchmodat(dirfd, pathname, mode, 0)); } #endif -#if defined(TARGET_NR_fchownat) && defined(USE_UID16) +#if defined(TARGET_NR_fchownat) static int sys_fchownat(int dirfd, const char *pathname, uid_t owner, gid_t group, int flags) { @@ -420,7 +420,7 @@ _syscall3(int,sys_faccessat,int,dirfd,co #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode) #endif -#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16) +#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname, uid_t,owner,gid_t,group,int,flags) #endif @@ -3779,6 +3779,7 @@ static int do_fork(CPUState *env, unsign #endif ts = qemu_mallocz(sizeof(TaskState)); init_task_state(ts); + env->multithreaded = 1; /* we create a new CPU instance. 
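+       (the parent is flagged multithreaded before cpu_copy() so both it and the new
+       CPU take tb_lock around TB lookup in cpu_exec from now on.)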
*/ new_env = cpu_copy(env); #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC) @@ -5448,7 +5449,7 @@ abi_long do_syscall(void *cpu_env, int n ret = get_errno(settimeofday(&tv, NULL)); } break; -#ifdef TARGET_NR_select +#if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390) case TARGET_NR_select: { struct target_sel_arg_struct *sel; @@ -5563,7 +5564,9 @@ abi_long do_syscall(void *cpu_env, int n #endif #ifdef TARGET_NR_mmap case TARGET_NR_mmap: -#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) +#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \ + defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ + || defined(TARGET_S390X) { abi_ulong *v; abi_ulong v1, v2, v3, v4, v5, v6; @@ -6059,6 +6062,8 @@ abi_long do_syscall(void *cpu_env, int n ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); #elif defined(TARGET_CRIS) ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5)); +#elif defined(TARGET_S390X) + ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); #else ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); #endif @@ -6263,8 +6268,12 @@ abi_long do_syscall(void *cpu_env, int n } break; #endif /* TARGET_NR_getdents64 */ -#ifdef TARGET_NR__newselect +#if defined(TARGET_NR__newselect) || defined(TARGET_S390X) +#ifdef TARGET_S390X + case TARGET_NR_select: +#else case TARGET_NR__newselect: +#endif ret = do_select(arg1, arg2, arg3, arg4, arg5); break; #endif @@ -6489,7 +6498,7 @@ abi_long do_syscall(void *cpu_env, int n case TARGET_NR_sigaltstack: #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ - defined(TARGET_M68K) + defined(TARGET_M68K) || defined(TARGET_S390X) ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env)); break; #else @@ -6722,18 +6731,35 @@ abi_long do_syscall(void *cpu_env, int n case TARGET_NR_setfsgid: ret = get_errno(setfsgid(arg1)); break; +#else /* USE_UID16 */ +#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) + case TARGET_NR_fchownat: + if (!(p = lock_user_string(arg2))) + goto efault; + ret = get_errno(sys_fchownat(arg1, p, arg3, arg4, arg5)); + unlock_user(p, arg2, 0); + break; +#endif #endif /* USE_UID16 */ -#ifdef TARGET_NR_lchown32 +#if defined(TARGET_NR_lchown32) || !defined(USE_UID16) +#if defined(TARGET_NR_lchown32) case TARGET_NR_lchown32: +#else + case TARGET_NR_lchown: +#endif if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lchown(p, arg2, arg3)); unlock_user(p, arg1, 0); break; #endif -#ifdef TARGET_NR_getuid32 +#if defined(TARGET_NR_getuid32) || (defined(TARGET_NR_getuid) && !defined(USE_UID16)) +#if defined(TARGET_NR_getuid32) case TARGET_NR_getuid32: +#else + case TARGET_NR_getuid: +#endif ret = get_errno(getuid()); break; #endif @@ -6878,33 +6904,57 @@ abi_long do_syscall(void *cpu_env, int n break; #endif -#ifdef TARGET_NR_getgid32 +#if defined(TARGET_NR_getgid32) || (defined(TARGET_NR_getgid) && !defined(USE_UID16)) +#if defined(TARGET_NR_getgid32) case TARGET_NR_getgid32: +#else + case TARGET_NR_getgid: +#endif ret = get_errno(getgid()); break; #endif -#ifdef TARGET_NR_geteuid32 +#if defined(TARGET_NR_geteuid32) || (defined(TARGET_NR_geteuid) && !defined(USE_UID16)) +#if defined(TARGET_NR_geteuid32) case TARGET_NR_geteuid32: +#else + case TARGET_NR_geteuid: 
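+        /* targets without USE_UID16, such as s390x, only provide the full-size syscall and are handled here */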
+#endif ret = get_errno(geteuid()); break; #endif -#ifdef TARGET_NR_getegid32 +#if defined(TARGET_NR_getegid32) || (defined(TARGET_NR_getegid) && !defined(USE_UID16)) +#if defined(TARGET_NR_getegid32) case TARGET_NR_getegid32: +#else + case TARGET_NR_getegid: +#endif ret = get_errno(getegid()); break; #endif -#ifdef TARGET_NR_setreuid32 +#if defined(TARGET_NR_setreuid32) || !defined(USE_UID16) +#if defined(TARGET_NR_setreuid32) case TARGET_NR_setreuid32: +#else + case TARGET_NR_setreuid: +#endif ret = get_errno(setreuid(arg1, arg2)); break; #endif -#ifdef TARGET_NR_setregid32 +#if defined(TARGET_NR_setregid32) || !defined(USE_UID16) +#if defined(TARGET_NR_setregid32) case TARGET_NR_setregid32: +#else + case TARGET_NR_setregid: +#endif ret = get_errno(setregid(arg1, arg2)); break; #endif -#ifdef TARGET_NR_getgroups32 +#if defined(TARGET_NR_getgroups32) || !defined(USE_UID16) +#if defined(TARGET_NR_getgroups32) case TARGET_NR_getgroups32: +#else + case TARGET_NR_getgroups: +#endif { int gidsetsize = arg1; uint32_t *target_grouplist; @@ -6928,8 +6978,12 @@ abi_long do_syscall(void *cpu_env, int n } break; #endif -#ifdef TARGET_NR_setgroups32 +#if defined(TARGET_NR_setgroups32) || !defined(USE_UID16) +#if defined(TARGET_NR_setgroups32) case TARGET_NR_setgroups32: +#else + case TARGET_NR_setgroups: +#endif { int gidsetsize = arg1; uint32_t *target_grouplist; @@ -6949,18 +7003,30 @@ abi_long do_syscall(void *cpu_env, int n } break; #endif -#ifdef TARGET_NR_fchown32 +#if defined(TARGET_NR_fchown32) || !defined(USE_UID16) +#if defined(TARGET_NR_fchown32) case TARGET_NR_fchown32: +#else + case TARGET_NR_fchown: +#endif ret = get_errno(fchown(arg1, arg2, arg3)); break; #endif -#ifdef TARGET_NR_setresuid32 +#if defined(TARGET_NR_setresuid32) || !defined(USE_UID16) +#if defined(TARGET_NR_setresuid32) case TARGET_NR_setresuid32: +#else + case TARGET_NR_setresuid: +#endif ret = get_errno(setresuid(arg1, arg2, arg3)); break; #endif -#ifdef TARGET_NR_getresuid32 +#if defined(TARGET_NR_getresuid32) || !defined(USE_UID16) +#if defined(TARGET_NR_getresuid32) case TARGET_NR_getresuid32: +#else + case TARGET_NR_getresuid: +#endif { uid_t ruid, euid, suid; ret = get_errno(getresuid(&ruid, &euid, &suid)); @@ -6973,13 +7039,21 @@ abi_long do_syscall(void *cpu_env, int n } break; #endif -#ifdef TARGET_NR_setresgid32 +#if defined(TARGET_NR_setresgid32) || !defined(USE_UID16) +#if defined(TARGET_NR_setresgid32) case TARGET_NR_setresgid32: +#else + case TARGET_NR_setresgid: +#endif ret = get_errno(setresgid(arg1, arg2, arg3)); break; #endif +#if defined(TARGET_NR_getresgid32) || !defined(USE_UID16) #ifdef TARGET_NR_getresgid32 case TARGET_NR_getresgid32: +#else + case TARGET_NR_getresgid: +#endif { gid_t rgid, egid, sgid; ret = get_errno(getresgid(&rgid, &egid, &sgid)); @@ -6992,31 +7066,51 @@ abi_long do_syscall(void *cpu_env, int n } break; #endif -#ifdef TARGET_NR_chown32 +#if defined(TARGET_NR_chown32) || !defined(USE_UID16) +#if defined(TARGET_NR_chown32) case TARGET_NR_chown32: +#else + case TARGET_NR_chown: +#endif if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chown(p, arg2, arg3)); unlock_user(p, arg1, 0); break; #endif -#ifdef TARGET_NR_setuid32 +#if defined(TARGET_NR_setuid32) || !defined(USE_UID16) +#if defined(TARGET_NR_setuid32) case TARGET_NR_setuid32: +#else + case TARGET_NR_setuid: +#endif ret = get_errno(setuid(arg1)); break; #endif -#ifdef TARGET_NR_setgid32 +#if defined(TARGET_NR_setgid32) || !defined(USE_UID16) +#if defined(TARGET_NR_setgid32) case TARGET_NR_setgid32: +#else + 
case TARGET_NR_setgid: +#endif ret = get_errno(setgid(arg1)); break; #endif -#ifdef TARGET_NR_setfsuid32 +#if defined(TARGET_NR_setfsuid32) || !defined(USE_UID16) +#if defined(TARGET_NR_setfsuid32) case TARGET_NR_setfsuid32: +#else + case TARGET_NR_setfsuid: +#endif ret = get_errno(setfsuid(arg1)); break; #endif -#ifdef TARGET_NR_setfsgid32 +#if defined(TARGET_NR_setfsgid32) || !defined(USE_UID16) +#if defined(TARGET_NR_setfsgid32) case TARGET_NR_setfsgid32: +#else + case TARGET_NR_setfsgid: +#endif ret = get_errno(setfsgid(arg1)); break; #endif Index: qemu-0.14.1/linux-user/syscall_defs.h =================================================================== --- qemu-0.14.1.orig/linux-user/syscall_defs.h +++ qemu-0.14.1/linux-user/syscall_defs.h @@ -55,7 +55,7 @@ #endif #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \ - || defined(TARGET_M68K) || defined(TARGET_CRIS) + || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_S390X) #define TARGET_IOC_SIZEBITS 14 #define TARGET_IOC_DIRBITS 2 @@ -315,7 +315,10 @@ struct target_sigaction; int do_sigaction(int sig, const struct target_sigaction *act, struct target_sigaction *oact); -#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_MIPS) || defined (TARGET_SH4) || defined(TARGET_M68K) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) || \ + defined(TARGET_PPC) || defined(TARGET_MIPS) || defined (TARGET_SH4) || \ + defined(TARGET_M68K) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \ + defined(TARGET_MICROBLAZE) || defined(TARGET_S390X) #if defined(TARGET_SPARC) #define TARGET_SA_NOCLDSTOP 8u @@ -1677,6 +1680,27 @@ struct target_stat { abi_long __unused[3]; }; +#elif defined(TARGET_S390X) +struct target_stat { + abi_ulong st_dev; + abi_ulong st_ino; + abi_ulong st_nlink; + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int __pad1; + abi_ulong st_rdev; + abi_ulong st_size; + abi_ulong target_st_atime; + abi_ulong target_st_atime_nsec; + abi_ulong target_st_mtime; + abi_ulong target_st_mtime_nsec; + abi_ulong target_st_ctime; + abi_ulong target_st_ctime_nsec; + abi_ulong st_blksize; + abi_long st_blocks; + abi_ulong __unused[3]; +}; #else #error unsupported CPU #endif @@ -1763,6 +1787,34 @@ struct target_statfs64 { abi_long f_frsize; abi_long f_spare[5]; }; +#elif defined(TARGET_S390X) +struct target_statfs { + int32_t f_type; + int32_t f_bsize; + abi_long f_blocks; + abi_long f_bfree; + abi_long f_bavail; + abi_long f_files; + abi_long f_ffree; + kernel_fsid_t f_fsid; + int32_t f_namelen; + int32_t f_frsize; + int32_t f_spare[5]; +}; + +struct target_statfs64 { + int32_t f_type; + int32_t f_bsize; + abi_long f_blocks; + abi_long f_bfree; + abi_long f_bavail; + abi_long f_files; + abi_long f_ffree; + kernel_fsid_t f_fsid; + int32_t f_namelen; + int32_t f_frsize; + int32_t f_spare[5]; +}; #else struct target_statfs { uint32_t f_type; Index: qemu-0.14.1/s390x.ld =================================================================== --- /dev/null +++ qemu-0.14.1/s390x.ld @@ -0,0 +1,194 @@ +/* Default linker script, for normal executables */ +OUTPUT_FORMAT("elf64-s390", "elf64-s390", + "elf64-s390") +OUTPUT_ARCH(s390:64-bit) +ENTRY(_start) +SEARCH_DIR("/usr/s390x-suse-linux/lib64"); SEARCH_DIR("/usr/local/lib64"); SEARCH_DIR("/lib64"); SEARCH_DIR("/usr/lib64"); SEARCH_DIR("/usr/s390x-suse-linux/lib"); 
SEARCH_DIR("/usr/lib64"); SEARCH_DIR("/usr/local/lib"); SEARCH_DIR("/lib"); SEARCH_DIR("/usr/lib"); +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x60000000); . = 0x60000000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.plt : { *(.rel.plt) } + .rela.plt : { *(.rela.plt) } + .init : + { + KEEP (*(.init)) + } =0x07070707 + .plt : { *(.plt) } + .text : + { + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + } =0x07070707 + .fini : + { + KEEP (*(.fini)) + } =0x07070707 + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) } + /* Adjust the address for the data segment. We want to adjust up to + the same address within the page on the next page up. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . 
= DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + .preinit_array : + { + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP (*(.preinit_array)) + PROVIDE_HIDDEN (__preinit_array_end = .); + } + .init_array : + { + PROVIDE_HIDDEN (__init_array_start = .); + KEEP (*(SORT(.init_array.*))) + KEEP (*(.init_array)) + PROVIDE_HIDDEN (__init_array_end = .); + } + .fini_array : + { + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP (*(.fini_array)) + KEEP (*(SORT(.fini_array.*))) + PROVIDE_HIDDEN (__fini_array_end = .); + } + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin?.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin?.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.got) } + .data : + { + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. + FIXME: Why do we need it? When there is no .bss section, we don't + pad the .data section. */ + . = ALIGN(. != 0 ? 64 / 8 : 1); + } + . = ALIGN(64 / 8); + . = ALIGN(64 / 8); + _end = .; PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) } +} Index: qemu-0.14.1/scripts/qemu-binfmt-conf.sh =================================================================== --- qemu-0.14.1.orig/scripts/qemu-binfmt-conf.sh +++ qemu-0.14.1/scripts/qemu-binfmt-conf.sh @@ -1,5 +1,5 @@ #!/bin/sh -# enable automatic i386/ARM/M68K/MIPS/SPARC/PPC program execution by the kernel +# enable automatic i386/ARM/M68K/MIPS/SPARC/PPC/s390 program execution by the kernel # load the binfmt_misc module if [ ! -d /proc/sys/fs/binfmt_misc ]; then @@ -67,3 +67,6 @@ if [ $cpu != "sh" ] ; then echo ':sh4:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a\x00:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/bin/qemu-sh4-binfmt:P' > /proc/sys/fs/binfmt_misc/register echo ':sh4eb:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/bin/qemu-sh4eb-binfmt:P' > /proc/sys/fs/binfmt_misc/register fi +if [ $cpu != "s390x" ] ; then + echo ':s390x:M::\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x16:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-s390x-binfmt:P' > /proc/sys/fs/binfmt_misc/register +fi Index: qemu-0.14.1/target-s390x/cpu.h =================================================================== --- qemu-0.14.1.orig/target-s390x/cpu.h +++ qemu-0.14.1/target-s390x/cpu.h @@ -26,6 +26,14 @@ #define CPUState struct CPUS390XState #include "cpu-defs.h" +#define TARGET_PAGE_BITS 12 + +/* ??? This is certainly wrong for 64-bit s390x, but given that only KVM + emulation actually works, this is good enough for a placeholder. */ +#define TARGET_PHYS_ADDR_SPACE_BITS 64 +#define TARGET_VIRT_ADDR_SPACE_BITS 64 + +#include "cpu-all.h" #include "softfloat.h" @@ -51,7 +59,7 @@ typedef struct CPUS390XState { uint32_t aregs[16]; /* access registers */ uint32_t fpc; /* floating-point control register */ - FPReg fregs[16]; /* FP registers */ + CPU_DoubleU fregs[16]; /* FP registers */ float_status fpu_status; /* passed to softfloat lib */ struct { @@ -85,8 +93,10 @@ static inline int cpu_mmu_index (CPUStat } CPUS390XState *cpu_s390x_init(const char *cpu_model); +void s390x_translate_init(void); int cpu_s390x_exec(CPUS390XState *s); void cpu_s390x_close(CPUS390XState *s); +void do_interrupt (CPUState *env); /* you can call this signal handler from your SIGBUS and SIGSEGV signal handlers to inform the virtual CPU of exceptions. 
non zero @@ -97,29 +107,32 @@ int cpu_s390x_handle_mmu_fault (CPUS390X int mmu_idx, int is_softmuu); #define cpu_handle_mmu_fault cpu_s390x_handle_mmu_fault -#define TARGET_PAGE_BITS 12 - -/* ??? This is certainly wrong for 64-bit s390x, but given that only KVM - emulation actually works, this is good enough for a placeholder. */ -#define TARGET_PHYS_ADDR_SPACE_BITS 32 -#define TARGET_VIRT_ADDR_SPACE_BITS 32 #ifndef CONFIG_USER_ONLY int s390_virtio_hypercall(CPUState *env); void kvm_s390_virtio_irq(CPUState *env, int config_change, uint64_t token); CPUState *s390_cpu_addr2state(uint16_t cpu_addr); #endif +void cpu_lock(void); +void cpu_unlock(void); +static inline void cpu_set_tls(CPUS390XState *env, target_ulong newtls) +{ + env->aregs[0] = newtls >> 32; + env->aregs[1] = newtls & 0xffffffffULL; +} #define cpu_init cpu_s390x_init #define cpu_exec cpu_s390x_exec #define cpu_gen_code cpu_s390x_gen_code +#define cpu_signal_handler cpu_s390x_signal_handler -#include "cpu-all.h" +#include "exec-all.h" #define EXCP_OPEX 1 /* operation exception (sigill) */ #define EXCP_SVC 2 /* supervisor call (syscall) */ #define EXCP_ADDR 5 /* addressing exception */ +#define EXCP_SPEC 6 /* specification exception */ #define EXCP_EXECUTE_SVC 0xff00000 /* supervisor call via execute insn */ static inline void cpu_get_tb_cpu_state(CPUState* env, target_ulong *pc, Index: qemu-0.14.1/target-s390x/exec.h =================================================================== --- qemu-0.14.1.orig/target-s390x/exec.h +++ qemu-0.14.1/target-s390x/exec.h @@ -34,6 +34,14 @@ static inline int cpu_has_work(CPUState return env->interrupt_request & CPU_INTERRUPT_HARD; // guess } +static inline void regs_to_env(void) +{ +} + +static inline void env_to_regs(void) +{ +} + static inline int cpu_halted(CPUState *env) { if (!env->halted) { Index: qemu-0.14.1/target-s390x/helper.c =================================================================== --- qemu-0.14.1.orig/target-s390x/helper.c +++ qemu-0.14.1/target-s390x/helper.c @@ -26,8 +26,10 @@ #include "gdbstub.h" #include "qemu-common.h" +#if !defined(CONFIG_USER_ONLY) #include #include "kvm.h" +#endif CPUS390XState *cpu_s390x_init(const char *cpu_model) { @@ -38,6 +40,7 @@ CPUS390XState *cpu_s390x_init(const char cpu_exec_init(env); if (!inited) { inited = 1; + s390x_translate_init(); } env->cpu_model_str = cpu_model; @@ -46,6 +49,24 @@ CPUS390XState *cpu_s390x_init(const char return env; } +#if defined(CONFIG_USER_ONLY) + +void do_interrupt (CPUState *env) +{ + env->exception_index = -1; +} + +int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong address, int rw, + int mmu_idx, int is_softmmu) +{ + /* fprintf(stderr,"%s: address 0x%lx rw %d mmu_idx %d is_softmmu %d\n", __FUNCTION__, address, rw, mmu_idx, is_softmmu); */ + env->exception_index = EXCP_ADDR; + env->__excp_addr = address; /* FIXME: find out how this works on a real machine */ + return 1; +} + +#endif /* CONFIG_USER_ONLY */ + void cpu_reset(CPUS390XState *env) { if (qemu_loglevel_mask(CPU_LOG_RESET)) { @@ -58,13 +79,13 @@ void cpu_reset(CPUS390XState *env) tlb_flush(env, 1); } +#ifndef CONFIG_USER_ONLY + target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) { return 0; } -#ifndef CONFIG_USER_ONLY - int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong address, int rw, int mmu_idx, int is_softmmu) { Index: qemu-0.14.1/target-s390x/helpers.h =================================================================== --- /dev/null +++ qemu-0.14.1/target-s390x/helpers.h @@ -0,0 +1,127 @@ 
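[Editor's note: illustration only, not part of the patch.] The helper declarations that follow, and their implementations in op_helper.c further down, funnel almost every result through the S/390 condition-code convention: instead of flag bits, each arithmetic helper returns a cc value in the range 0..3. The standalone sketch below restates the two rules used most often (sign of a result, signed-add overflow) together with the TLS split performed by cpu_set_tls above; the names cc_for_s32, cc_for_add64 and split_tls are invented for the example and do not exist in QEMU.

/* Illustration only -- not part of the patch. Names below are invented. */
#include <assert.h>
#include <stdint.h>

/* Sign-based cc, as in helper_set_cc_s32/s64: 0 = zero, 1 = negative, 2 = positive. */
static uint32_t cc_for_s32(int32_t v)
{
    return v == 0 ? 0 : (v < 0 ? 1 : 2);
}

/* cc for signed 64-bit addition, as in helper_set_cc_add64:
   3 = signed overflow, otherwise the sign of the result as above. */
static uint32_t cc_for_add64(int64_t a, int64_t b, int64_t r)
{
    if ((a > 0 && b > 0 && r < 0) || (a < 0 && b < 0 && r > 0)) {
        return 3;
    }
    return r == 0 ? 0 : (r < 0 ? 1 : 2);
}

/* cpu_set_tls (cpu.h above) splits the 64-bit TLS pointer across the
   32-bit access registers a0 (high half) and a1 (low half). */
static void split_tls(uint64_t tls, uint32_t aregs[2])
{
    aregs[0] = (uint32_t)(tls >> 32);
    aregs[1] = (uint32_t)(tls & 0xffffffffULL);
}

int main(void)
{
    uint32_t a[2];

    assert(cc_for_s32(0) == 0);
    assert(cc_for_s32(-5) == 1);
    assert(cc_for_s32(7) == 2);
    /* The overflowed sum is passed in pre-wrapped, just as the helper
       receives the already computed result from the translator. */
    assert(cc_for_add64(INT64_MAX, 1, INT64_MIN) == 3);
    split_tls(0x0000004000001000ULL, a);
    assert(a[0] == 0x40 && a[1] == 0x1000);
    return 0;
}

The assertions mirror the behaviour of helper_set_cc_s32 and helper_set_cc_add64 as defined in op_helper.c below; any C99 compiler should accept the sketch as-is.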
+#include "def-helper.h" + +DEF_HELPER_1(exception, void, i32) +DEF_HELPER_4(nc, i32, i32, i32, i32, i32) +DEF_HELPER_4(oc, i32, i32, i32, i32, i32) +DEF_HELPER_4(xc, i32, i32, i32, i32, i32) +DEF_HELPER_4(mvc, void, i32, i32, i32, i32) +DEF_HELPER_4(clc, i32, i32, i32, i32, i32) +DEF_HELPER_4(lmg, void, i32, i32, i32, s32) +DEF_HELPER_4(stmg, void, i32, i32, i32, s32) +DEF_HELPER_FLAGS_1(set_cc_s32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32) +DEF_HELPER_FLAGS_1(set_cc_s64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64) +DEF_HELPER_FLAGS_1(set_cc_comp_s32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32) +DEF_HELPER_FLAGS_1(set_cc_comp_s64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64) +DEF_HELPER_FLAGS_1(set_cc_nz_u32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32) +DEF_HELPER_FLAGS_1(set_cc_nz_u64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64) +DEF_HELPER_FLAGS_2(set_cc_icm, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32) +DEF_HELPER_3(brctg, void, i64, i64, s32) +DEF_HELPER_3(brct, void, i32, i64, s32) +DEF_HELPER_4(brcl, void, i32, i32, i64, s64) +DEF_HELPER_4(bcr, void, i32, i32, i64, i64) +DEF_HELPER_4(bc, void, i32, i32, i64, i64) +DEF_HELPER_FLAGS_2(cmp_u64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64) +DEF_HELPER_FLAGS_2(cmp_u32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32) +DEF_HELPER_FLAGS_2(cmp_s32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32, s32) +DEF_HELPER_FLAGS_2(cmp_s64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64, s64) +DEF_HELPER_3(clm, i32, i32, i32, i64) +DEF_HELPER_3(stcm, void, i32, i32, i64) +DEF_HELPER_2(mlg, void, i32, i64) +DEF_HELPER_2(dlg, void, i32, i64) +DEF_HELPER_FLAGS_3(set_cc_add64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64, s64, s64) +DEF_HELPER_FLAGS_3(set_cc_addu64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64) +DEF_HELPER_FLAGS_3(set_cc_add32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32, s32, s32) +DEF_HELPER_FLAGS_3(set_cc_addu32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32) +DEF_HELPER_FLAGS_3(set_cc_sub64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64, s64, s64) +DEF_HELPER_FLAGS_3(set_cc_subu64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64) +DEF_HELPER_FLAGS_3(set_cc_sub32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32, s32, s32) +DEF_HELPER_FLAGS_3(set_cc_subu32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32) +DEF_HELPER_3(srst, i32, i32, i32, i32) +DEF_HELPER_3(clst, i32, i32, i32, i32) +DEF_HELPER_3(mvst, i32, i32, i32, i32) +DEF_HELPER_3(csg, i32, i32, i64, i32) +DEF_HELPER_3(cdsg, i32, i32, i64, i32) +DEF_HELPER_3(cs, i32, i32, i64, i32) +DEF_HELPER_4(ex, i32, i32, i64, i64, i64) +DEF_HELPER_FLAGS_2(tm, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32) +DEF_HELPER_FLAGS_2(tmxx, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i32) +DEF_HELPER_2(abs_i32, i32, i32, s32) +DEF_HELPER_2(nabs_i32, i32, i32, s32) +DEF_HELPER_2(abs_i64, i32, i32, s64) +DEF_HELPER_2(nabs_i64, i32, i32, s64) +DEF_HELPER_3(stcmh, i32, i32, i64, i32) +DEF_HELPER_3(icmh, i32, i32, i64, i32) +DEF_HELPER_2(ipm, void, i32, i32) +DEF_HELPER_3(addc_u32, i32, i32, i32, i32) +DEF_HELPER_FLAGS_3(set_cc_addc_u64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64) +DEF_HELPER_3(stam, void, i32, i64, i32) +DEF_HELPER_3(mvcle, i32, i32, i64, i32) +DEF_HELPER_3(clcle, i32, i32, i64, i32) +DEF_HELPER_4(slb, i32, i32, i32, i32, i32) +DEF_HELPER_4(slbg, i32, i32, i32, i64, i64) +DEF_HELPER_2(cefbr, void, i32, s32) +DEF_HELPER_2(cdfbr, void, i32, s32) +DEF_HELPER_2(cxfbr, void, i32, s32) +DEF_HELPER_2(cegbr, void, i32, s64) +DEF_HELPER_2(cdgbr, void, i32, s64) +DEF_HELPER_2(cxgbr, void, i32, s64) +DEF_HELPER_2(adbr, i32, i32, i32) +DEF_HELPER_2(aebr, i32, 
i32, i32) +DEF_HELPER_2(sebr, i32, i32, i32) +DEF_HELPER_2(sdbr, i32, i32, i32) +DEF_HELPER_2(debr, void, i32, i32) +DEF_HELPER_2(dxbr, void, i32, i32) +DEF_HELPER_2(mdbr, void, i32, i32) +DEF_HELPER_2(mxbr, void, i32, i32) +DEF_HELPER_2(ldebr, void, i32, i32) +DEF_HELPER_2(ldxbr, void, i32, i32) +DEF_HELPER_2(lxdbr, void, i32, i32) +DEF_HELPER_2(ledbr, void, i32, i32) +DEF_HELPER_2(lexbr, void, i32, i32) +DEF_HELPER_2(lpebr, i32, i32, i32) +DEF_HELPER_2(lpdbr, i32, i32, i32) +DEF_HELPER_2(lpxbr, i32, i32, i32) +DEF_HELPER_2(ltebr, i32, i32, i32) +DEF_HELPER_2(ltdbr, i32, i32, i32) +DEF_HELPER_2(ltxbr, i32, i32, i32) +DEF_HELPER_2(lcebr, i32, i32, i32) +DEF_HELPER_2(lcdbr, i32, i32, i32) +DEF_HELPER_2(lcxbr, i32, i32, i32) +DEF_HELPER_2(ceb, i32, i32, i32) +DEF_HELPER_2(aeb, i32, i32, i32) +DEF_HELPER_2(deb, void, i32, i32) +DEF_HELPER_2(meeb, void, i32, i32) +DEF_HELPER_2(cdb, i32, i32, i64) +DEF_HELPER_2(adb, i32, i32, i64) +DEF_HELPER_2(seb, i32, i32, i32) +DEF_HELPER_2(sdb, i32, i32, i64) +DEF_HELPER_2(mdb, void, i32, i64) +DEF_HELPER_2(ddb, void, i32, i64) +DEF_HELPER_FLAGS_2(cebr, TCG_CALL_PURE, i32, i32, i32) +DEF_HELPER_FLAGS_2(cdbr, TCG_CALL_PURE, i32, i32, i32) +DEF_HELPER_FLAGS_2(cxbr, TCG_CALL_PURE, i32, i32, i32) +DEF_HELPER_3(cgebr, i32, i32, i32, i32) +DEF_HELPER_3(cgdbr, i32, i32, i32, i32) +DEF_HELPER_3(cgxbr, i32, i32, i32, i32) +DEF_HELPER_1(lzer, void, i32) +DEF_HELPER_1(lzdr, void, i32) +DEF_HELPER_1(lzxr, void, i32) +DEF_HELPER_3(cfebr, i32, i32, i32, i32) +DEF_HELPER_3(cfdbr, i32, i32, i32, i32) +DEF_HELPER_3(cfxbr, i32, i32, i32, i32) +DEF_HELPER_2(axbr, i32, i32, i32) +DEF_HELPER_2(sxbr, i32, i32, i32) +DEF_HELPER_2(meebr, void, i32, i32) +DEF_HELPER_2(ddbr, void, i32, i32) +DEF_HELPER_3(madb, void, i32, i64, i32) +DEF_HELPER_3(maebr, void, i32, i32, i32) +DEF_HELPER_3(madbr, void, i32, i32, i32) +DEF_HELPER_3(msdbr, void, i32, i32, i32) +DEF_HELPER_2(lxdb, void, i32, i64) +DEF_HELPER_FLAGS_2(tceb, TCG_CALL_PURE, i32, i32, i64) +DEF_HELPER_FLAGS_2(tcdb, TCG_CALL_PURE, i32, i32, i64) +DEF_HELPER_FLAGS_2(tcxb, TCG_CALL_PURE, i32, i32, i64) +DEF_HELPER_2(flogr, i32, i32, i64) +DEF_HELPER_2(sqdbr, void, i32, i32) + +#include "def-helper.h" Index: qemu-0.14.1/target-s390x/op_helper.c =================================================================== --- qemu-0.14.1.orig/target-s390x/op_helper.c +++ qemu-0.14.1/target-s390x/op_helper.c @@ -1,6 +1,7 @@ /* * S/390 helper routines * + * Copyright (c) 2009 Ulrich Hecht * Copyright (c) 2009 Alexander Graf * * This library is free software; you can redistribute it and/or @@ -18,6 +19,8 @@ */ #include "exec.h" +#include "helpers.h" +#include /*****************************************************************************/ /* Softmmu support */ @@ -71,3 +74,1607 @@ void tlb_fill (target_ulong addr, int is } #endif +/* #define DEBUG_HELPER */ +#ifdef DEBUG_HELPER +#define HELPER_LOG(x...) qemu_log(x) +#else +#define HELPER_LOG(x...) 
+#endif + +/* raise an exception */ +void HELPER(exception)(uint32_t excp) +{ + HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp); + env->exception_index = excp; + cpu_loop_exit(); +} + +/* and on array */ +uint32_t HELPER(nc)(uint32_t l, uint32_t b, uint32_t d1, uint32_t d2) +{ + uint64_t dest = env->regs[b >> 4] + d1; + uint64_t src = env->regs[b & 0xf] + d2; + int i; + unsigned char x; + uint32_t cc = 0; + HELPER_LOG("%s l %d b 0x%x d1 %d d2 %d\n", __FUNCTION__, l, b, d1, d2); + for (i = 0; i <= l; i++) { + x = ldub(dest + i) & ldub(src + i); + if (x) cc = 1; + stb(dest + i, x); + } + return cc; +} + +/* xor on array */ +uint32_t HELPER(xc)(uint32_t l, uint32_t b, uint32_t d1, uint32_t d2) +{ + uint64_t dest = env->regs[b >> 4] + d1; + uint64_t src = env->regs[b & 0xf] + d2; + int i; + unsigned char x; + uint32_t cc = 0; + HELPER_LOG("%s l %d b 0x%x d1 %d d2 %d\n", __FUNCTION__, l, b, d1, d2); + for (i = 0; i <= l; i++) { + x = ldub(dest + i) ^ ldub(src + i); + if (x) cc = 1; + stb(dest + i, x); + } + return cc; +} + +/* or on array */ +uint32_t HELPER(oc)(uint32_t l, uint32_t b, uint32_t d1, uint32_t d2) +{ + uint64_t dest = env->regs[b >> 4] + d1; + uint64_t src = env->regs[b & 0xf] + d2; + int i; + unsigned char x; + uint32_t cc = 0; + HELPER_LOG("%s l %d b 0x%x d1 %d d2 %d\n", __FUNCTION__, l, b, d1, d2); + for (i = 0; i <= l; i++) { + x = ldub(dest + i) | ldub(src + i); + if (x) cc = 1; + stb(dest + i, x); + } + return cc; +} + +/* memcopy */ +void HELPER(mvc)(uint32_t l, uint32_t b, uint32_t d1, uint32_t d2) +{ + uint64_t dest = env->regs[b >> 4] + d1; + uint64_t src = env->regs[b & 0xf] + d2; + int i; + HELPER_LOG("%s l %d b 0x%x d1 %d d2 %d\n", __FUNCTION__, l, b, d1, d2); + for (i = 0; i <= l; i++) { + stb(dest + i, ldub(src + i)); + } +} + +/* compare unsigned byte arrays */ +uint32_t HELPER(clc)(uint32_t l, uint32_t b, uint32_t d1, uint32_t d2) +{ + uint64_t s1 = env->regs[b >> 4] + d1; + uint64_t s2 = env->regs[b & 0xf] + d2; + int i; + unsigned char x,y; + uint32_t cc; + HELPER_LOG("%s l %d b 0x%x d1 %d d2 %d\n", __FUNCTION__, l, b, d1, d2); + for (i = 0; i <= l; i++) { + x = ldub(s1 + i); + y = ldub(s2 + i); + HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y); + if (x < y) { + cc = 1; + goto done; + } else if (x > y) { + cc = 2; + goto done; + } + } + cc = 0; +done: + HELPER_LOG("\n"); + return cc; +} + +/* load multiple 64-bit registers from memory */ +void HELPER(lmg)(uint32_t r1, uint32_t r3, uint32_t b2, int d2) +{ + uint64_t src = env->regs[b2] + d2; + for (;;) { + env->regs[r1] = ldq(src); + src += 8; + if (r1 == r3) break; + r1 = (r1 + 1) & 15; + } +} + +/* store multiple 64-bit registers to memory */ +void HELPER(stmg)(uint32_t r1, uint32_t r3, uint32_t b2, int d2) +{ + uint64_t dest = env->regs[b2] + d2; + HELPER_LOG("%s: r1 %d r3 %d\n", __FUNCTION__, r1, r3); + for (;;) { + HELPER_LOG("storing r%d in 0x%lx\n", r1, dest); + stq(dest, env->regs[r1]); + dest += 8; + if (r1 == r3) break; + r1 = (r1 + 1) & 15; + } +} + +/* set condition code for signed 32-bit arithmetics */ +uint32_t HELPER(set_cc_s32)(int32_t v) +{ + if (v < 0) return 1; + else if (v > 0) return 2; + else return 0; +} + +/* set condition code for signed 64-bit arithmetics */ +uint32_t HELPER(set_cc_s64)(int64_t v) +{ + if (v < 0) return 1; + else if (v > 0) return 2; + else return 0; +} + +/* set condition code for signed 32-bit two's complement */ +uint32_t HELPER(set_cc_comp_s32)(int32_t v) +{ + if ((uint32_t)v == 0x80000000UL) return 3; + else if (v < 0) return 1; + else if (v > 0) return 2; + 
else return 0; +} + +/* set condition code for signed 64-bit two's complement */ +uint32_t HELPER(set_cc_comp_s64)(int64_t v) +{ + if ((uint64_t)v == 0x8000000000000000ULL) return 3; + else if (v < 0) return 1; + else if (v > 0) return 2; + else return 0; +} + +/* set negative/zero condition code for 32-bit logical op */ +uint32_t HELPER(set_cc_nz_u32)(uint32_t v) +{ + if (v) return 1; + else return 0; +} + +/* set negative/zero condition code for 64-bit logical op */ +uint32_t HELPER(set_cc_nz_u64)(uint64_t v) +{ + if (v) return 1; + else return 0; +} + +/* set condition code for insert character under mask insn */ +uint32_t HELPER(set_cc_icm)(uint32_t mask, uint32_t val) +{ + HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val); + uint32_t cc; + if (!val || !mask) cc = 0; + else { + while (mask != 1) { + mask >>= 1; + val >>= 8; + } + if (val & 0x80) cc = 1; + else cc = 2; + } + return cc; +} + +/* branch relative on 64-bit count (condition is computed inline, this only + does the branch */ +void HELPER(brctg)(uint64_t flag, uint64_t pc, int32_t offset) +{ + if (flag) { + env->psw.addr = pc + offset; + } + else { + env->psw.addr = pc + 4; + } + HELPER_LOG("%s: pc 0x%lx flag %ld psw.addr 0x%lx\n", __FUNCTION__, pc, flag, + env->psw.addr); +} + +/* branch relative on 32-bit count (condition is computed inline, this only + does the branch */ +void HELPER(brct)(uint32_t flag, uint64_t pc, int32_t offset) +{ + if (flag) { + env->psw.addr = pc + offset; + } + else { + env->psw.addr = pc + 4; + } + HELPER_LOG("%s: pc 0x%lx flag %d psw.addr 0x%lx\n", __FUNCTION__, pc, flag, + env->psw.addr); +} + +/* relative conditional branch with long displacement */ +void HELPER(brcl)(uint32_t cc, uint32_t mask, uint64_t pc, int64_t offset) +{ + if ( mask & ( 1 << (3 - cc) ) ) { + env->psw.addr = pc + offset; + } + else { + env->psw.addr = pc + 6; + } + HELPER_LOG("%s: pc 0x%lx psw.addr 0x%lx\n", __FUNCTION__, pc, env->psw.addr); +} + +/* conditional branch to register (register content is passed as target) */ +void HELPER(bcr)(uint32_t cc, uint32_t mask, uint64_t target, uint64_t pc) +{ + if ( mask & ( 1 << (3 - cc) ) ) { + env->psw.addr = target; + } + else { + env->psw.addr = pc + 2; + } +} + +/* conditional branch to address (address is passed as target) */ +void HELPER(bc)(uint32_t cc, uint32_t mask, uint64_t target, uint64_t pc) +{ + if ( mask & ( 1 << (3 - cc) ) ) { + env->psw.addr = target; + } + else { + env->psw.addr = pc + 4; + } + HELPER_LOG("%s: pc 0x%lx psw.addr 0x%lx r2 0x%lx r5 0x%lx\n", __FUNCTION__, + pc, env->psw.addr, env->regs[2], env->regs[5]); +} + +/* 64-bit unsigned comparison */ +uint32_t HELPER(cmp_u64)(uint64_t o1, uint64_t o2) +{ + if (o1 < o2) return 1; + else if (o1 > o2) return 2; + else return 0; +} + +/* 32-bit unsigned comparison */ +uint32_t HELPER(cmp_u32)(uint32_t o1, uint32_t o2) +{ + HELPER_LOG("%s: o1 0x%x o2 0x%x\n", __FUNCTION__, o1, o2); + if (o1 < o2) return 1; + else if (o1 > o2) return 2; + else return 0; +} + +/* 64-bit signed comparison */ +uint32_t HELPER(cmp_s64)(int64_t o1, int64_t o2) +{ + HELPER_LOG("%s: o1 %ld o2 %ld\n", __FUNCTION__, o1, o2); + if (o1 < o2) return 1; + else if (o1 > o2) return 2; + else return 0; +} + +/* 32-bit signed comparison */ +uint32_t HELPER(cmp_s32)(int32_t o1, int32_t o2) +{ + if (o1 < o2) return 1; + else if (o1 > o2) return 2; + else return 0; +} + +/* compare logical under mask */ +uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr) +{ + uint8_t r,d; + uint32_t cc; + HELPER_LOG("%s: r1 0x%x mask 0x%x 
addr 0x%lx\n",__FUNCTION__,r1,mask,addr); + cc = 0; + while (mask) { + if (mask & 8) { + d = ldub(addr); + r = (r1 & 0xff000000UL) >> 24; + HELPER_LOG("mask 0x%x %02x/%02x (0x%lx) ", mask, r, d, addr); + if (r < d) { + cc = 1; + break; + } + else if (r > d) { + cc = 2; + break; + } + addr++; + } + mask = (mask << 1) & 0xf; + r1 <<= 8; + } + HELPER_LOG("\n"); + return cc; +} + +/* store character under mask */ +void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr) +{ + uint8_t r; + HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n",__FUNCTION__,r1,mask,addr); + while (mask) { + if (mask & 8) { + r = (r1 & 0xff000000UL) >> 24; + stb(addr, r); + HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr); + addr++; + } + mask = (mask << 1) & 0xf; + r1 <<= 8; + } + HELPER_LOG("\n"); +} + +/* 64/64 -> 128 unsigned multiplication */ +void HELPER(mlg)(uint32_t r1, uint64_t v2) +{ +#if TARGET_LONG_BITS == 64 && defined(__GNUC__) /* assuming 64-bit hosts have __uint128_t */ + __uint128_t res = (__uint128_t)env->regs[r1 + 1]; + res *= (__uint128_t)v2; + env->regs[r1] = (uint64_t)(res >> 64); + env->regs[r1 + 1] = (uint64_t)res; +#else + mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2); +#endif +} + +/* 128 -> 64/64 unsigned division */ +void HELPER(dlg)(uint32_t r1, uint64_t v2) +{ +#if TARGET_LONG_BITS == 64 && defined(__GNUC__) /* assuming 64-bit hosts have __uint128_t */ + __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) | + (env->regs[r1+1]); + uint64_t divisor = v2; + __uint128_t quotient = dividend / divisor; + env->regs[r1+1] = quotient; + __uint128_t remainder = dividend % divisor; + env->regs[r1] = remainder; + HELPER_LOG("%s: dividend 0x%016lx%016lx divisor 0x%lx quotient 0x%lx rem 0x%lx\n", + __FUNCTION__, (uint64_t)(dividend >> 64), (uint64_t)dividend, + divisor, (uint64_t)quotient, (uint64_t)remainder); +#else + cpu_abort(env, "128 -> 64/64 division not implemented on this system\n"); +#endif +} + +/* set condition code for 64-bit signed addition */ +uint32_t HELPER(set_cc_add64)(int64_t a1, int64_t a2, int64_t ar) +{ + if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { + return 3; /* overflow */ + } else { + if (ar < 0) return 1; + else if (ar > 0) return 2; + else return 0; + } +} + +/* set condition code for 64-bit unsigned addition */ +uint32_t HELPER(set_cc_addu64)(uint64_t a1, uint64_t a2, uint64_t ar) +{ + if (ar == 0) { + if (a1) return 2; + else return 0; + } else { + if (ar < a1 || ar < a2) { + return 3; + } else { + return 1; + } + } +} + +/* set condition code for 32-bit signed addition */ +uint32_t HELPER(set_cc_add32)(int32_t a1, int32_t a2, int32_t ar) +{ + if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { + return 3; /* overflow */ + } else { + if (ar < 0) return 1; + else if (ar > 0) return 2; + else return 0; + } +} + +/* set condition code for 32-bit unsigned addition */ +uint32_t HELPER(set_cc_addu32)(uint32_t a1, uint32_t a2, uint32_t ar) +{ + if (ar == 0) { + if (a1) return 2; + else return 0; + } else { + if (ar < a1 || ar < a2) { + return 3; + } else { + return 1; + } + } +} + +/* set condition code for 64-bit signed subtraction */ +uint32_t HELPER(set_cc_sub64)(int64_t s1, int64_t s2, int64_t sr) +{ + if ((s1 > 0 && s2 < 0 && sr < 0) || (s1 < 0 && s2 > 0 && sr > 0)) { + return 3; /* overflow */ + } else { + if (sr < 0) return 1; + else if (sr > 0) return 2; + else return 0; + } +} + +/* set condition code for 32-bit signed subtraction */ +uint32_t HELPER(set_cc_sub32)(int32_t s1, int32_t s2, int32_t sr) 
+{ + if ((s1 > 0 && s2 < 0 && sr < 0) || (s1 < 0 && s2 > 0 && sr > 0)) { + return 3; /* overflow */ + } else { + if (sr < 0) return 1; + else if (sr > 0) return 2; + else return 0; + } +} + +/* set condition code for 32-bit unsigned subtraction */ +uint32_t HELPER(set_cc_subu32)(uint32_t s1, uint32_t s2, uint32_t sr) +{ + if (sr == 0) return 2; + else { + if (s2 > s1) return 1; + else return 3; + } +} + +/* set condition code for 64-bit unsigned subtraction */ +uint32_t HELPER(set_cc_subu64)(uint64_t s1, uint64_t s2, uint64_t sr) +{ + if (sr == 0) return 2; + else { + if (s2 > s1) return 1; + else return 3; + } +} + +/* search string (c is byte to search, r2 is string, r1 end of string) */ +uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2) +{ + HELPER_LOG("%s: c %d *r1 0x%lx *r2 0x%lx\n", __FUNCTION__, c, env->regs[r1], + env->regs[r2]); + uint64_t i; + uint32_t cc; + for (i = env->regs[r2]; i != env->regs[r1]; i++) { + if (ldub(i) == c) { + env->regs[r1] = i; + cc = 1; + return cc; + } + } + cc = 2; + return cc; +} + +/* unsigned string compare (c is string terminator) */ +uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2) +{ + uint64_t s1 = env->regs[r1]; + uint64_t s2 = env->regs[r2]; + uint8_t v1, v2; + uint32_t cc; + c = c & 0xff; +#ifdef CONFIG_USER_ONLY + if (!c) { + HELPER_LOG("%s: comparing '%s' and '%s'\n", + __FUNCTION__, (char*)s1, (char*)s2); + } +#endif + for (;;) { + v1 = ldub(s1); + v2 = ldub(s2); + if (v1 == c || v2 == c) break; + if (v1 != v2) break; + s1++; s2++; + } + + if (v1 == v2) cc = 0; + else { + if (v1 < v2) cc = 1; + else cc = 2; + env->regs[r1] = s1; + env->regs[r2] = s2; + } + return cc; +} + +/* string copy (c is string terminator) */ +uint32_t HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2) +{ + uint64_t dest = env->regs[r1]; + uint64_t src = env->regs[r2]; + uint8_t v; + c = c & 0xff; +#ifdef CONFIG_USER_ONLY + if (!c) { + HELPER_LOG("%s: copying '%s' to 0x%lx\n", __FUNCTION__, (char*)src, dest); + } +#endif + for (;;) { + v = ldub(src); + stb(dest, v); + if (v == c) break; + src++; dest++; + } + env->regs[r1] = dest; + return 1; +} + +/* compare and swap 64-bit */ +uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + /* FIXME: locking? */ + uint32_t cc; + uint64_t v2 = ldq(a2); + if (env->regs[r1] == v2) { + cc = 0; + stq(a2, env->regs[r3]); + } else { + cc = 1; + env->regs[r1] = v2; + } + return cc; +} + +/* compare double and swap 64-bit */ +uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + /* FIXME: locking? */ + uint32_t cc; + __uint128_t v2 = (((__uint128_t)ldq(a2)) << 64) | (__uint128_t)ldq(a2 + 8); + __uint128_t v1 = (((__uint128_t)env->regs[r1]) << 64) | (__uint128_t)env->regs[r1 + 1]; + if (v1 == v2) { + cc = 0; + stq(a2, env->regs[r3]); + stq(a2 + 8, env->regs[r3 + 1]); + } else { + cc = 1; + env->regs[r1] = v2 >> 64; + env->regs[r1 + 1] = v2 & 0xffffffffffffffffULL; + } + return cc; +} + +/* compare and swap 32-bit */ +uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + /* FIXME: locking? 
*/ + uint32_t cc; + HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3); + uint32_t v2 = ldl(a2); + if (((uint32_t)env->regs[r1]) == v2) { + cc = 0; + stl(a2, (uint32_t)env->regs[r3]); + } else { + cc = 1; + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2; + } + return cc; +} + +/* execute instruction + this instruction executes an insn modified with the contents of r1 + it does not change the executed instruction in memory + it does not change the program counter + in other words: tricky... + currently implemented by interpreting the cases it is most commonly used in + */ +uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret) +{ + uint16_t insn = lduw(addr); + HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr, + insn); + if ((insn & 0xf0ff) == 0xd000) { + uint32_t l, insn2, b, d1, d2; + l = v1 & 0xff; + insn2 = ldl_code(addr + 2); + b = (((insn2 >> 28) & 0xf) << 4) | ((insn2 >> 12) & 0xf); + d1 = (insn2 >> 16) & 0xfff; + d2 = insn2 & 0xfff; + switch (insn & 0xf00) { + case 0x200: helper_mvc(l, b, d1, d2); return cc; break; + case 0x500: return helper_clc(l, b, d1, d2); break; + case 0x700: return helper_xc(l, b, d1, d2); break; + default: goto abort; break; + } + } + else if ((insn & 0xff00) == 0x0a00) { /* supervisor call */ + HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff); + env->psw.addr = ret; + helper_exception(EXCP_EXECUTE_SVC + ((insn | v1) & 0xff)); + } + else { +abort: + cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n", insn); + } + return cc; +} + +/* set condition code for test under mask */ +uint32_t HELPER(tm)(uint32_t val, uint32_t mask) +{ + HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask); + uint16_t r = val & mask; + if (r == 0) return 0; + else if (r == mask) return 3; + else return 1; +} + +/* set condition code for test under mask */ +uint32_t HELPER(tmxx)(uint64_t val, uint32_t mask) +{ + uint16_t r = val & mask; + HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r); + if (r == 0) return 0; + else if (r == mask) return 3; + else { + while (!(mask & 0x8000)) { + mask <<= 1; + val <<= 1; + } + if (val & 0x8000) return 2; + else return 1; + } +} + +/* absolute value 32-bit */ +uint32_t HELPER(abs_i32)(uint32_t reg, int32_t val) +{ + uint32_t cc; + if (val == 0x80000000UL) cc = 3; + else if (val) cc = 1; + else cc = 0; + + if (val < 0) { + env->regs[reg] = -val; + } else { + env->regs[reg] = val; + } + return cc; +} + +/* negative absolute value 32-bit */ +uint32_t HELPER(nabs_i32)(uint32_t reg, int32_t val) +{ + uint32_t cc; + if (val) cc = 1; + else cc = 0; + + if (val < 0) { + env->regs[reg] = (env->regs[reg] & 0xffffffff00000000ULL) | val; + } else { + env->regs[reg] = (env->regs[reg] & 0xffffffff00000000ULL) | ((uint32_t)-val); + } + return cc; +} + +/* absolute value 64-bit */ +uint32_t HELPER(abs_i64)(uint32_t reg, int64_t val) +{ + uint32_t cc; + if (val == 0x8000000000000000ULL) cc = 3; + else if (val) cc = 1; + else cc = 0; + + if (val < 0) { + env->regs[reg] = -val; + } else { + env->regs[reg] = val; + } + return cc; +} + +/* negative absolute value 64-bit */ +uint32_t HELPER(nabs_i64)(uint32_t reg, int64_t val) +{ + uint32_t cc; + if (val) cc = 1; + else cc = 0; + + if (val < 0) { + env->regs[reg] = val; + } else { + env->regs[reg] = -val; + } + return cc; +} + +/* add with carry 32-bit unsigned */ +uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t r1, uint32_t v2) +{ + uint32_t res; + uint32_t v1 = 
env->regs[r1] & 0xffffffffUL; + res = v1 + v2; + if (cc & 2) res++; + + if (res == 0) { + if (v1) cc = 2; + else cc = 0; + } else { + if (res < v1 || res < v2) { + cc = 3; + } else { + cc = 1; + } + } + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res; + return cc; +} + +/* CC for add with carry 64-bit unsigned (isn't this a duplicate of some other CC function?) */ +uint32_t HELPER(set_cc_addc_u64)(uint64_t v1, uint64_t v2, uint64_t res) +{ + uint32_t cc; + if (res == 0) { + if (v1) cc = 2; + else cc = 0; + } else { + if (res < v1 || res < v2) { + cc = 3; + } else { + cc = 1; + } + } + return cc; +} + +/* store character under mask high + operates on the upper half of r1 */ +uint32_t HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask) +{ + int pos = 56; /* top of the upper half of r1 */ + + while (mask) { + if (mask & 8) { + stb(address, (env->regs[r1] >> pos) & 0xff); + address++; + } + mask = (mask << 1) & 0xf; + pos -= 8; + } + return 0; +} + +/* insert character under mask high + same as icm, but operates on the upper half of r1 */ +uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask) +{ + int pos = 56; /* top of the upper half of r1 */ + uint64_t rmask = 0xff00000000000000ULL; + uint8_t val = 0; + int ccd = 0; + uint32_t cc; + + cc = 0; + + while (mask) { + if (mask & 8) { + env->regs[r1] &= ~rmask; + val = ldub(address); + if ((val & 0x80) && !ccd) cc = 1; + ccd = 1; + if (val && cc == 0) cc = 2; + env->regs[r1] |= (uint64_t)val << pos; + address++; + } + mask = (mask << 1) & 0xf; + pos -= 8; + rmask >>= 8; + } + return cc; +} + +/* insert psw mask and condition code into r1 */ +void HELPER(ipm)(uint32_t cc, uint32_t r1) +{ + uint64_t r = env->regs[r1]; + r &= 0xffffffff00ffffffULL; + r |= (cc << 28) | ( (env->psw.mask >> 40) & 0xf ); + env->regs[r1] = r; + HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__, cc, env->psw.mask, r); +} + +/* store access registers r1 to r3 in memory at a2 */ +void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + int i; + for (i = r1; i != ((r3 + 1) & 15); i = (i + 1) & 15) { + stl(a2, env->aregs[i]); + a2 += 4; + } +} + +/* move long extended + another memcopy insn with more bells and whistles */ +uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + uint64_t destlen = env->regs[r1 + 1]; + uint64_t dest = env->regs[r1]; + uint64_t srclen = env->regs[r3 + 1]; + uint64_t src = env->regs[r3]; + uint8_t pad = a2 & 0xff; + uint8_t v; + uint32_t cc; + if (destlen == srclen) cc = 0; + else if (destlen < srclen) cc = 1; + else cc = 2; + if (srclen > destlen) srclen = destlen; + for(;destlen && srclen;src++,dest++,destlen--,srclen--) { + v = ldub(src); + stb(dest, v); + } + for(;destlen;dest++,destlen--) { + stb(dest, pad); + } + env->regs[r1 + 1] = destlen; + env->regs[r3 + 1] -= src - env->regs[r3]; /* can't use srclen here, + we trunc'ed it */ + env->regs[r1] = dest; + env->regs[r3] = src; + + return cc; +} + +/* compare logical long extended + memcompare insn with padding */ +uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3) +{ + uint64_t destlen = env->regs[r1 + 1]; + uint64_t dest = env->regs[r1]; + uint64_t srclen = env->regs[r3 + 1]; + uint64_t src = env->regs[r3]; + uint8_t pad = a2 & 0xff; + uint8_t v1 = 0,v2 = 0; + uint32_t cc = 0; + if (!(destlen || srclen)) return cc; + if (srclen > destlen) srclen = destlen; + for(;destlen || srclen;src++,dest++,destlen--,srclen--) { + if (srclen) v1 = ldub(src); + else v1 = pad; + if (destlen) v2 = ldub(dest); + else v2 = pad; + if 
(v1 != v2) break; + } + + env->regs[r1 + 1] = destlen; + env->regs[r3 + 1] -= src - env->regs[r3]; /* can't use srclen here, + we trunc'ed it */ + env->regs[r1] = dest; + env->regs[r3] = src; + + if (v1 < v2) cc = 1; + else if (v1 > v2) cc = 2; + + return cc; +} + +/* subtract unsigned v2 from v1 with borrow */ +uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v1, uint32_t v2) +{ + uint32_t res = v1 + (~v2) + (cc >> 1); + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res; + if (cc & 2) { /* borrow */ + if (v1) return 1; + else return 0; + } else { + if (v1) return 3; + else return 2; + } +} + +/* subtract unsigned v2 from v1 with borrow */ +uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2) +{ + uint64_t res = v1 + (~v2) + (cc >> 1); + env->regs[r1] = res; + if (cc & 2) { /* borrow */ + if (v1) return 1; + else return 0; + } else { + if (v1) return 3; + else return 2; + } +} + +/* condition codes for binary FP ops */ +static uint32_t set_cc_f32(float32 v1, float32 v2) +{ + if (float32_is_nan(v1) || float32_is_nan(v2)) return 3; + else if (float32_eq(v1, v2, &env->fpu_status)) return 0; + else if (float32_lt(v1, v2, &env->fpu_status)) return 1; + else return 2; +} + +static uint32_t set_cc_f64(float64 v1, float64 v2) +{ + if (float64_is_nan(v1) || float64_is_nan(v2)) return 3; + else if (float64_eq(v1, v2, &env->fpu_status)) return 0; + else if (float64_lt(v1, v2, &env->fpu_status)) return 1; + else return 2; +} + +/* condition codes for unary FP ops */ +static uint32_t set_cc_nz_f32(float32 v) +{ + if (float32_is_nan(v)) return 3; + else if (float32_is_zero(v)) return 0; + else if (float32_is_neg(v)) return 1; + else return 2; +} + +static uint32_t set_cc_nz_f64(float64 v) +{ + if (float64_is_nan(v)) return 3; + else if (float64_is_zero(v)) return 0; + else if (float64_is_neg(v)) return 1; + else return 2; +} + +static uint32_t set_cc_nz_f128(float128 v) +{ + if (float128_is_nan(v)) return 3; + else if (float128_is_zero(v)) return 0; + else if (float128_is_neg(v)) return 1; + else return 2; +} + +/* convert 32-bit int to 64-bit float */ +void HELPER(cdfbr)(uint32_t f1, int32_t v2) +{ + HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1); + env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status); +} + +/* convert 32-bit int to 128-bit float */ +void HELPER(cxfbr)(uint32_t f1, int32_t v2) +{ + CPU_QuadU v1; + v1.q = int32_to_float128(v2, &env->fpu_status); + env->fregs[f1].ll = v1.ll.upper; + env->fregs[f1 + 2].ll = v1.ll.lower; +} + +/* convert 64-bit int to 32-bit float */ +void HELPER(cegbr)(uint32_t f1, int64_t v2) +{ + HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1); + env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status); +} + +/* convert 64-bit int to 64-bit float */ +void HELPER(cdgbr)(uint32_t f1, int64_t v2) +{ + HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1); + env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status); +} + +/* convert 64-bit int to 128-bit float */ +void HELPER(cxgbr)(uint32_t f1, int64_t v2) +{ + CPU_QuadU x1; + x1.q = int64_to_float128(v2, &env->fpu_status); + HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2, x1.ll.upper, x1.l); + env->fregs[f1].ll = x1.ll.upper; + env->fregs[f1 + 2].ll = x1.ll.lower; +} + +/* convert 32-bit int to 32-bit float */ +void HELPER(cefbr)(uint32_t f1, int32_t v2) +{ + env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status); + HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2, 
env->fregs[f1].l.upper, f1); +} + +/* 32-bit FP addition RR */ +uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper, env->fregs[f2].l.upper, &env->fpu_status); + HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__, env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1); + return set_cc_nz_f32(env->fregs[f1].l.upper); +} + +/* 64-bit FP addition RR */ +uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d, &env->fpu_status); + HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1); + return set_cc_nz_f64(env->fregs[f1].d); +} + +/* 32-bit FP subtraction RR */ +uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper, env->fregs[f2].l.upper, &env->fpu_status); + HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__, env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1); + return set_cc_nz_f32(env->fregs[f1].l.upper); +} + +/* 64-bit FP subtraction RR */ +uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d, &env->fpu_status); + HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1); + return set_cc_nz_f64(env->fregs[f1].d); +} + +/* 32-bit FP division RR */ +void HELPER(debr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper, env->fregs[f2].l.upper, &env->fpu_status); +} + +/* 128-bit FP division RR */ +void HELPER(dxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + CPU_QuadU res; + res.q = float128_div(v1.q, v2.q, &env->fpu_status); + env->fregs[f1].ll = res.ll.upper; + env->fregs[f1 + 2].ll = res.ll.lower; +} + +/* 64-bit FP multiplication RR */ +void HELPER(mdbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d, &env->fpu_status); +} + +/* 128-bit FP multiplication RR */ +void HELPER(mxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + CPU_QuadU res; + res.q = float128_mul(v1.q, v2.q, &env->fpu_status); + env->fregs[f1].ll = res.ll.upper; + env->fregs[f1 + 2].ll = res.ll.lower; +} + +/* convert 32-bit float to 64-bit float */ +void HELPER(ldebr)(uint32_t r1, uint32_t r2) +{ + env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper, &env->fpu_status); +} + +/* convert 128-bit float to 64-bit float */ +void HELPER(ldxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU x2; + x2.ll.upper = env->fregs[f2].ll; + x2.ll.lower = env->fregs[f2 + 2].ll; + env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status); + HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d); +} + +/* convert 64-bit float to 128-bit float */ +void HELPER(lxdbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU res; + res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status); + env->fregs[f1].ll = res.ll.upper; + env->fregs[f1 + 2].ll = res.ll.lower; +} + +/* convert 64-bit float to 32-bit float */ +void HELPER(ledbr)(uint32_t f1, uint32_t f2) +{ + float64 d2 = env->fregs[f2].d; + env->fregs[f1].l.upper = 
float64_to_float32(d2, &env->fpu_status); +} + +/* convert 128-bit float to 32-bit float */ +void HELPER(lexbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU x2; + x2.ll.upper = env->fregs[f2].ll; + x2.ll.lower = env->fregs[f2 + 2].ll; + env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status); + HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper); +} + +/* absolute value of 32-bit float */ +uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2) +{ + float32 v1; + float32 v2 = env->fregs[f2].d; + v1 = float32_abs(v2); + env->fregs[f1].d = v1; + return set_cc_nz_f32(v1); +} + +/* absolute value of 64-bit float */ +uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2) +{ + float64 v1; + float64 v2 = env->fregs[f2].d; + v1 = float64_abs(v2); + env->fregs[f1].d = v1; + return set_cc_nz_f64(v1); +} + +/* absolute value of 128-bit float */ +uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + v1.q = float128_abs(v2.q); + env->fregs[f1].ll = v1.ll.upper; + env->fregs[f1 + 2].ll = v1.ll.lower; + return set_cc_nz_f128(v1.q); +} + +/* load and test 64-bit float */ +uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = env->fregs[f2].d; + return set_cc_nz_f64(env->fregs[f1].d); +} + +/* load and test 32-bit float */ +uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = env->fregs[f2].l.upper; + return set_cc_nz_f32(env->fregs[f1].l.upper); +} + +/* load and test 128-bit float */ +uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU x; + x.ll.upper = env->fregs[f2].ll; + x.ll.lower = env->fregs[f2 + 2].ll; + env->fregs[f1].ll = x.ll.upper; + env->fregs[f1 + 2].ll = x.ll.lower; + return set_cc_nz_f128(x.q); +} + +/* negative absolute of 32-bit float */ +uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_sub(float32_zero, env->fregs[f2].l.upper, &env->fpu_status); + return set_cc_nz_f32(env->fregs[f1].l.upper); +} + +/* negative absolute of 64-bit float */ +uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_sub(float64_zero, env->fregs[f2].d, &env->fpu_status); + return set_cc_nz_f64(env->fregs[f1].d); +} + +/* convert 64-bit float to 128-bit float */ +uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU x1, x2; + x2.ll.upper = env->fregs[f2].ll; + x2.ll.lower = env->fregs[f2 + 2].ll; + x1.q = float128_sub(float64_to_float128(float64_zero, &env->fpu_status), x2.q, &env->fpu_status); + env->fregs[f1].ll = x1.ll.upper; + env->fregs[f1 + 2].ll = x1.ll.lower; + return set_cc_nz_f128(x1.q); +} + +/* 32-bit FP compare RM */ +uint32_t HELPER(ceb)(uint32_t f1, uint32_t val) +{ + float32 v1 = env->fregs[f1].l.upper; + CPU_FloatU v2; + v2.l = val; + HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__, v1, f1, v2.f); + return set_cc_f32(v1, v2.f); +} + +/* 32-bit FP addition RM */ +uint32_t HELPER(aeb)(uint32_t f1, uint32_t val) +{ + float32 v1 = env->fregs[f1].l.upper; + CPU_FloatU v2; + v2.l = val; + HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__, v1, f1, v2.f); + env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status); + return set_cc_nz_f32(env->fregs[f1].l.upper); +} + +/* 32-bit FP division RM */ +void HELPER(deb)(uint32_t f1, uint32_t val) +{ + float32 v1 = env->fregs[f1].l.upper; + CPU_FloatU v2; + v2.l = val; + HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__, v1, f1, v2.f); + env->fregs[f1].l.upper = 
float32_div(v1, v2.f, &env->fpu_status); +} + +/* 32-bit FP multiplication RM */ +void HELPER(meeb)(uint32_t f1, uint32_t val) +{ + float32 v1 = env->fregs[f1].l.upper; + CPU_FloatU v2; + v2.l = val; + HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__, v1, f1, v2.f); + env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status); +} + +/* 32-bit FP compare RR */ +uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2) +{ + float32 v1 = env->fregs[f1].l.upper; + float32 v2 = env->fregs[f2].l.upper;; + HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__, v1, f1, v2); + return set_cc_f32(v1, v2); +} + +/* 64-bit FP compare RR */ +uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2) +{ + float64 v1 = env->fregs[f1].d; + float64 v2 = env->fregs[f2].d;; + HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__, v1, f1, v2); + return set_cc_f64(v1, v2); +} + +/* 128-bit FP compare RR */ +uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + if (float128_is_nan(v1.q) || float128_is_nan(v2.q)) return 3; + else if (float128_eq(v1.q, v2.q, &env->fpu_status)) return 0; + else if (float128_lt(v1.q, v2.q, &env->fpu_status)) return 1; + else return 2; +} + +/* 64-bit FP compare RM */ +uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2) +{ + float64 v1 = env->fregs[f1].d; + CPU_DoubleU v2; + v2.ll = ldq(a2); + HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1, f1, v2.d); + return set_cc_f64(v1, v2.d); +} + +/* 64-bit FP addition RM */ +uint32_t HELPER(adb)(uint32_t f1, uint64_t a2) +{ + float64 v1 = env->fregs[f1].d; + CPU_DoubleU v2; + v2.ll = ldq(a2); + HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__, v1, f1, v2.d); + env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status); + return set_cc_nz_f64(v1); +} + +/* 32-bit FP subtraction RM */ +uint32_t HELPER(seb)(uint32_t f1, uint32_t val) +{ + float32 v1 = env->fregs[f1].l.upper; + CPU_FloatU v2; + v2.l = val; + env->fregs[f1].l.upper = v1 = float32_sub(v1, v2.f, &env->fpu_status); + return set_cc_nz_f32(v1); +} + +/* 64-bit FP subtraction RM */ +uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2) +{ + float64 v1 = env->fregs[f1].d; + CPU_DoubleU v2; + v2.ll = ldq(a2); + env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status); + return set_cc_nz_f64(v1); +} + +/* 64-bit FP multiplication RM */ +void HELPER(mdb)(uint32_t f1, uint64_t a2) +{ + float64 v1 = env->fregs[f1].d; + CPU_DoubleU v2; + v2.ll = ldq(a2); + HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__, v1, f1, v2.d); + env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status); +} + +/* 64-bit FP division RM */ +void HELPER(ddb)(uint32_t f1, uint64_t a2) +{ + float64 v1 = env->fregs[f1].d; + CPU_DoubleU v2; + v2.ll = ldq(a2); + HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__, v1, f1, v2.d); + env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status); +} + +static void set_round_mode(int m3) +{ + switch (m3) { + case 0: break; /* current mode */ + case 1: /* biased round no nearest */ + case 4: /* round to nearest */ + set_float_rounding_mode(float_round_nearest_even, &env->fpu_status); + break; + case 5: /* round to zero */ + set_float_rounding_mode(float_round_to_zero, &env->fpu_status); + break; + case 6: /* round to +inf */ + set_float_rounding_mode(float_round_up, &env->fpu_status); + break; + case 7: 
/* round to -inf */ + set_float_rounding_mode(float_round_down, &env->fpu_status); + break; + } +} + +/* convert 32-bit float to 64-bit int */ +uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + float32 v2 = env->fregs[f2].l.upper; + set_round_mode(m3); + env->regs[r1] = float32_to_int64(v2, &env->fpu_status); + return set_cc_nz_f32(v2); +} + +/* convert 64-bit float to 64-bit int */ +uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + float64 v2 = env->fregs[f2].d; + set_round_mode(m3); + env->regs[r1] = float64_to_int64(v2, &env->fpu_status); + return set_cc_nz_f64(v2); +} + +/* convert 128-bit float to 64-bit int */ +uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + set_round_mode(m3); + env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status); + if (float128_is_nan(v2.q)) return 3; + else if (float128_is_zero(v2.q)) return 0; + else if (float128_is_neg(v2.q)) return 1; + else return 2; +} + +/* convert 32-bit float to 32-bit int */ +uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + float32 v2 = env->fregs[f2].l.upper; + set_round_mode(m3); + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | float32_to_int32(v2, &env->fpu_status); + return set_cc_nz_f32(v2); +} + +/* convert 64-bit float to 32-bit int */ +uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + float64 v2 = env->fregs[f2].d; + set_round_mode(m3); + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | float64_to_int32(v2, &env->fpu_status); + return set_cc_nz_f64(v2); +} + +/* convert 128-bit float to 32-bit int */ +uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3) +{ + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | float128_to_int32(v2.q, &env->fpu_status); + return set_cc_nz_f128(v2.q); +} + +/* load 32-bit FP zero */ +void HELPER(lzer)(uint32_t f1) +{ + env->fregs[f1].l.upper = float32_zero; +} + +/* load 64-bit FP zero */ +void HELPER(lzdr)(uint32_t f1) +{ + env->fregs[f1].d = float64_zero; +} + +/* load 128-bit FP zero */ +void HELPER(lzxr)(uint32_t f1) +{ + CPU_QuadU x; + x.q = float64_to_float128(float64_zero, &env->fpu_status); + env->fregs[f1].ll = x.ll.upper; + env->fregs[f1 + 1].ll = x.ll.lower; +} + +/* 128-bit FP subtraction RR */ +uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + CPU_QuadU res; + res.q = float128_sub(v1.q, v2.q, &env->fpu_status); + env->fregs[f1].ll = res.ll.upper; + env->fregs[f1 + 2].ll = res.ll.lower; + return set_cc_nz_f128(res.q); +} + +/* 128-bit FP addition RR */ +uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2) +{ + CPU_QuadU v1; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + CPU_QuadU v2; + v2.ll.upper = env->fregs[f2].ll; + v2.ll.lower = env->fregs[f2 + 2].ll; + CPU_QuadU res; + res.q = float128_add(v1.q, v2.q, &env->fpu_status); + env->fregs[f1].ll = res.ll.upper; + env->fregs[f1 + 2].ll = res.ll.lower; + return set_cc_nz_f128(res.q); +} + +/* 32-bit FP multiplication RR */ +void HELPER(meebr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper, env->fregs[f2].l.upper, &env->fpu_status); +} + +/* 64-bit FP division RR */ +void 
HELPER(ddbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d, &env->fpu_status); +} + +/* 64-bit FP multiply and add RM */ +void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3) +{ + HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3); + CPU_DoubleU v2; + v2.ll = ldq(a2); + env->fregs[f1].d = float64_add(env->fregs[f1].d, float64_mul(v2.d, env->fregs[f3].d, &env->fpu_status), &env->fpu_status); +} + +/* 64-bit FP multiply and add RR */ +void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2) +{ + HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3); + env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d, env->fregs[f3].d, &env->fpu_status), env->fregs[f1].d, &env->fpu_status); +} + +/* 64-bit FP multiply and subtract RR */ +void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2) +{ + HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3); + env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d, env->fregs[f3].d, &env->fpu_status), env->fregs[f1].d, &env->fpu_status); +} + +/* 32-bit FP multiply and add RR */ +void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2) +{ + env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper, float32_mul(env->fregs[f2].l.upper, env->fregs[f3].l.upper, &env->fpu_status), &env->fpu_status); +} + +/* convert 64-bit float to 128-bit float */ +void HELPER(lxdb)(uint32_t f1, uint64_t a2) +{ + CPU_DoubleU v2; + v2.ll = ldq(a2); + CPU_QuadU v1; + v1.q = float64_to_float128(v2.d, &env->fpu_status); + env->fregs[f1].ll = v1.ll.upper; + env->fregs[f1 + 2].ll = v1.ll.lower; +} + +/* test data class 32-bit */ +uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2) +{ + float32 v1 = env->fregs[f1].l.upper; + int neg = float32_is_neg(v1); + uint32_t cc = 0; + HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg); + if (float32_is_zero(v1) && (m2 & (1 << (11-neg)))) cc = 1; + else if (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) cc = 1; + else if (float32_is_nan(v1) && (m2 & (1 << (3-neg)))) cc = 1; + else if (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg)))) cc = 1; + else /* assume normalized number */ if (m2 & (1 << (9-neg))) cc = 1; + /* FIXME: denormalized? */ + return cc; +} + +/* test data class 64-bit */ +uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2) +{ + float64 v1 = env->fregs[f1].d; + int neg = float64_is_neg(v1); + uint32_t cc = 0; + HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg); + if (float64_is_zero(v1) && (m2 & (1 << (11-neg)))) cc = 1; + else if (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) cc = 1; + else if (float64_is_nan(v1) && (m2 & (1 << (3-neg)))) cc = 1; + else if (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg)))) cc = 1; + else /* assume normalized number */ if (m2 & (1 << (9-neg))) cc = 1; + /* FIXME: denormalized? */ + return cc; +} + +/* test data class 128-bit */ +uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2) +{ + CPU_QuadU v1; + uint32_t cc = 0; + v1.ll.upper = env->fregs[f1].ll; + v1.ll.lower = env->fregs[f1 + 2].ll; + + int neg = float128_is_neg(v1.q); + if (float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) cc = 1; + else if (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) cc = 1; + else if (float128_is_nan(v1.q) && (m2 & (1 << (3-neg)))) cc = 1; + else if (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg)))) cc = 1; + else /* assume normalized number */ if (m2 & (1 << (9-neg))) cc = 1; + /* FIXME: denormalized? 
*/ + return cc; +} + +/* find leftmost one */ +uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2) +{ + uint64_t res = 0; + uint64_t ov2 = v2; + while (!(v2 & 0x8000000000000000ULL) && v2) { + v2 <<= 1; + res++; + } + if (!v2) { + env->regs[r1] = 64; + env->regs[r1 + 1] = 0; + return 0; + } + else { + env->regs[r1] = res; + env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res); + return 2; + } +} + +/* square root 64-bit RR */ +void HELPER(sqdbr)(uint32_t f1, uint32_t f2) +{ + env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status); +} Index: qemu-0.14.1/target-s390x/translate.c =================================================================== --- qemu-0.14.1.orig/target-s390x/translate.c +++ qemu-0.14.1/target-s390x/translate.c @@ -16,6 +16,18 @@ * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ +#include +#include +#include +#include +#include + +#define S390X_DEBUG_DISAS +#ifdef S390X_DEBUG_DISAS +# define LOG_DISAS(...) qemu_log(__VA_ARGS__) +#else +# define LOG_DISAS(...) do { } while (0) +#endif #include "cpu.h" #include "exec-all.h" @@ -23,6 +35,25 @@ #include "tcg-op.h" #include "qemu-log.h" +/* global register indexes */ +static TCGv_ptr cpu_env; + +#include "gen-icount.h" +#include "helpers.h" +#define GEN_HELPER 1 +#include "helpers.h" + +typedef struct DisasContext DisasContext; +struct DisasContext { + uint64_t pc; + int is_jmp; + CPUS390XState *env; + struct TranslationBlock *tb; +}; + +#define DISAS_EXCP 4 +#define DISAS_SVC 5 + void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf, int flags) { @@ -46,12 +77,2776 @@ void cpu_dump_state(CPUState *env, FILE cpu_fprintf(f, "PSW=mask %016lx addr %016lx cc %02x\n", env->psw.mask, env->psw.addr, env->cc); } +#define TCGREGS + +static TCGv global_cc; +#ifdef TCGREGS +/* registers stored in TCG variables enhance performance */ +static TCGv_i64 tcgregs[16]; +static TCGv_i32 tcgregs32[16]; +#endif +static TCGv cc; +static TCGv psw_addr; + +void s390x_translate_init(void) +{ + cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); + global_cc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, cc), "global_cc"); +#ifdef TCGREGS + int i; + char rn[4]; + for (i = 0; i < 16; i++) { + sprintf(rn, "R%d", i); + tcgregs[i] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, regs[i]), strdup(rn)); + sprintf(rn, "r%d", i); + tcgregs32[i] = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUState, regs[i]) +#ifdef HOST_WORDS_BIGENDIAN + + 4 +#endif + , strdup(rn)); + } +#endif + psw_addr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, psw.addr), "psw_addr"); +} + +#ifdef TCGREGS +static inline void sync_reg64(int reg) +{ + tcg_gen_sync_i64(tcgregs[reg]); +} +static inline void sync_reg32(int reg) +{ + tcg_gen_sync_i32(tcgregs32[reg]); +} +#endif + +static TCGv load_reg(int reg) +{ + TCGv r = tcg_temp_new_i64(); +#ifdef TCGREGS + sync_reg32(reg); + tcg_gen_mov_i64(r, tcgregs[reg]); + return r; +#else + tcg_gen_ld_i64(r, cpu_env, offsetof(CPUState, regs[reg])); + return r; +#endif +} + +static TCGv load_freg(int reg) +{ + TCGv r = tcg_temp_new_i64(); + tcg_gen_ld_i64(r, cpu_env, offsetof(CPUState, fregs[reg].d)); + return r; +} + +static TCGv_i32 load_freg32(int reg) +{ + TCGv_i32 r = tcg_temp_new_i32(); + tcg_gen_ld_i32(r, cpu_env, offsetof(CPUState, fregs[reg].l.upper)); + return r; +} + +static void load_reg32_var(TCGv_i32 r, int reg) +{ +#ifdef TCGREGS + sync_reg64(reg); + tcg_gen_mov_i32(r, tcgregs32[reg]); +#else +#ifdef 
HOST_WORDS_BIGENDIAN + tcg_gen_ld_i32(r, cpu_env, offsetof(CPUState, regs[reg]) + 4); +#else + tcg_gen_ld_i32(r, cpu_env, offsetof(CPUState, regs[reg])); +#endif +#endif +} + +static TCGv_i32 load_reg32(int reg) +{ + TCGv_i32 r = tcg_temp_new_i32(); + load_reg32_var(r, reg); + return r; +} + +static void store_reg(int reg, TCGv v) +{ +#ifdef TCGREGS + sync_reg32(reg); + tcg_gen_mov_i64(tcgregs[reg], v); +#else + tcg_gen_st_i64(v, cpu_env, offsetof(CPUState, regs[reg])); +#endif +} + +static void store_freg(int reg, TCGv v) +{ + tcg_gen_st_i64(v, cpu_env, offsetof(CPUState, fregs[reg].d)); +} + +static void store_reg32(int reg, TCGv_i32 v) +{ +#ifdef TCGREGS + sync_reg64(reg); + tcg_gen_mov_i32(tcgregs32[reg], v); +#else +#ifdef HOST_WORDS_BIGENDIAN + tcg_gen_st_i32(v, cpu_env, offsetof(CPUState, regs[reg]) + 4); +#else + tcg_gen_st_i32(v, cpu_env, offsetof(CPUState, regs[reg])); +#endif +#endif +} + +static void store_reg8(int reg, TCGv_i32 v) +{ +#ifdef TCGREGS + TCGv_i32 tmp = tcg_temp_new_i32(); + sync_reg64(reg); + tcg_gen_andi_i32(tmp, tcgregs32[reg], 0xffffff00UL); + tcg_gen_or_i32(tcgregs32[reg], tmp, v); + tcg_temp_free(tmp); +#else +#ifdef HOST_WORDS_BIGENDIAN + tcg_gen_st8_i32(v, cpu_env, offsetof(CPUState, regs[reg]) + 7); +#else + tcg_gen_st8_i32(v, cpu_env, offsetof(CPUState, regs[reg])); +#endif +#endif +} + +static void store_freg32(int reg, TCGv v) +{ + tcg_gen_st_i32(v, cpu_env, offsetof(CPUState, fregs[reg].l.upper)); +} + +static void gen_illegal_opcode(DisasContext *s) +{ + TCGv tmp = tcg_const_i64(EXCP_SPEC); + gen_helper_exception(tmp); + tcg_temp_free(tmp); + s->is_jmp = DISAS_EXCP; +} + +#define DEBUGINSN LOG_DISAS("insn: 0x%lx\n", insn); + +static TCGv get_address(int x2, int b2, int d2) +{ + TCGv tmp = 0, tmp2; + if (d2) tmp = tcg_const_i64(d2); + if (x2) { + if (d2) { + tmp2 = load_reg(x2); + tcg_gen_add_i64(tmp, tmp, tmp2); + tcg_temp_free(tmp2); + } + else { + tmp = load_reg(x2); + } + } + if (b2) { + if (d2 || x2) { + tmp2 = load_reg(b2); + tcg_gen_add_i64(tmp, tmp, tmp2); + tcg_temp_free(tmp2); + } + else { + tmp = load_reg(b2); + } + } + + if (!(d2 || x2 || b2)) tmp = tcg_const_i64(0); + + return tmp; +} + +static inline void set_cc_nz_u32(TCGv val) +{ + gen_helper_set_cc_nz_u32(cc, val); +} + +static inline void set_cc_nz_u64(TCGv val) +{ + gen_helper_set_cc_nz_u64(cc, val); +} + +static inline void set_cc_s32(TCGv val) +{ + gen_helper_set_cc_s32(cc, val); +} + +static inline void set_cc_s64(TCGv val) +{ + gen_helper_set_cc_s64(cc, val); +} + +static inline void cmp_s32(TCGv v1, TCGv v2) +{ + gen_helper_cmp_s32(cc, v1, v2); +} + +static inline void cmp_u32(TCGv v1, TCGv v2) +{ + gen_helper_cmp_u32(cc, v1, v2); +} + +/* this is a hysterical raisin */ +static inline void cmp_s32c(TCGv v1, int32_t v2) +{ + TCGv_i32 tmp = tcg_const_i32(v2); + gen_helper_cmp_s32(cc, v1, tmp); + tcg_temp_free(tmp); +} +static inline void cmp_u32c(TCGv v1, uint32_t v2) +{ + TCGv_i32 tmp = tcg_const_i32(v2); + gen_helper_cmp_u32(cc, v1, tmp); + tcg_temp_free(tmp); +} + + +static inline void cmp_s64(TCGv v1, TCGv v2) +{ + gen_helper_cmp_s64(cc, v1, v2); +} + +static inline void cmp_u64(TCGv v1, TCGv v2) +{ + gen_helper_cmp_u64(cc, v1, v2); +} + +/* see cmp_[su]32c() */ +static inline void cmp_s64c(TCGv v1, int64_t v2) +{ + TCGv_i32 tmp = tcg_const_i64(v2); + gen_helper_cmp_s64(cc, v1, tmp); + tcg_temp_free(tmp); +} +static inline void cmp_u64c(TCGv v1, uint64_t v2) +{ + TCGv_i32 tmp = tcg_const_i64(v2); + gen_helper_cmp_u64(cc, v1, tmp); + tcg_temp_free(tmp); +} + +static void 
gen_bcr(uint32_t mask, int tr, uint64_t offset) +{ + TCGv target, o; + TCGv_i32 m; + if (mask == 0xf) { /* unconditional */ + target = load_reg(tr); + tcg_gen_mov_i64(psw_addr, target); + } + else { + m = tcg_const_i32(mask); + o = tcg_const_i64(offset); + gen_helper_bcr(cc, m, (target = load_reg(tr)), o); + tcg_temp_free(m); + tcg_temp_free(o); + } + tcg_temp_free(target); +} + +static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong pc) +{ + TranslationBlock *tb; + + tb = s->tb; + /* NOTE: we handle the case where the TB spans two pages here */ + if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) || + (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) { + /* jump to same page: we can use a direct jump */ + tcg_gen_mov_i32(global_cc, cc); + tcg_gen_goto_tb(tb_num); + tcg_gen_movi_i64(psw_addr, pc); + tcg_gen_exit_tb((long)tb + tb_num); + } else { + /* jump to another page: currently not optimized */ + tcg_gen_movi_i64(psw_addr, pc); + tcg_gen_mov_i32(global_cc, cc); + tcg_gen_exit_tb(0); + } +} + +static void gen_brc(uint32_t mask, DisasContext *s, int32_t offset) +{ + TCGv_i32 r; + TCGv_i32 tmp, tmp2; + int skip; + + if (mask == 0xf) { /* unconditional */ + //tcg_gen_movi_i64(psw_addr, s->pc + offset); + gen_goto_tb(s, 0, s->pc + offset); + } + else { + tmp = tcg_const_i32(3); + tcg_gen_sub_i32(tmp, tmp, cc); /* 3 - cc */ + tmp2 = tcg_const_i32(1); + tcg_gen_shl_i32(tmp2, tmp2, tmp); /* 1 << (3 - cc) */ + r = tcg_const_i32(mask); + tcg_gen_and_i32(r, r, tmp2); /* mask & (1 << (3 - cc)) */ + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + skip = gen_new_label(); + tcg_gen_brcondi_i32(TCG_COND_EQ, r, 0, skip); + gen_goto_tb(s, 0, s->pc + offset); + gen_set_label(skip); + gen_goto_tb(s, 1, s->pc + 4); + //tcg_gen_mov_i32(global_cc, cc); + tcg_temp_free(r); + } + s->is_jmp = DISAS_TB_JUMP; +} + +static void gen_set_cc_add64(TCGv v1, TCGv v2, TCGv vr) +{ + gen_helper_set_cc_add64(cc, v1, v2, vr); +} + +static void disas_e3(DisasContext* s, int op, int r1, int x2, int b2, int d2) +{ + TCGv tmp, tmp2, tmp3; + + LOG_DISAS("disas_e3: op 0x%x r1 %d x2 %d b2 %d d2 %d\n", op, r1, x2, b2, d2); + tmp = get_address(x2, b2, d2); + switch (op) { + case 0x2: /* LTG R1,D2(X2,B2) [RXY] */ + case 0x4: /* lg r1,d2(x2,b2) */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, tmp, 1); + store_reg(r1, tmp2); + if (op == 0x2) set_cc_s64(tmp2); + tcg_temp_free(tmp2); + break; + case 0x12: /* LT R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32s(tmp2, tmp, 1); + store_reg32(r1, tmp2); + set_cc_s32(tmp2); + tcg_temp_free(tmp2); + break; + case 0xc: /* MSG R1,D2(X2,B2) [RXY] */ + case 0x1c: /* MSGF R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + if (op == 0xc) { + tcg_gen_qemu_ld64(tmp2, tmp, 1); + } + else { + tcg_gen_qemu_ld32s(tmp2, tmp, 1); + } + tcg_temp_free(tmp); + tmp = load_reg(r1); + tcg_gen_mul_i64(tmp, tmp, tmp2); + store_reg(r1, tmp); + tcg_temp_free(tmp2); + break; + case 0xd: /* DSG R1,D2(X2,B2) [RXY] */ + case 0x1d: /* DSGF R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + if (op == 0x1d) { + tcg_gen_qemu_ld32s(tmp2, tmp, 1); + } + else { + tcg_gen_qemu_ld64(tmp2, tmp, 1); + } + tcg_temp_free(tmp); + tmp = load_reg(r1 + 1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_div_i64(tmp3, tmp, tmp2); + store_reg(r1 + 1, tmp3); + tcg_gen_rem_i64(tmp3, tmp, tmp2); + store_reg(r1, tmp3); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x8: /* AG R1,D2(X2,B2) [RXY] */ + case 0xa: /* ALG R1,D2(X2,B2) [RXY] */ + case 0x18: /* AGF R1,D2(X2,B2) 
[RXY] */ + case 0x1a: /* ALGF R1,D2(X2,B2) [RXY] */ + if (op == 0x1a) { + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + } + else if (op == 0x18) { + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32s(tmp2, tmp, 1); + } + else { + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, tmp, 1); + } + tcg_temp_free(tmp); + tmp = load_reg(r1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_add_i64(tmp3, tmp, tmp2); + store_reg(r1, tmp3); + switch (op) { + case 0x8: case 0x18: gen_set_cc_add64(tmp, tmp2, tmp3); break; + case 0xa: case 0x1a: gen_helper_set_cc_addu64(cc, tmp, tmp2, tmp3); break; + default: tcg_abort(); + } + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x9: /* SG R1,D2(X2,B2) [RXY] */ + case 0xb: /* SLG R1,D2(X2,B2) [RXY] */ + case 0x19: /* SGF R1,D2(X2,B2) [RXY] */ + case 0x1b: /* SLGF R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + if (op == 0x19) { + tcg_gen_qemu_ld32s(tmp2, tmp, 1); + } + else if (op == 0x1b) { + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + } + else { + tcg_gen_qemu_ld64(tmp2, tmp, 1); + } + tcg_temp_free(tmp); + tmp = load_reg(r1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_sub_i64(tmp3, tmp, tmp2); + store_reg(r1, tmp3); + switch (op) { + case 0x9: case 0x19: gen_helper_set_cc_sub64(cc, tmp, tmp2, tmp3); break; + case 0xb: case 0x1b: gen_helper_set_cc_subu64(cc, tmp, tmp2, tmp3); break; + default: tcg_abort(); + } + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x14: /* LGF R1,D2(X2,B2) [RXY] */ + case 0x16: /* LLGF R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + switch (op) { + case 0x14: tcg_gen_ext32s_i64(tmp2, tmp2); break; + case 0x16: break; + default: tcg_abort(); + } + store_reg(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 0x15: /* LGH R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16s(tmp2, tmp, 1); + store_reg(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 0x17: /* LLGT R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + tcg_gen_andi_i64(tmp2, tmp2, 0x7fffffffULL); + store_reg(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 0x1e: /* LRV R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + tcg_gen_bswap32_i64(tmp2, tmp2); + store_reg32(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 0x20: /* CG R1,D2(X2,B2) [RXY] */ + case 0x21: /* CLG R1,D2(X2,B2) */ + case 0x30: /* CGF R1,D2(X2,B2) [RXY] */ + case 0x31: /* CLGF R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + switch (op) { + case 0x20: + case 0x21: + tcg_gen_qemu_ld64(tmp2, tmp, 1); + break; + case 0x30: + tcg_gen_qemu_ld32s(tmp2, tmp, 1); + break; + case 0x31: + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + break; + default: + tcg_abort(); + } + tcg_temp_free(tmp); + tmp = load_reg(r1); + switch (op) { + case 0x20: case 0x30: cmp_s64(tmp, tmp2); break; + case 0x21: case 0x31: cmp_u64(tmp, tmp2); break; + default: tcg_abort(); + } + tcg_temp_free(tmp2); + break; + case 0x24: /* stg r1, d2(x2,b2) */ + tmp2 = load_reg(r1); + tcg_gen_qemu_st64(tmp2, tmp, 1); + tcg_temp_free(tmp2); + break; + case 0x3e: /* STRV R1,D2(X2,B2) [RXY] */ + tmp2 = load_reg32(r1); + tcg_gen_bswap32_i32(tmp2, tmp2); + tcg_gen_qemu_st32(tmp2, tmp, 1); + tcg_temp_free(tmp2); + break; + case 0x50: /* STY R1,D2(X2,B2) [RXY] */ + tmp2 = load_reg32(r1); + tcg_gen_qemu_st32(tmp2, tmp, 1); + tcg_temp_free(tmp2); + break; + case 0x57: /* XY R1,D2(X2,B2) [RXY] */ + tmp2 = load_reg32(r1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp3, tmp, 1); + 
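
The AG/AGF cases above defer to set_cc_add64, which has to implement the usual S/390 condition-code rule for signed addition: 0 for a zero result, 1 for a negative result, 2 for a positive result, 3 on overflow. A minimal C sketch of that rule (illustrative only, not the helper from this patch; the sum is assumed to have been computed with wrap-around already):

    static uint32_t cc_signed_add(int64_t a, int64_t b, int64_t result)
    {
        /* signed overflow: operands share a sign, the result does not */
        if ((a < 0) == (b < 0) && (a < 0) != (result < 0)) {
            return 3;
        }
        return result == 0 ? 0 : (result < 0 ? 1 : 2);
    }
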
tcg_gen_xor_i32(tmp, tmp2, tmp3); + store_reg32(r1, tmp); + set_cc_nz_u32(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x58: /* LY R1,D2(X2,B2) [RXY] */ + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp3, tmp, 1); + store_reg32(r1, tmp3); + tcg_temp_free(tmp3); + break; + case 0x5a: /* AY R1,D2(X2,B2) [RXY] */ + case 0x5b: /* SY R1,D2(X2,B2) [RXY] */ + tmp2 = load_reg32(r1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32s(tmp3, tmp, 1); + switch (op) { + case 0x5a: tcg_gen_add_i32(tmp, tmp2, tmp3); break; + case 0x5b: tcg_gen_sub_i32(tmp, tmp2, tmp3); break; + default: tcg_abort(); + } + store_reg32(r1, tmp); + switch (op) { + case 0x5a: gen_helper_set_cc_add32(cc, tmp2, tmp3, tmp); break; + case 0x5b: gen_helper_set_cc_sub32(cc, tmp2, tmp3, tmp); break; + default: tcg_abort(); + } + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x71: /* LAY R1,D2(X2,B2) [RXY] */ + store_reg(r1, tmp); + break; + case 0x72: /* STCY R1,D2(X2,B2) [RXY] */ + tmp2 = load_reg32(r1); + tcg_gen_qemu_st8(tmp2, tmp, 1); + tcg_temp_free(tmp2); + break; + case 0x73: /* ICY R1,D2(X2,B2) [RXY] */ + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp3, tmp, 1); + store_reg8(r1, tmp3); + tcg_temp_free(tmp3); + break; + case 0x76: /* LB R1,D2(X2,B2) [RXY] */ + case 0x77: /* LGB R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8s(tmp2, tmp, 1); + switch (op) { + case 0x76: + tcg_gen_ext8s_i64(tmp2, tmp2); + store_reg32(r1, tmp2); + break; + case 0x77: + tcg_gen_ext8s_i64(tmp2, tmp2); + store_reg(r1, tmp2); + break; + default: tcg_abort(); + } + tcg_temp_free(tmp2); + break; + case 0x78: /* LHY R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16s(tmp2, tmp, 1); + store_reg32(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 0x80: /* NG R1,D2(X2,B2) [RXY] */ + case 0x81: /* OG R1,D2(X2,B2) [RXY] */ + case 0x82: /* XG R1,D2(X2,B2) [RXY] */ + tmp2 = load_reg(r1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp3, tmp, 1); + switch (op) { + case 0x80: tcg_gen_and_i64(tmp, tmp2, tmp3); break; + case 0x81: tcg_gen_or_i64(tmp, tmp2, tmp3); break; + case 0x82: tcg_gen_xor_i64(tmp, tmp2, tmp3); break; + default: tcg_abort(); + } + store_reg(r1, tmp); + set_cc_nz_u64(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x86: /* MLG R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, tmp, 1); + tcg_temp_free(tmp); + tmp = tcg_const_i32(r1); + gen_helper_mlg(tmp, tmp2); + tcg_temp_free(tmp2); + break; + case 0x87: /* DLG R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, tmp, 1); + tcg_temp_free(tmp); + tmp = tcg_const_i32(r1); + gen_helper_dlg(tmp, tmp2); + tcg_temp_free(tmp2); + break; + case 0x88: /* ALCG R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, tmp, 1); + tcg_temp_free(tmp); + tmp = load_reg(r1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_shri_i64(tmp3, cc, 1); + tcg_gen_andi_i64(tmp3, tmp3, 1); + tcg_gen_add_i64(tmp3, tmp2, tmp3);; + tcg_gen_add_i64(tmp3, tmp, tmp3); + store_reg(r1, tmp3); + gen_helper_set_cc_addc_u64(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x89: /* SLBG R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, tmp, 1); + tcg_temp_free(tmp); + tmp = load_reg(r1); + tmp3 = tcg_const_i32(r1); + gen_helper_slbg(cc, cc, tmp3, tmp, tmp2); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x90: /* LLGC R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + 
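
The ALCG case above recovers the incoming carry from the saved condition code with a shift and mask; that works because the CC left behind by a logical add encodes the carry in its high bit: 0 zero/no carry, 1 nonzero/no carry, 2 zero/carry, 3 nonzero/carry. A sketch of the plain (carry-less) ALG rule, illustrative only; ALCG itself must also treat a wrap caused by the carry-in as a carry-out:

    static uint32_t cc_logical_add(uint64_t a, uint64_t b)
    {
        uint64_t r = a + b;                  /* wraps modulo 2^64 */
        unsigned carry = r < a;              /* wrap-around means carry out */
        return (carry << 1) | (r != 0 ? 1 : 0);
    }
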
tcg_gen_qemu_ld8u(tmp2, tmp, 1); + store_reg(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 0x91: /* LLGH R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16u(tmp2, tmp, 1); + store_reg(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 0x94: /* LLC R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp2, tmp, 1); + store_reg32(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 0x95: /* LLH R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16u(tmp2, tmp, 1); + store_reg32(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 0x98: /* ALC R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + tcg_temp_free(tmp); + tmp = tcg_const_i32(r1); + gen_helper_addc_u32(cc, cc, tmp, tmp2); + tcg_temp_free(tmp2); + break; + case 0x99: /* SLB R1,D2(X2,B2) [RXY] */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + tcg_temp_free(tmp); + tmp = load_reg32(r1); + tmp3 = tcg_const_i32(r1); + gen_helper_slb(cc, cc, tmp3, tmp, tmp2); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + default: + LOG_DISAS("illegal e3 operation 0x%x\n", op); + gen_illegal_opcode(s); + break; + } + tcg_temp_free(tmp); +} + +static void disas_eb(DisasContext *s, int op, int r1, int r3, int b2, int d2) +{ + TCGv tmp, tmp2, tmp3, tmp4; + int i; + + LOG_DISAS("disas_eb: op 0x%x r1 %d r3 %d b2 %d d2 0x%x\n", op, r1, r3, b2, d2); + switch (op) { + case 0xc: /* SRLG R1,R3,D2(B2) [RSY] */ + case 0xd: /* SLLG R1,R3,D2(B2) [RSY] */ + case 0xa: /* SRAG R1,R3,D2(B2) [RSY] */ + case 0x1c: /* RLLG R1,R3,D2(B2) [RSY] */ + if (b2) { + tmp = get_address(0, b2, d2); + tcg_gen_andi_i64(tmp, tmp, 0x3f); + } else { + tmp = tcg_const_i64(d2 & 0x3f); + } + tmp2 = load_reg(r3); + tmp3 = tcg_temp_new_i64(); + switch (op) { + case 0xc: tcg_gen_shr_i64(tmp3, tmp2, tmp); break; + case 0xd: tcg_gen_shl_i64(tmp3, tmp2, tmp); break; + case 0xa: tcg_gen_sar_i64(tmp3, tmp2, tmp); break; + case 0x1c: tcg_gen_rotl_i64(tmp3, tmp2, tmp); break; + default: tcg_abort(); break; + } + store_reg(r1, tmp3); + if (op == 0xa) set_cc_s64(tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x1d: /* RLL R1,R3,D2(B2) [RSY] */ + if (b2) { + tmp = get_address(0, b2, d2); + tcg_gen_andi_i64(tmp, tmp, 0x3f); + } else { + tmp = tcg_const_i64(d2 & 0x3f); + } + tmp2 = load_reg32(r3); + tmp3 = tcg_temp_new_i32(); + switch (op) { + case 0x1d: tcg_gen_rotl_i32(tmp3, tmp2, tmp); break; + default: tcg_abort(); break; + } + store_reg32(r1, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x4: /* LMG R1,R3,D2(B2) [RSY] */ + case 0x24: /* stmg */ + /* Apparently, unrolling lmg/stmg of any size gains performance - + even for very long ones... 
*/ + if (r3 > r1) { + tmp = get_address(0, b2, d2); + for (i = r1; i <= r3; i++) { + if (op == 0x4) { + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, tmp, 1); + store_reg(i, tmp2); + /* At least one register is usually read after an lmg + (br %rsomething), which is why freeing them is + detrimental to performance */ + } + else { + tmp2 = load_reg(i); + tcg_gen_qemu_st64(tmp2, tmp, 1); + /* R15 is usually read after an stmg; other registers + generally aren't and can be free'd */ + if (i != 15) tcg_temp_free(tmp2); + } + tcg_gen_addi_i64(tmp, tmp, 8); + } + } + else { + tmp = tcg_const_i32(r1); + tmp2 = tcg_const_i32(r3); + tmp3 = tcg_const_i32(b2); + tmp4 = tcg_const_i32(d2); + if (op == 0x4) gen_helper_lmg(tmp, tmp2, tmp3, tmp4); + else gen_helper_stmg(tmp, tmp2, tmp3, tmp4); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + tcg_temp_free(tmp4); + } + tcg_temp_free(tmp); + break; + case 0x2c: /* STCMH R1,M3,D2(B2) [RSY] */ + tmp2 = get_address(0, b2, d2); + tmp = tcg_const_i32(r1); + tmp3 = tcg_const_i32(r3); + gen_helper_stcmh(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x30: /* CSG R1,R3,D2(B2) [RSY] */ + tmp2 = get_address(0, b2, d2); + tmp = tcg_const_i32(r1); + tmp3 = tcg_const_i32(r3); + gen_helper_csg(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x3e: /* CDSG R1,R3,D2(B2) [RSY] */ + tmp2 = get_address(0, b2, d2); + tmp = tcg_const_i32(r1); + tmp3 = tcg_const_i32(r3); + gen_helper_cdsg(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x51: /* TMY D1(B1),I2 [SIY] */ + tmp = get_address(0, b2, d2); /* SIY -> this is the destination */ + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp2, tmp, 1); + tcg_temp_free(tmp); + tmp = tcg_const_i32((r1 << 4) | r3); + gen_helper_tm(cc, tmp2, tmp); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x52: /* MVIY D1(B1),I2 [SIY] */ + tmp2 = tcg_const_i32((r1 << 4) | r3); + tmp = get_address(0, b2, d2); /* SIY -> this is the destination */ + tcg_gen_qemu_st8(tmp2, tmp, 1); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x55: /* CLIY D1(B1),I2 [SIY] */ + tmp3 = get_address(0, b2, d2); /* SIY -> this is the 1st operand */ + tmp = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp, tmp3, 1); + cmp_u32c(tmp, (r1 << 4) | r3); + tcg_temp_free(tmp); + tcg_temp_free(tmp3); + break; + case 0x80: /* ICMH R1,M3,D2(B2) [RSY] */ + tmp2 = get_address(0, b2, d2); + tmp = tcg_const_i32(r1); + tmp3 = tcg_const_i32(r3); + gen_helper_icmh(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + default: + LOG_DISAS("illegal eb operation 0x%x\n", op); + gen_illegal_opcode(s); + break; + } +} + +static void disas_ed(DisasContext *s, int op, int r1, int x2, int b2, int d2, int r1b) +{ + TCGv_i32 tmp; + TCGv tmp2, tmp3; + tmp2 = get_address(x2, b2, d2); + tmp = tcg_const_i32(r1); + switch (op) { + case 0x5: /* LXDB R1,D2(X2,B2) [RXE] */ + gen_helper_lxdb(tmp, tmp2); + break; + case 0x9: /* CEB R1,D2(X2,B2) [RXE] */ + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp3, tmp2, 1); + gen_helper_ceb(cc, tmp, tmp3); + tcg_temp_free(tmp3); + break; + case 0xa: /* AEB R1,D2(X2,B2) [RXE] */ + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp3, tmp2, 1); + gen_helper_aeb(cc, tmp, tmp3); + tcg_temp_free(tmp3); + break; + case 0xb: /* SEB R1,D2(X2,B2) [RXE] */ + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp3, tmp2, 1); + 
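
The lmg/stmg unrolling earlier in this hunk only handles r3 > r1; architecturally, LMG/STMG process registers r1 through r3 in ascending order with wrap-around from 15 back to 0, one doubleword per register, which is the case left to the lmg/stmg helpers. A self-contained sketch of that ordering (illustrative only; a little-endian host would additionally byte-swap each doubleword):

    static void lmg_order(uint64_t regs[16], int r1, int r3,
                          const uint64_t *doublewords)
    {
        int i = r1;
        for (;;) {
            regs[i] = *doublewords++;
            if (i == r3) {
                break;
            }
            i = (i + 1) & 15;                /* wrap from r15 to r0 */
        }
    }
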
gen_helper_seb(cc, tmp, tmp3); + tcg_temp_free(tmp3); + break; + case 0xd: /* DEB R1,D2(X2,B2) [RXE] */ + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp3, tmp2, 1); + gen_helper_deb(tmp, tmp3); + tcg_temp_free(tmp3); + break; + case 0x10: /* TCEB R1,D2(X2,B2) [RXE] */ + gen_helper_tceb(cc, tmp, tmp2); + break; + case 0x11: /* TCDB R1,D2(X2,B2) [RXE] */ + gen_helper_tcdb(cc, tmp, tmp2); + break; + case 0x12: /* TCXB R1,D2(X2,B2) [RXE] */ + gen_helper_tcxb(cc, tmp, tmp2); + break; + case 0x17: /* MEEB R1,D2(X2,B2) [RXE] */ + tmp3 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp3, tmp2, 1); + gen_helper_meeb(tmp, tmp3); + tcg_temp_free(tmp3); + break; + case 0x19: /* CDB R1,D2(X2,B2) [RXE] */ + gen_helper_cdb(cc, tmp, tmp2); + break; + case 0x1a: /* ADB R1,D2(X2,B2) [RXE] */ + gen_helper_adb(cc, tmp, tmp2); + break; + case 0x1b: /* SDB R1,D2(X2,B2) [RXE] */ + gen_helper_sdb(cc, tmp, tmp2); + break; + case 0x1c: /* MDB R1,D2(X2,B2) [RXE] */ + gen_helper_mdb(tmp, tmp2); + break; + case 0x1d: /* DDB R1,D2(X2,B2) [RXE] */ + gen_helper_ddb(tmp, tmp2); + break; + case 0x1e: /* MADB R1,R3,D2(X2,B2) [RXF] */ + /* for RXF insns, r1 is R3 and r1b is R1 */ + tmp3 = tcg_const_i32(r1b); + gen_helper_madb(tmp3, tmp2, tmp); + tcg_temp_free(tmp3); + break; + default: + LOG_DISAS("illegal ed operation 0x%x\n", op); + gen_illegal_opcode(s); + return; + } + tcg_temp_free(tmp); + tcg_temp_free(tmp2); +} + +static void disas_a5(DisasContext *s, int op, int r1, int i2) +{ + TCGv tmp, tmp2; + uint64_t vtmp; + LOG_DISAS("disas_a5: op 0x%x r1 %d i2 0x%x\n", op, r1, i2); + switch (op) { + case 0x0: /* IIHH R1,I2 [RI] */ + case 0x1: /* IIHL R1,I2 [RI] */ + tmp = load_reg(r1); + vtmp = i2; + switch (op) { + case 0x0: tcg_gen_andi_i64(tmp, tmp, 0x0000ffffffffffffULL); vtmp <<= 48; break; + case 0x1: tcg_gen_andi_i64(tmp, tmp, 0xffff0000ffffffffULL); vtmp <<= 32; break; + default: tcg_abort(); + } + tcg_gen_ori_i64(tmp, tmp, vtmp); + store_reg(r1, tmp); + break; + case 0x4: /* NIHH R1,I2 [RI] */ + case 0x8: /* OIHH R1,I2 [RI] */ + tmp = load_reg(r1); + switch (op) { + case 0x4: + tmp2 = tcg_const_i64( (((uint64_t)i2) << 48) | 0x0000ffffffffffffULL); + tcg_gen_and_i64(tmp, tmp, tmp2); + break; + case 0x8: + tmp2 = tcg_const_i64(((uint64_t)i2) << 48); + tcg_gen_or_i64(tmp, tmp, tmp2); + break; + default: tcg_abort(); + } + store_reg(r1, tmp); + tcg_gen_shri_i64(tmp2, tmp, 48); + tcg_gen_trunc_i64_i32(tmp2, tmp2); + set_cc_nz_u32(tmp2); + tcg_temp_free(tmp2); + break; + case 0x5: /* NIHL R1,I2 [RI] */ + case 0x9: /* OIHL R1,I2 [RI] */ + tmp = load_reg(r1); + switch (op) { + case 0x5: + tmp2 = tcg_const_i64( (((uint64_t)i2) << 32) | 0xffff0000ffffffffULL); + tcg_gen_and_i64(tmp, tmp, tmp2); + break; + case 0x9: + tmp2 = tcg_const_i64(((uint64_t)i2) << 32); + tcg_gen_or_i64(tmp, tmp, tmp2); + break; + default: tcg_abort(); + } + store_reg(r1, tmp); + tcg_gen_shri_i64(tmp2, tmp, 32); + tcg_gen_trunc_i64_i32(tmp2, tmp2); + tcg_gen_andi_i32(tmp2, tmp2, 0xffff); + set_cc_nz_u32(tmp2); + tcg_temp_free(tmp2); + break; + case 0x6: /* NILH R1,I2 [RI] */ + case 0xa: /* OILH R1,I2 [RI] */ + tmp = load_reg(r1); + switch (op) { + case 0x6: + tmp2 = tcg_const_i64( (((uint64_t)i2) << 16) | 0xffffffff0000ffffULL); + tcg_gen_and_i64(tmp, tmp, tmp2); + break; + case 0xa: + tmp2 = tcg_const_i64(((uint64_t)i2) << 16); + tcg_gen_or_i64(tmp, tmp, tmp2); + break; + default: tcg_abort(); + } + store_reg(r1, tmp); + tcg_gen_shri_i64(tmp2, tmp, 16); + tcg_gen_trunc_i64_i32(tmp2, tmp2); + tcg_gen_andi_i32(tmp2, tmp2, 0xffff); + set_cc_nz_u32(tmp2); 
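
The a5 immediates in this block all target one 16-bit field of a 64-bit register: HH is bits 0-15 (most significant), HL bits 16-31, LH bits 32-47 and LL bits 48-63, which is where the shift amounts 48, 32, 16 and 0 used here come from. The insert variant, as a stand-alone sketch (illustrative only):

    static uint64_t set_halfword_field(uint64_t reg, uint16_t imm, int field)
    {
        int shift = 48 - 16 * field;         /* 0 = HH ... 3 = LL */
        uint64_t mask = 0xffffULL << shift;
        return (reg & ~mask) | ((uint64_t)imm << shift);
    }
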
+ tcg_temp_free(tmp2); + break; + case 0x7: /* NILL R1,I2 [RI] */ + case 0xb: /* OILL R1,I2 [RI] */ + tmp = load_reg(r1); + switch (op) { + case 0x7: + tmp2 = tcg_const_i64(i2 | 0xffffffffffff0000ULL); + tcg_gen_and_i64(tmp, tmp, tmp2); + break; + case 0xb: + tmp2 = tcg_const_i64(i2); + tcg_gen_or_i64(tmp, tmp, tmp2); + break; + default: tcg_abort(); break; + } + store_reg(r1, tmp); + tcg_gen_trunc_i64_i32(tmp, tmp); + tcg_gen_andi_i32(tmp, tmp, 0xffff); + set_cc_nz_u32(tmp); /* signedness should not matter here */ + tcg_temp_free(tmp2); + break; + case 0xc: /* LLIHH R1,I2 [RI] */ + tmp = tcg_const_i64( ((uint64_t)i2) << 48 ); + store_reg(r1, tmp); + break; + case 0xd: /* LLIHL R1,I2 [RI] */ + tmp = tcg_const_i64( ((uint64_t)i2) << 32 ); + store_reg(r1, tmp); + break; + case 0xe: /* LLILH R1,I2 [RI] */ + tmp = tcg_const_i64( ((uint64_t)i2) << 16 ); + store_reg(r1, tmp); + break; + case 0xf: /* LLILL R1,I2 [RI] */ + tmp = tcg_const_i64(i2); + store_reg(r1, tmp); + break; + default: + LOG_DISAS("illegal a5 operation 0x%x\n", op); + gen_illegal_opcode(s); + return; + } + tcg_temp_free(tmp); +} + +static void disas_a7(DisasContext *s, int op, int r1, int i2) +{ + TCGv tmp, tmp2, tmp3; + LOG_DISAS("disas_a7: op 0x%x r1 %d i2 0x%x\n", op, r1, i2); + switch (op) { + case 0x0: /* TMLH or TMH R1,I2 [RI] */ + tmp = load_reg(r1); + tcg_gen_shri_i64(tmp, tmp, 16); + tmp2 = tcg_const_i32((uint16_t)i2); + gen_helper_tmxx(cc, tmp, tmp2); + tcg_temp_free(tmp2); + break; + case 0x1: /* TMLL or TML R1,I2 [RI] */ + tmp = load_reg(r1); + tmp2 = tcg_const_i32((uint16_t)i2); + gen_helper_tmxx(cc, tmp, tmp2); + tcg_temp_free(tmp2); + break; + case 0x2: /* TMHH R1,I2 [RI] */ + tmp = load_reg(r1); + tcg_gen_shri_i64(tmp, tmp, 48); + tmp2 = tcg_const_i32((uint16_t)i2); + gen_helper_tmxx(cc, tmp, tmp2); + tcg_temp_free(tmp2); + break; + case 0x3: /* TMHL R1,I2 [RI] */ + tmp = load_reg(r1); + tcg_gen_shri_i64(tmp, tmp, 32); + tmp2 = tcg_const_i32((uint16_t)i2); + gen_helper_tmxx(cc, tmp, tmp2); + tcg_temp_free(tmp2); + break; + case 0x4: /* brc m1, i2 */ + gen_brc(r1, s, i2 * 2); + return; + case 0x5: /* BRAS R1,I2 [RI] */ + tmp = tcg_const_i64(s->pc + 4); + store_reg(r1, tmp); + tcg_temp_free(tmp); + tmp = tcg_const_i64(s->pc + i2 * 2); + tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUState, psw.addr)); + s->is_jmp = DISAS_JUMP; + break; + case 0x6: /* BRCT R1,I2 [RI] */ + tmp = load_reg32(r1); + tcg_gen_subi_i32(tmp, tmp, 1); + store_reg32(r1, tmp); + tmp2 = tcg_const_i64(s->pc); + tmp3 = tcg_const_i32(i2 * 2); + gen_helper_brct(tmp, tmp2, tmp3); + s->is_jmp = DISAS_JUMP; + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x7: /* BRCTG R1,I2 [RI] */ + tmp = load_reg(r1); + tcg_gen_subi_i64(tmp, tmp, 1); + store_reg(r1, tmp); + tmp2 = tcg_const_i64(s->pc); + tmp3 = tcg_const_i32(i2 * 2); + gen_helper_brctg(tmp, tmp2, tmp3); + s->is_jmp = DISAS_JUMP; + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x8: /* lhi r1, i2 */ + tmp = tcg_const_i32(i2); + store_reg32(r1, tmp); + break; + case 0x9: /* lghi r1, i2 */ + tmp = tcg_const_i64(i2); + store_reg(r1, tmp); + break; + case 0xa: /* AHI R1,I2 [RI] */ + tmp = load_reg32(r1); + tmp3 = tcg_temp_new_i32(); + tcg_gen_addi_i32(tmp3, tmp, i2); + store_reg32(r1, tmp3); + tmp2 = tcg_const_i32(i2); + gen_helper_set_cc_add32(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0xb: /* aghi r1, i2 */ + tmp = load_reg(r1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_addi_i64(tmp3, tmp, i2); + store_reg(r1, tmp3); + tmp2 = 
tcg_const_i64(i2); + gen_set_cc_add64(tmp, tmp2, tmp3); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0xc: /* MHI R1,I2 [RI] */ + tmp = load_reg32(r1); + tcg_gen_muli_i32(tmp, tmp, i2); + store_reg32(r1, tmp); + break; + case 0xd: /* MGHI R1,I2 [RI] */ + tmp = load_reg(r1); + tcg_gen_muli_i64(tmp, tmp, i2); + store_reg(r1, tmp); + break; + case 0xe: /* CHI R1,I2 [RI] */ + tmp = load_reg32(r1); + cmp_s32c(tmp, i2); + break; + case 0xf: /* CGHI R1,I2 [RI] */ + tmp = load_reg(r1); + cmp_s64c(tmp, i2); + break; + default: + LOG_DISAS("illegal a7 operation 0x%x\n", op); + gen_illegal_opcode(s); + return; + } + tcg_temp_free(tmp); +} + +static void disas_b2(DisasContext *s, int op, int r1, int r2) +{ + TCGv_i32 tmp, tmp2, tmp3; + LOG_DISAS("disas_b2: op 0x%x r1 %d r2 %d\n", op, r1, r2); + switch (op) { + case 0x22: /* IPM R1 [RRE] */ + tmp = tcg_const_i32(r1); + gen_helper_ipm(cc, tmp); + break; + case 0x4e: /* SAR R1,R2 [RRE] */ + tmp = load_reg32(r2); + tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, aregs[r1])); + break; + case 0x4f: /* EAR R1,R2 [RRE] */ + tmp = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, aregs[r2])); + store_reg32(r1, tmp); + break; + case 0x52: /* MSR R1,R2 [RRE] */ + tmp = load_reg32(r1); + tmp2 = load_reg32(r2); + tcg_gen_mul_i32(tmp, tmp, tmp2); + store_reg32(r1, tmp); + tcg_temp_free(tmp2); + break; + case 0x55: /* MVST R1,R2 [RRE] */ + tmp = load_reg32(0); + tmp2 = tcg_const_i32(r1); + tmp3 = tcg_const_i32(r2); + gen_helper_mvst(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x5d: /* CLST R1,R2 [RRE] */ + tmp = load_reg32(0); + tmp2 = tcg_const_i32(r1); + tmp3 = tcg_const_i32(r2); + gen_helper_clst(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x5e: /* SRST R1,R2 [RRE] */ + tmp = load_reg32(0); + tmp2 = tcg_const_i32(r1); + tmp3 = tcg_const_i32(r2); + gen_helper_srst(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + default: + LOG_DISAS("illegal b2 operation 0x%x\n", op); + gen_illegal_opcode(s); + return; + } + tcg_temp_free(tmp); +} + +static void disas_b3(DisasContext *s, int op, int m3, int r1, int r2) +{ + TCGv_i32 tmp, tmp2, tmp3; + LOG_DISAS("disas_b3: op 0x%x m3 0x%x r1 %d r2 %d\n", op, m3, r1, r2); +#define FP_HELPER(i) \ + tmp = tcg_const_i32(r1); \ + tmp2 = tcg_const_i32(r2); \ + gen_helper_ ## i (tmp, tmp2); \ + tcg_temp_free(tmp); \ + tcg_temp_free(tmp2); + +#define FP_HELPER_CC(i) \ + tmp = tcg_const_i32(r1); \ + tmp2 = tcg_const_i32(r2); \ + gen_helper_ ## i (cc, tmp, tmp2); \ + tcg_temp_free(tmp); \ + tcg_temp_free(tmp2); + + switch (op) { + case 0x0: /* LPEBR R1,R2 [RRE] */ + FP_HELPER_CC(lpebr); break; + case 0x2: /* LTEBR R1,R2 [RRE] */ + FP_HELPER_CC(ltebr); break; + case 0x3: /* LCEBR R1,R2 [RRE] */ + FP_HELPER_CC(lcebr); break; + case 0x4: /* LDEBR R1,R2 [RRE] */ + FP_HELPER(ldebr); break; + case 0x5: /* LXDBR R1,R2 [RRE] */ + FP_HELPER(lxdbr); break; + case 0x9: /* CEBR R1,R2 [RRE] */ + FP_HELPER_CC(cebr); break; + case 0xa: /* AEBR R1,R2 [RRE] */ + FP_HELPER_CC(aebr); break; + case 0xb: /* SEBR R1,R2 [RRE] */ + FP_HELPER_CC(sebr); break; + case 0xd: /* DEBR R1,R2 [RRE] */ + FP_HELPER(debr); break; + case 0x10: /* LPDBR R1,R2 [RRE] */ + FP_HELPER_CC(lpdbr); break; + case 0x12: /* LTDBR R1,R2 [RRE] */ + FP_HELPER_CC(ltdbr); break; + case 0x13: /* LCDBR R1,R2 [RRE] */ + FP_HELPER_CC(lcdbr); break; + case 0x15: /* SQBDR R1,R2 [RRE] */ + FP_HELPER(sqdbr); break; + case 0x17: /* MEEBR R1,R2 [RRE] */ + 
FP_HELPER(meebr); break; + case 0x19: /* CDBR R1,R2 [RRE] */ + FP_HELPER_CC(cdbr); break; + case 0x1a: /* ADBR R1,R2 [RRE] */ + FP_HELPER_CC(adbr); break; + case 0x1b: /* SDBR R1,R2 [RRE] */ + FP_HELPER_CC(sdbr); break; + case 0x1c: /* MDBR R1,R2 [RRE] */ + FP_HELPER(mdbr); break; + case 0x1d: /* DDBR R1,R2 [RRE] */ + FP_HELPER(ddbr); break; + case 0xe: /* MAEBR R1,R3,R2 [RRF] */ + case 0x1e: /* MADBR R1,R3,R2 [RRF] */ + case 0x1f: /* MSDBR R1,R3,R2 [RRF] */ + /* for RRF insns, m3 is R1, r1 is R3, and r2 is R2 */ + tmp = tcg_const_i32(m3); + tmp2 = tcg_const_i32(r2); + tmp3 = tcg_const_i32(r1); + switch (op) { + case 0xe: gen_helper_maebr(tmp, tmp3, tmp2); break; + case 0x1e: gen_helper_madbr(tmp, tmp3, tmp2); break; + case 0x1f: gen_helper_msdbr(tmp, tmp3, tmp2); break; + default: tcg_abort(); + } + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x40: /* LPXBR R1,R2 [RRE] */ + FP_HELPER_CC(lpxbr); break; + case 0x42: /* LTXBR R1,R2 [RRE] */ + FP_HELPER_CC(ltxbr); break; + case 0x43: /* LCXBR R1,R2 [RRE] */ + FP_HELPER_CC(lcxbr); break; + case 0x44: /* LEDBR R1,R2 [RRE] */ + FP_HELPER(ledbr); break; + case 0x45: /* LDXBR R1,R2 [RRE] */ + FP_HELPER(ldxbr); break; + case 0x46: /* LEXBR R1,R2 [RRE] */ + FP_HELPER(lexbr); break; + case 0x49: /* CXBR R1,R2 [RRE] */ + FP_HELPER_CC(cxbr); break; + case 0x4a: /* AXBR R1,R2 [RRE] */ + FP_HELPER_CC(axbr); break; + case 0x4b: /* SXBR R1,R2 [RRE] */ + FP_HELPER_CC(sxbr); break; + case 0x4c: /* MXBR R1,R2 [RRE] */ + FP_HELPER(mxbr); break; + case 0x4d: /* DXBR R1,R2 [RRE] */ + FP_HELPER(dxbr); break; + case 0x65: /* LXR R1,R2 [RRE] */ + tmp = load_freg(r2); + store_freg(r1, tmp); + tcg_temp_free(tmp); + tmp = load_freg(r2 + 2); + store_freg(r1 + 2, tmp); + tcg_temp_free(tmp); + break; + case 0x74: /* LZER R1 [RRE] */ + tmp = tcg_const_i32(r1); + gen_helper_lzer(tmp); + tcg_temp_free(tmp); + break; + case 0x75: /* LZDR R1 [RRE] */ + tmp = tcg_const_i32(r1); + gen_helper_lzdr(tmp); + tcg_temp_free(tmp); + break; + case 0x76: /* LZXR R1 [RRE] */ + tmp = tcg_const_i32(r1); + gen_helper_lzxr(tmp); + tcg_temp_free(tmp); + break; + case 0x84: /* SFPC R1 [RRE] */ + tmp = load_reg32(r1); + tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, fpc)); + tcg_temp_free(tmp); + break; + case 0x8c: /* EFPC R1 [RRE] */ + tmp = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, fpc)); + store_reg32(r1, tmp); + tcg_temp_free(tmp); + break; + case 0x94: /* CEFBR R1,R2 [RRE] */ + case 0x95: /* CDFBR R1,R2 [RRE] */ + case 0x96: /* CXFBR R1,R2 [RRE] */ + tmp = tcg_const_i32(r1); + tmp2 = load_reg32(r2); + switch (op) { + case 0x94: gen_helper_cefbr(tmp, tmp2); break; + case 0x95: gen_helper_cdfbr(tmp, tmp2); break; + case 0x96: gen_helper_cxfbr(tmp, tmp2); break; + default: tcg_abort(); + } + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x98: /* CFEBR R1,R2 [RRE] */ + case 0x99: /* CFDBR R1,R2 [RRE] */ + case 0x9a: /* CFXBR R1,R2 [RRE] */ + tmp = tcg_const_i32(r1); + tmp2 = tcg_const_i32(r2); + tmp3 = tcg_const_i32(m3); + switch (op) { + case 0x98: gen_helper_cfebr(cc, tmp, tmp2, tmp3); break; + case 0x99: gen_helper_cfdbr(cc, tmp, tmp2, tmp3); break; + case 0x9a: gen_helper_cfxbr(cc, tmp, tmp2, tmp3); break; + default: tcg_abort(); + } + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0xa4: /* CEGBR R1,R2 [RRE] */ + case 0xa5: /* CDGBR R1,R2 [RRE] */ + tmp = tcg_const_i32(r1); + tmp2 = load_reg(r2); + switch (op) { + case 0xa4: gen_helper_cegbr(tmp, tmp2); break; + case 0xa5: 
gen_helper_cdgbr(tmp, tmp2); break; + default: tcg_abort(); + } + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0xa6: /* CXGBR R1,R2 [RRE] */ + tmp = tcg_const_i32(r1); + tmp2 = load_reg(r2); + gen_helper_cxgbr(tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0xa8: /* CGEBR R1,R2 [RRE] */ + tmp = tcg_const_i32(r1); + tmp2 = tcg_const_i32(r2); + tmp3 = tcg_const_i32(m3); + gen_helper_cgebr(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0xa9: /* CGDBR R1,R2 [RRE] */ + tmp = tcg_const_i32(r1); + tmp2 = tcg_const_i32(r2); + tmp3 = tcg_const_i32(m3); + gen_helper_cgdbr(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0xaa: /* CGXBR R1,R2 [RRE] */ + tmp = tcg_const_i32(r1); + tmp2 = tcg_const_i32(r2); + tmp3 = tcg_const_i32(m3); + gen_helper_cgxbr(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + default: + LOG_DISAS("illegal b3 operation 0x%x\n", op); + gen_illegal_opcode(s); + break; + } +} + +static void disas_b9(DisasContext *s, int op, int r1, int r2) +{ + TCGv tmp, tmp2, tmp3; + LOG_DISAS("disas_b9: op 0x%x r1 %d r2 %d\n", op, r1, r2); + switch (op) { + case 0: /* LPGR R1,R2 [RRE] */ + case 0x10: /* LPGFR R1,R2 [RRE] */ + if (op == 0) { + tmp2 = load_reg(r2); + } + else { + tmp2 = load_reg32(r2); + tcg_gen_ext32s_i64(tmp2, tmp2); + } + tmp = tcg_const_i32(r1); + gen_helper_abs_i64(cc, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 1: /* LNGR R1,R2 [RRE] */ + tmp2 = load_reg(r2); + tmp = tcg_const_i32(r1); + gen_helper_nabs_i64(cc, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 2: /* LTGR R1,R2 [RRE] */ + tmp = load_reg(r2); + if (r1 != r2) store_reg(r1, tmp); + set_cc_s64(tmp); + tcg_temp_free(tmp); + break; + case 3: /* LCGR R1,R2 [RRE] */ + case 0x13: /* LCGFR R1,R2 [RRE] */ + if (op == 0x13) { + tmp = load_reg32(r2); + tcg_gen_ext32s_i64(tmp, tmp); + } + else { + tmp = load_reg(r2); + } + tcg_gen_neg_i64(tmp, tmp); + store_reg(r1, tmp); + gen_helper_set_cc_comp_s64(cc, tmp); + tcg_temp_free(tmp); + break; + case 4: /* LGR R1,R2 [RRE] */ + tmp = load_reg(r2); + store_reg(r1, tmp); + tcg_temp_free(tmp); + break; + case 0x6: /* LGBR R1,R2 [RRE] */ + tmp2 = load_reg(r2); + tcg_gen_ext8s_i64(tmp2, tmp2); + store_reg(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 8: /* AGR R1,R2 [RRE] */ + case 0xa: /* ALGR R1,R2 [RRE] */ + tmp = load_reg(r1); + tmp2 = load_reg(r2); + tmp3 = tcg_temp_new_i64(); + tcg_gen_add_i64(tmp3, tmp, tmp2); + store_reg(r1, tmp3); + switch (op) { + case 0x8: gen_set_cc_add64(tmp, tmp2, tmp3); break; + case 0xa: gen_helper_set_cc_addu64(cc, tmp, tmp2, tmp3); break; + default: tcg_abort(); + } + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 9: /* SGR R1,R2 [RRE] */ + case 0xb: /* SLGR R1,R2 [RRE] */ + case 0x1b: /* SLGFR R1,R2 [RRE] */ + case 0x19: /* SGFR R1,R2 [RRE] */ + tmp = load_reg(r1); + switch (op) { + case 0x1b: case 0x19: + tmp2 = load_reg32(r2); + if (op == 0x19) tcg_gen_ext32s_i64(tmp2, tmp2); + else tcg_gen_ext32u_i64(tmp2, tmp2); + break; + default: + tmp2 = load_reg(r2); + break; + } + tmp3 = tcg_temp_new_i64(); + tcg_gen_sub_i64(tmp3, tmp, tmp2); + store_reg(r1, tmp3); + switch (op) { + case 9: case 0x19: gen_helper_set_cc_sub64(cc, tmp,tmp2,tmp3); break; + case 0xb: case 0x1b: gen_helper_set_cc_subu64(cc, tmp, tmp2, tmp3); break; + default: tcg_abort(); + } + 
tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0xc: /* MSGR R1,R2 [RRE] */ + case 0x1c: /* MSGFR R1,R2 [RRE] */ + tmp = load_reg(r1); + tmp2 = load_reg(r2); + if (op == 0x1c) tcg_gen_ext32s_i64(tmp2, tmp2); + tcg_gen_mul_i64(tmp, tmp, tmp2); + store_reg(r1, tmp); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0xd: /* DSGR R1,R2 [RRE] */ + case 0x1d: /* DSGFR R1,R2 [RRE] */ + tmp = load_reg(r1 + 1); + if (op == 0xd) { + tmp2 = load_reg(r2); + } + else { + tmp2 = load_reg32(r2); + tcg_gen_ext32s_i64(tmp2, tmp2); + } + tmp3 = tcg_temp_new_i64(); + tcg_gen_div_i64(tmp3, tmp, tmp2); + store_reg(r1 + 1, tmp3); + tcg_gen_rem_i64(tmp3, tmp, tmp2); + store_reg(r1, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x14: /* LGFR R1,R2 [RRE] */ + tmp = load_reg32(r2); + tmp2 = tcg_temp_new_i64(); + tcg_gen_ext32s_i64(tmp2, tmp); + store_reg(r1, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x16: /* LLGFR R1,R2 [RRE] */ + tmp = load_reg32(r2); + tcg_gen_ext32u_i64(tmp, tmp); + store_reg(r1, tmp); + tcg_temp_free(tmp); + break; + case 0x17: /* LLGTR R1,R2 [RRE] */ + tmp = load_reg32(r2); + tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL); + tcg_gen_ext32u_i64(tmp, tmp); + store_reg(r1, tmp); + tcg_temp_free(tmp); + break; + case 0x18: /* AGFR R1,R2 [RRE] */ + case 0x1a: /* ALGFR R1,R2 [RRE] */ + tmp2 = load_reg32(r2); + switch (op) { + case 0x18: tcg_gen_ext32s_i64(tmp2, tmp2); break; + case 0x1a: tcg_gen_ext32u_i64(tmp2, tmp2); break; + default: tcg_abort(); + } + tmp = load_reg(r1); + tmp3 = tcg_temp_new_i64(); + tcg_gen_add_i64(tmp3, tmp, tmp2); + store_reg(r1, tmp3); + switch (op) { + case 0x18: gen_set_cc_add64(tmp, tmp2, tmp3); break; + case 0x1a: gen_helper_set_cc_addu64(cc, tmp, tmp2, tmp3); break; + default: tcg_abort(); + } + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x20: /* CGR R1,R2 [RRE] */ + case 0x30: /* CGFR R1,R2 [RRE] */ + tmp2 = load_reg(r2); + if (op == 0x30) tcg_gen_ext32s_i64(tmp2, tmp2); + tmp = load_reg(r1); + cmp_s64(tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x21: /* CLGR R1,R2 [RRE] */ + case 0x31: /* CLGFR R1,R2 [RRE] */ + tmp2 = load_reg(r2); + if (op == 0x31) tcg_gen_ext32u_i64(tmp2, tmp2); + tmp = load_reg(r1); + cmp_u64(tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x26: /* LBR R1,R2 [RRE] */ + tmp2 = load_reg32(r2); + tcg_gen_ext8s_i32(tmp2, tmp2); + store_reg32(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 0x27: /* LHR R1,R2 [RRE] */ + tmp2 = load_reg32(r2); + tcg_gen_ext16s_i32(tmp2, tmp2); + store_reg32(r1, tmp2); + tcg_temp_free(tmp2); + break; + case 0x80: /* NGR R1,R2 [RRE] */ + case 0x81: /* OGR R1,R2 [RRE] */ + case 0x82: /* XGR R1,R2 [RRE] */ + tmp = load_reg(r1); + tmp2 = load_reg(r2); + switch (op) { + case 0x80: tcg_gen_and_i64(tmp, tmp, tmp2); break; + case 0x81: tcg_gen_or_i64(tmp, tmp, tmp2); break; + case 0x82: tcg_gen_xor_i64(tmp, tmp, tmp2); break; + default: tcg_abort(); + } + store_reg(r1, tmp); + set_cc_nz_u64(tmp); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x83: /* FLOGR R1,R2 [RRE] */ + tmp2 = load_reg(r2); + tmp = tcg_const_i32(r1); + gen_helper_flogr(cc, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x84: /* LLGCR R1,R2 [RRE] */ + tmp = load_reg(r2); + tcg_gen_andi_i64(tmp, tmp, 0xff); + store_reg(r1, tmp); + tcg_temp_free(tmp); + break; + case 0x85: /* LLGHR R1,R2 [RRE] */ + tmp = load_reg(r2); + 
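
FLOGR just above (case 0x83) produces two results: r1 receives the number of leading zero bits of the second operand (64 if it is zero) and r1+1 receives the operand with that leftmost one bit cleared, with CC 0 for a zero operand and CC 2 otherwise; the flogr helper earlier in the patch computes this with a shift loop. An equivalent sketch using a compiler builtin (illustrative only; __builtin_clzll is a GCC extension, not used by the patch):

    static uint32_t flogr_sketch(uint64_t v, uint64_t *pos, uint64_t *rest)
    {
        if (v == 0) {
            *pos = 64;
            *rest = 0;
            return 0;
        }
        *pos = __builtin_clzll(v);                       /* leading zeros */
        *rest = v & ~(0x8000000000000000ULL >> *pos);    /* clear that bit */
        return 2;
    }
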
tcg_gen_andi_i64(tmp, tmp, 0xffff); + store_reg(r1, tmp); + tcg_temp_free(tmp); + break; + case 0x87: /* DLGR R1,R2 [RRE] */ + tmp = tcg_const_i32(r1); + tmp2 = load_reg(r2); + gen_helper_dlg(tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x88: /* ALCGR R1,R2 [RRE] */ + tmp = load_reg(r1); + tmp2 = load_reg(r2); + tmp3 = tcg_temp_new_i64(); + tcg_gen_shri_i64(tmp3, cc, 1); + tcg_gen_andi_i64(tmp3, tmp3, 1); + tcg_gen_add_i64(tmp3, tmp2, tmp3); + tcg_gen_add_i64(tmp3, tmp, tmp3); + store_reg(r1, tmp3); + gen_helper_set_cc_addc_u64(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x89: /* SLBGR R1,R2 [RRE] */ + tmp = load_reg(r1); + tmp2 = load_reg(r2); + tmp3 = tcg_const_i32(r1); + gen_helper_slbg(cc, cc, tmp3, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x94: /* LLCR R1,R2 [RRE] */ + tmp = load_reg32(r2); + tcg_gen_andi_i32(tmp, tmp, 0xff); + store_reg32(r1, tmp); + tcg_temp_free(tmp); + break; + case 0x95: /* LLHR R1,R2 [RRE] */ + tmp = load_reg32(r2); + tcg_gen_andi_i32(tmp, tmp, 0xffff); + store_reg32(r1, tmp); + tcg_temp_free(tmp); + break; + case 0x98: /* ALCR R1,R2 [RRE] */ + tmp = tcg_const_i32(r1); + tmp2 = load_reg32(r2); + gen_helper_addc_u32(cc, cc, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x99: /* SLBR R1,R2 [RRE] */ + tmp = load_reg32(r1); + tmp2 = load_reg32(r2); + tmp3 = tcg_const_i32(r1); + gen_helper_slb(cc, cc, tmp3, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + default: + LOG_DISAS("illegal b9 operation 0x%x\n", op); + gen_illegal_opcode(s); + break; + } +} + +static void disas_c0(DisasContext *s, int op, int r1, int i2) +{ + TCGv tmp, tmp2, tmp3; + LOG_DISAS("disas_c0: op 0x%x r1 %d i2 %d\n", op, r1, i2); + uint64_t target = s->pc + i2 * 2; + /* FIXME: huh? 
*/ target &= 0xffffffff; + switch (op) { + case 0: /* larl r1, i2 */ + tmp = tcg_const_i64(target); + store_reg(r1, tmp); + tcg_temp_free(tmp); + break; + case 0x1: /* LGFI R1,I2 [RIL] */ + tmp = tcg_const_i64((int64_t)i2); + store_reg(r1, tmp); + tcg_temp_free(tmp); + break; + case 0x4: /* BRCL M1,I2 [RIL] */ + tmp = tcg_const_i32(r1); /* aka m1 */ + tmp2 = tcg_const_i64(s->pc); + tmp3 = tcg_const_i64(i2 * 2); + gen_helper_brcl(cc, tmp, tmp2, tmp3); + s->is_jmp = DISAS_JUMP; + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x5: /* brasl r1, i2 */ + tmp = tcg_const_i64(s->pc + 6); + store_reg(r1, tmp); + tmp = tcg_const_i64(target); + tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUState, psw.addr)); + s->is_jmp = DISAS_JUMP; + tcg_temp_free(tmp); + break; + case 0x7: /* XILF R1,I2 [RIL] */ + case 0xb: /* NILF R1,I2 [RIL] */ + case 0xd: /* OILF R1,I2 [RIL] */ + tmp = load_reg32(r1); + switch (op) { + case 0x7: tcg_gen_xori_i32(tmp, tmp, (uint32_t)i2); break; + case 0xb: tcg_gen_andi_i32(tmp, tmp, (uint32_t)i2); break; + case 0xd: tcg_gen_ori_i32(tmp, tmp, (uint32_t)i2); break; + default: tcg_abort(); + } + store_reg32(r1, tmp); + tcg_gen_trunc_i64_i32(tmp, tmp); + set_cc_nz_u32(tmp); + tcg_temp_free(tmp); + break; + case 0x9: /* IILF R1,I2 [RIL] */ + tmp = tcg_const_i32((uint32_t)i2); + store_reg32(r1, tmp); + tcg_temp_free(tmp); + break; + case 0xa: /* NIHF R1,I2 [RIL] */ + tmp = load_reg(r1); + switch (op) { + case 0xa: tcg_gen_andi_i64(tmp, tmp, (((uint64_t)((uint32_t)i2)) << 32) | 0xffffffffULL); break; + default: tcg_abort(); + } + store_reg(r1, tmp); + tcg_gen_shr_i64(tmp, tmp, 32); + tcg_gen_trunc_i64_i32(tmp, tmp); + set_cc_nz_u32(tmp); + tcg_temp_free(tmp); + break; + case 0xe: /* LLIHF R1,I2 [RIL] */ + tmp = tcg_const_i64(((uint64_t)(uint32_t)i2) << 32); + store_reg(r1, tmp); + tcg_temp_free(tmp); + break; + case 0xf: /* LLILF R1,I2 [RIL] */ + tmp = tcg_const_i64((uint32_t)i2); + store_reg(r1, tmp); + tcg_temp_free(tmp); + break; + default: + LOG_DISAS("illegal c0 operation 0x%x\n", op); + gen_illegal_opcode(s); + break; + } +} + +static void disas_c2(DisasContext *s, int op, int r1, int i2) +{ + TCGv tmp, tmp2, tmp3; + switch (op) { + case 0x4: /* SLGFI R1,I2 [RIL] */ + case 0xa: /* ALGFI R1,I2 [RIL] */ + tmp = load_reg(r1); + tmp2 = tcg_const_i64((uint64_t)(uint32_t)i2); + tmp3 = tcg_temp_new_i64(); + switch (op) { + case 0x4: + tcg_gen_sub_i64(tmp3, tmp, tmp2); + gen_helper_set_cc_subu64(cc, tmp, tmp2, tmp3); + break; + case 0xa: + tcg_gen_add_i64(tmp3, tmp, tmp2); + gen_helper_set_cc_addu64(cc, tmp, tmp2, tmp3); + break; + default: tcg_abort(); + } + store_reg(r1, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0x5: /* SLFI R1,I2 [RIL] */ + case 0xb: /* ALFI R1,I2 [RIL] */ + tmp = load_reg32(r1); + tmp2 = tcg_const_i32(i2); + tmp3 = tcg_temp_new_i32(); + switch (op) { + case 0x5: + tcg_gen_sub_i32(tmp3, tmp, tmp2); + gen_helper_set_cc_subu32(cc, tmp, tmp2, tmp3); + break; + case 0xb: + tcg_gen_add_i32(tmp3, tmp, tmp2); + gen_helper_set_cc_addu32(cc, tmp, tmp2, tmp3); + break; + default: tcg_abort(); + } + store_reg32(r1, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + break; + case 0xc: /* CGFI R1,I2 [RIL] */ + tmp = load_reg(r1); + cmp_s64c(tmp, (int64_t)i2); + tcg_temp_free(tmp); + break; + case 0xe: /* CLGFI R1,I2 [RIL] */ + tmp = load_reg(r1); + cmp_u64c(tmp, (uint64_t)(uint32_t)i2); + tcg_temp_free(tmp); + break; + case 0xd: /* CFI R1,I2 [RIL] */ + case 0xf: /* CLFI R1,I2 
[RIL] */ + tmp = load_reg32(r1); + switch (op) { + case 0xd: cmp_s32c(tmp, i2); break; + case 0xf: cmp_u32c(tmp, i2); break; + default: tcg_abort(); + } + tcg_temp_free(tmp); + break; + default: + LOG_DISAS("illegal c2 operation 0x%x\n", op); + gen_illegal_opcode(s); + break; + } +} + +static inline uint64_t ld_code2(uint64_t pc) +{ + return (uint64_t)lduw_code(pc); +} + +static inline uint64_t ld_code4(uint64_t pc) +{ + return (uint64_t)ldl_code(pc); +} + +static inline uint64_t ld_code6(uint64_t pc) +{ + uint64_t opc; + opc = (uint64_t)lduw_code(pc) << 32; + opc |= (uint64_t)(unsigned int)ldl_code(pc+2); + return opc; +} + +static void disas_s390_insn(CPUState *env, DisasContext *s) +{ + TCGv tmp, tmp2, tmp3; + unsigned char opc; + uint64_t insn; + int op, r1, r2, r3, d1, d2, x2, b1, b2, i, i2, r1b; + TCGv vl, vd1, vd2, vb; + + opc = ldub_code(s->pc); + LOG_DISAS("opc 0x%x\n", opc); + +#define FETCH_DECODE_RR \ + insn = ld_code2(s->pc); \ + DEBUGINSN \ + r1 = (insn >> 4) & 0xf; \ + r2 = insn & 0xf; + +#define FETCH_DECODE_RX \ + insn = ld_code4(s->pc); \ + DEBUGINSN \ + r1 = (insn >> 20) & 0xf; \ + x2 = (insn >> 16) & 0xf; \ + b2 = (insn >> 12) & 0xf; \ + d2 = insn & 0xfff; \ + tmp = get_address(x2, b2, d2); + +#define FREE_RX \ + tcg_temp_free(tmp); + +#define FETCH_DECODE_RS \ + insn = ld_code4(s->pc); \ + DEBUGINSN \ + r1 = (insn >> 20) & 0xf; \ + r3 = (insn >> 16) & 0xf; /* aka m3 */ \ + b2 = (insn >> 12) & 0xf; \ + d2 = insn & 0xfff; + +#define FETCH_DECODE_SI \ + insn = ld_code4(s->pc); \ + i2 = (insn >> 16) & 0xff; \ + b1 = (insn >> 12) & 0xf; \ + d1 = insn & 0xfff; \ + tmp = get_address(0, b1, d1); + +#define FREE_SI \ + tcg_temp_free(tmp); + + switch (opc) { + case 0x7: /* BCR M1,R2 [RR] */ + FETCH_DECODE_RR + if (r2) { + gen_bcr(r1, r2, s->pc); + s->is_jmp = DISAS_JUMP; + } + else { + /* FIXME: "serialization and checkpoint-synchronization function"? 
*/ + } + s->pc += 2; + break; + case 0xa: /* SVC I [RR] */ + insn = ld_code2(s->pc); + DEBUGINSN + i = insn & 0xff; + tmp = tcg_const_i64(s->pc); + tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUState, psw.addr)); + tcg_temp_free(tmp); + s->is_jmp = DISAS_SVC; + s->pc += 2; + break; + case 0xd: /* BASR R1,R2 [RR] */ + FETCH_DECODE_RR + tmp = tcg_const_i64(s->pc + 2); + store_reg(r1, tmp); + if (r2) { + tmp2 = load_reg(r2); + tcg_gen_st_i64(tmp2, cpu_env, offsetof(CPUState, psw.addr)); + tcg_temp_free(tmp2); + s->is_jmp = DISAS_JUMP; + } + tcg_temp_free(tmp); + s->pc += 2; + break; + case 0x10: /* LPR R1,R2 [RR] */ + FETCH_DECODE_RR + tmp2 = load_reg32(r2); + tmp = tcg_const_i32(r1); + gen_helper_abs_i32(cc, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + s->pc += 2; + break; + case 0x11: /* LNR R1,R2 [RR] */ + FETCH_DECODE_RR + tmp2 = load_reg32(r2); + tmp = tcg_const_i32(r1); + gen_helper_nabs_i32(cc, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + s->pc += 2; + break; + case 0x12: /* LTR R1,R2 [RR] */ + FETCH_DECODE_RR + tmp = load_reg32(r2); + if (r1 != r2) store_reg32(r1, tmp); + set_cc_s32(tmp); + tcg_temp_free(tmp); + s->pc += 2; + break; + case 0x13: /* LCR R1,R2 [RR] */ + FETCH_DECODE_RR + tmp = load_reg32(r2); + tcg_gen_neg_i32(tmp, tmp); + store_reg32(r1, tmp); + gen_helper_set_cc_comp_s32(cc, tmp); + tcg_temp_free(tmp); + s->pc += 2; + break; + case 0x14: /* NR R1,R2 [RR] */ + case 0x16: /* OR R1,R2 [RR] */ + case 0x17: /* XR R1,R2 [RR] */ + FETCH_DECODE_RR + tmp2 = load_reg32(r2); + tmp = load_reg32(r1); + switch (opc) { + case 0x14: tcg_gen_and_i32(tmp, tmp, tmp2); break; + case 0x16: tcg_gen_or_i32(tmp, tmp, tmp2); break; + case 0x17: tcg_gen_xor_i32(tmp, tmp, tmp2); break; + default: tcg_abort(); + } + store_reg32(r1, tmp); + set_cc_nz_u32(tmp); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + s->pc += 2; + break; + case 0x18: /* LR R1,R2 [RR] */ + FETCH_DECODE_RR + tmp = load_reg32(r2); + store_reg32(r1, tmp); + tcg_temp_free(tmp); + s->pc += 2; + break; + case 0x15: /* CLR R1,R2 [RR] */ + case 0x19: /* CR R1,R2 [RR] */ + FETCH_DECODE_RR + tmp = load_reg32(r1); + tmp2 = load_reg32(r2); + switch (opc) { + case 0x15: cmp_u32(tmp, tmp2); break; + case 0x19: cmp_s32(tmp, tmp2); break; + default: tcg_abort(); + } + s->pc += 2; + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x1a: /* AR R1,R2 [RR] */ + case 0x1e: /* ALR R1,R2 [RR] */ + FETCH_DECODE_RR + tmp = load_reg32(r1); + tmp2 = load_reg32(r2); + tmp3 = tcg_temp_new_i32(); + tcg_gen_add_i32(tmp3, tmp, tmp2); + store_reg32(r1, tmp3); + switch (opc) { + case 0x1a: gen_helper_set_cc_add32(cc, tmp, tmp2, tmp3); break; + case 0x1e: gen_helper_set_cc_addu32(cc, tmp, tmp2, tmp3); break; + default: tcg_abort(); + } + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->pc += 2; + break; + case 0x1b: /* SR R1,R2 [RR] */ + case 0x1f: /* SLR R1,R2 [RR] */ + FETCH_DECODE_RR + tmp = load_reg32(r1); + tmp2 = load_reg32(r2); + tmp3 = tcg_temp_new_i32(); + tcg_gen_sub_i32(tmp3, tmp, tmp2); + store_reg32(r1, tmp3); + switch (opc) { + case 0x1b: gen_helper_set_cc_sub32(cc, tmp, tmp2, tmp3); break; + case 0x1f: gen_helper_set_cc_subu32(cc, tmp, tmp2, tmp3); break; + default: tcg_abort(); + } + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->pc += 2; + break; + case 0x28: /* LDR R1,R2 [RR] */ + FETCH_DECODE_RR + tmp = load_freg(r2); + store_freg(r1, tmp); + tcg_temp_free(tmp); + s->pc += 2; + break; + case 0x38: /* LER R1,R2 [RR] */ + FETCH_DECODE_RR + tmp = load_freg32(r2); + 
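
The RX/RS/SI decodes in this function all go through get_address(), i.e. the standard base-index-displacement address generation: the 12-bit unsigned displacement is added to the contents of the index and base registers, and a register number of 0 contributes nothing rather than the value of r0. A plain sketch, assuming the 64-bit addressing mode (the 24/31-bit modes would truncate the result):

    static uint64_t bxd_address(const uint64_t regs[16], int x2, int b2,
                                unsigned d2)
    {
        uint64_t addr = d2;                  /* 12-bit unsigned displacement */
        if (x2 != 0) {
            addr += regs[x2];                /* index register, 0 means none */
        }
        if (b2 != 0) {
            addr += regs[b2];                /* base register, 0 means none */
        }
        return addr;
    }
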
store_freg32(r1, tmp); + tcg_temp_free(tmp); + s->pc += 2; + break; + case 0x40: /* STH R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = load_reg32(r1); + tcg_gen_qemu_st16(tmp2, tmp, 1); + FREE_RX + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x41: /* la */ + FETCH_DECODE_RX + store_reg(r1, tmp); /* FIXME: 31/24-bit addressing */ + FREE_RX + s->pc += 4; + break; + case 0x42: /* STC R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = load_reg32(r1); + tcg_gen_qemu_st8(tmp2, tmp, 1); + FREE_RX + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x43: /* IC R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp2, tmp, 1); + store_reg8(r1, tmp2); + FREE_RX + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x44: /* EX R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = load_reg(r1); + tmp3 = tcg_const_i64(s->pc + 4); + gen_helper_ex(cc, cc, tmp2, tmp, tmp3); + FREE_RX + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->pc += 4; + break; + case 0x47: /* BC M1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + /* FIXME: optimize m1 == 0xf (unconditional) case */ + tmp2 = tcg_const_i32(r1); /* aka m1 */ + tmp3 = tcg_const_i64(s->pc); + gen_helper_bc(cc, tmp2, tmp, tmp3); + FREE_RX + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->is_jmp = DISAS_JUMP; + s->pc += 4; + break; + case 0x48: /* LH R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16s(tmp2, tmp, 1); + store_reg32(r1, tmp2); + FREE_RX + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x49: /* CH R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16s(tmp2, tmp, 1); + FREE_RX + tmp = load_reg32(r1); + cmp_s32(tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x4a: /* AH R1,D2(X2,B2) [RX] */ + case 0x4b: /* SH R1,D2(X2,B2) [RX] */ + case 0x4c: /* MH R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16s(tmp2, tmp, 1); + FREE_RX + tmp = load_reg32(r1); + tmp3 = tcg_temp_new_i32(); + switch (opc) { + case 0x4a: + tcg_gen_add_i32(tmp3, tmp, tmp2); + gen_helper_set_cc_add32(cc, tmp, tmp2, tmp3); + break; + case 0x4b: + tcg_gen_sub_i32(tmp3, tmp, tmp2); + gen_helper_set_cc_sub32(cc, tmp, tmp2, tmp3); + break; + case 0x4c: + tcg_gen_mul_i32(tmp3, tmp, tmp2); + break; + default: tcg_abort(); + } + store_reg32(r1, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->pc += 4; + break; + case 0x50: /* st r1, d2(x2, b2) */ + FETCH_DECODE_RX + tmp2 = load_reg32(r1); + tcg_gen_qemu_st32(tmp2, tmp, 1); + s->pc += 4; + FREE_RX + tcg_temp_free(tmp2); + break; + case 0x55: /* CL R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + FREE_RX + tmp = load_reg32(r1); + cmp_u32(tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x54: /* N R1,D2(X2,B2) [RX] */ + case 0x56: /* O R1,D2(X2,B2) [RX] */ + case 0x57: /* X R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + FREE_RX + tmp = load_reg32(r1); + switch (opc) { + case 0x54: tcg_gen_and_i32(tmp, tmp, tmp2); break; + case 0x56: tcg_gen_or_i32(tmp, tmp, tmp2); break; + case 0x57: tcg_gen_xor_i32(tmp, tmp, tmp2); break; + default: tcg_abort(); + } + store_reg32(r1, tmp); + set_cc_nz_u32(tmp); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x58: /* l r1, d2(x2, b2) */ + FETCH_DECODE_RX + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + 
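
The compare cases around here (CH, CL, C) all reduce to one condition-code rule: 0 when the operands are equal, 1 when the first operand is low, 2 when it is high; CC 3 is not used for compares, and signedness is the only difference between C and CL. As a sketch (illustrative only):

    static uint32_t cc_cmp_signed(int64_t a, int64_t b)
    {
        return a == b ? 0 : (a < b ? 1 : 2);
    }

    static uint32_t cc_cmp_unsigned(uint64_t a, uint64_t b)
    {
        return a == b ? 0 : (a < b ? 1 : 2);
    }
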
store_reg32(r1, tmp2); + FREE_RX + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x59: /* C R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32s(tmp2, tmp, 1); + FREE_RX + tmp = load_reg32(r1); + cmp_s32(tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x5a: /* A R1,D2(X2,B2) [RX] */ + case 0x5b: /* S R1,D2(X2,B2) [RX] */ + case 0x5e: /* AL R1,D2(X2,B2) [RX] */ + case 0x5f: /* SL R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = load_reg32(r1); + tcg_gen_qemu_ld32s(tmp, tmp, 1); + tmp3 = tcg_temp_new_i32(); + switch (opc) { + case 0x5a: case 0x5e: tcg_gen_add_i32(tmp3, tmp2, tmp); break; + case 0x5b: case 0x5f: tcg_gen_sub_i32(tmp3, tmp2, tmp); break; + default: tcg_abort(); + } + store_reg32(r1, tmp3); + switch (opc) { + case 0x5a: gen_helper_set_cc_add32(cc, tmp2, tmp, tmp3); break; + case 0x5e: gen_helper_set_cc_addu32(cc, tmp2, tmp, tmp3); break; + case 0x5b: gen_helper_set_cc_sub32(cc, tmp2, tmp, tmp3); break; + case 0x5f: gen_helper_set_cc_subu32(cc, tmp2, tmp, tmp3); break; + default: tcg_abort(); + } + FREE_RX + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->pc += 4; + break; + case 0x60: /* STD R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = load_freg(r1); + tcg_gen_qemu_st64(tmp2, tmp, 1); + FREE_RX + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x68: /* LD R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(tmp2, tmp, 1); + store_freg(r1, tmp2); + FREE_RX + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x70: /* STE R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = load_freg32(r1); + tcg_gen_qemu_st32(tmp2, tmp, 1); + FREE_RX + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x71: /* MS R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32s(tmp2, tmp, 1); + FREE_RX + tmp = load_reg(r1); + tcg_gen_mul_i32(tmp, tmp, tmp2); + store_reg(r1, tmp); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x78: /* LE R1,D2(X2,B2) [RX] */ + FETCH_DECODE_RX + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + store_freg32(r1, tmp2); + FREE_RX + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x88: /* SRL R1,D2(B2) [RS] */ + case 0x89: /* SLL R1,D2(B2) [RS] */ + case 0x8a: /* SRA R1,D2(B2) [RS] */ + FETCH_DECODE_RS + tmp = get_address(0, b2, d2); + tcg_gen_andi_i64(tmp, tmp, 0x3f); + tmp2 = load_reg32(r1); + switch (opc) { + case 0x88: tcg_gen_shr_i32(tmp2, tmp2, tmp); break; + case 0x89: tcg_gen_shl_i32(tmp2, tmp2, tmp); break; + case 0x8a: tcg_gen_sar_i32(tmp2, tmp2, tmp); break; + default: tcg_abort(); + } + store_reg32(r1, tmp2); + if (opc == 0x8a) set_cc_s32(tmp2); + s->pc += 4; + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + case 0x91: /* TM D1(B1),I2 [SI] */ + FETCH_DECODE_SI + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp2, tmp, 1); + FREE_SI + tmp = tcg_const_i32(i2); + gen_helper_tm(cc, tmp2, tmp); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x92: /* MVI D1(B1),I2 [SI] */ + FETCH_DECODE_SI + tmp2 = tcg_const_i32(i2); + tcg_gen_qemu_st8(tmp2, tmp, 1); + FREE_SI + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x94: /* NI D1(B1),I2 [SI] */ + case 0x96: /* OI D1(B1),I2 [SI] */ + case 0x97: /* XI D1(B1),I2 [SI] */ + FETCH_DECODE_SI + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp2, tmp, 1); + switch (opc) { + case 0x94: tcg_gen_andi_i32(tmp2, tmp2, i2); break; + case 0x96: tcg_gen_ori_i32(tmp2, tmp2, i2); break; + case 0x97: 
tcg_gen_xori_i32(tmp2, tmp2, i2); break; + default: tcg_abort(); + } + tcg_gen_qemu_st8(tmp2, tmp, 1); + set_cc_nz_u32(tmp2); + FREE_SI + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x95: /* CLI D1(B1),I2 [SI] */ + FETCH_DECODE_SI + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld8u(tmp2, tmp, 1); + cmp_u32c(tmp2, i2); + FREE_SI + tcg_temp_free(tmp2); + s->pc += 4; + break; + case 0x9b: /* STAM R1,R3,D2(B2) [RS] */ + FETCH_DECODE_RS + tmp = tcg_const_i32(r1); + tmp2 = get_address(0, b2, d2); + tmp3 = tcg_const_i32(r3); + gen_helper_stam(tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->pc += 4; + break; + case 0xa5: + insn = ld_code4(s->pc); + r1 = (insn >> 20) & 0xf; + op = (insn >> 16) & 0xf; + i2 = insn & 0xffff; + disas_a5(s, op, r1, i2); + s->pc += 4; + break; + case 0xa7: + insn = ld_code4(s->pc); + r1 = (insn >> 20) & 0xf; + op = (insn >> 16) & 0xf; + i2 = (short)insn; + disas_a7(s, op, r1, i2); + s->pc += 4; + break; + case 0xa8: /* MVCLE R1,R3,D2(B2) [RS] */ + FETCH_DECODE_RS + tmp = tcg_const_i32(r1); + tmp3 = tcg_const_i32(r3); + tmp2 = get_address(0, b2, d2); + gen_helper_mvcle(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->pc += 4; + break; + case 0xa9: /* CLCLE R1,R3,D2(B2) [RS] */ + FETCH_DECODE_RS + tmp = tcg_const_i32(r1); + tmp3 = tcg_const_i32(r3); + tmp2 = get_address(0, b2, d2); + gen_helper_clcle(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->pc += 4; + break; + case 0xb2: + insn = ld_code4(s->pc); + op = (insn >> 16) & 0xff; + switch (op) { + case 0x9c: /* STFPC D2(B2) [S] */ + d2 = insn & 0xfff; + b2 = (insn >> 12) & 0xf; + tmp = tcg_temp_new_i32(); + tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUState, fpc)); + tmp2 = get_address(0, b2, d2); + tcg_gen_qemu_st32(tmp, tmp2, 1); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + break; + default: + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + disas_b2(s, op, r1, r2); + break; + } + s->pc += 4; + break; + case 0xb3: + insn = ld_code4(s->pc); + op = (insn >> 16) & 0xff; + r3 = (insn >> 12) & 0xf; /* aka m3 */ + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + disas_b3(s, op, r3, r1, r2); + s->pc += 4; + break; + case 0xb9: + insn = ld_code4(s->pc); + r1 = (insn >> 4) & 0xf; + r2 = insn & 0xf; + op = (insn >> 16) & 0xff; + disas_b9(s, op, r1, r2); + s->pc += 4; + break; + case 0xba: /* CS R1,R3,D2(B2) [RS] */ + FETCH_DECODE_RS + tmp = tcg_const_i32(r1); + tmp2 = get_address(0, b2, d2); + tmp3 = tcg_const_i32(r3); + gen_helper_cs(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->pc += 4; + break; + case 0xbd: /* CLM R1,M3,D2(B2) [RS] */ + FETCH_DECODE_RS + tmp3 = get_address(0, b2, d2); + tmp2 = tcg_const_i32(r3); /* aka m3 */ + tmp = load_reg32(r1); + gen_helper_clm(cc, tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->pc += 4; + break; + case 0xbe: /* STCM R1,M3,D2(B2) [RS] */ + FETCH_DECODE_RS + tmp3 = get_address(0, b2, d2); + tmp2 = tcg_const_i32(r3); /* aka m3 */ + tmp = load_reg32(r1); + gen_helper_stcm(tmp, tmp2, tmp3); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + s->pc += 4; + break; + case 0xbf: /* ICM R1,M3,D2(B2) [RS] */ + FETCH_DECODE_RS + if (r3 == 15) { /* effectively a 32-bit load */ + tmp = get_address(0, b2, d2); + tmp2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(tmp2, tmp, 1); + store_reg32(r1, tmp2); + tcg_temp_free(tmp); + tmp = tcg_const_i32(r3); + 
gen_helper_set_cc_icm(cc, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + } + else if (r3) { + uint32_t mask = 0x00ffffffUL; + uint32_t shift = 24; + int m3 = r3; + tmp3 = load_reg32(r1); + tmp = get_address(0, b2, d2); + tmp2 = tcg_temp_new_i64(); + while (m3) { + if (m3 & 8) { + tcg_gen_qemu_ld8u(tmp2, tmp, 1); + if (shift) tcg_gen_shli_i32(tmp2, tmp2, shift); + tcg_gen_andi_i32(tmp3, tmp3, mask); + tcg_gen_or_i32(tmp3, tmp3, tmp2); + tcg_gen_addi_i64(tmp, tmp, 1); + } + m3 = (m3 << 1) & 0xf; + mask = (mask >> 8) | 0xff000000UL; + shift -= 8; + } + store_reg32(r1, tmp3); + tcg_temp_free(tmp); + tmp = tcg_const_i32(r3); + gen_helper_set_cc_icm(cc, tmp, tmp2); + tcg_temp_free(tmp); + tcg_temp_free(tmp2); + tcg_temp_free(tmp3); + } + else { + tmp = tcg_const_i32(0); + gen_helper_set_cc_icm(cc, tmp, tmp); /* i.e. env->cc = 0 */ + tcg_temp_free(tmp); + } + s->pc += 4; + break; + case 0xc0: + case 0xc2: + insn = ld_code6(s->pc); + r1 = (insn >> 36) & 0xf; + op = (insn >> 32) & 0xf; + i2 = (int)insn; + switch (opc) { + case 0xc0: disas_c0(s, op, r1, i2); break; + case 0xc2: disas_c2(s, op, r1, i2); break; + default: tcg_abort(); + } + s->pc += 6; + break; + case 0xd2: /* mvc d1(l, b1), d2(b2) */ + case 0xd4: /* NC D1(L,B1),D2(B2) [SS] */ + case 0xd5: /* CLC D1(L,B1),D2(B2) [SS] */ + case 0xd6: /* OC D1(L,B1),D2(B2) [SS] */ + case 0xd7: /* xc d1(l, b1), d2(b2) */ + insn = ld_code6(s->pc); + vl = tcg_const_i32((insn >> 32) & 0xff); + b1 = (insn >> 28) & 0xf; + vd1 = tcg_const_i32((insn >> 16) & 0xfff); + b2 = (insn >> 12) & 0xf; + vd2 = tcg_const_i32(insn & 0xfff); + vb = tcg_const_i32((b1 << 4) | b2); + switch (opc) { + case 0xd2: gen_helper_mvc(vl, vb, vd1, vd2); break; + case 0xd4: gen_helper_nc(cc, vl, vb, vd1, vd2); break; + case 0xd5: gen_helper_clc(cc, vl, vb, vd1, vd2); break; + case 0xd6: gen_helper_oc(cc, vl, vb, vd1, vd2); break; + case 0xd7: gen_helper_xc(cc, vl, vb, vd1, vd2); break; + default: tcg_abort(); break; + } + s->pc += 6; + break; + case 0xe3: + insn = ld_code6(s->pc); + DEBUGINSN + d2 = ( (int) ( (((insn >> 16) & 0xfff) | ((insn << 4) & 0xff000)) << 12 ) ) >> 12; + disas_e3(s, /* op */ insn & 0xff, /* r1 */ (insn >> 36) & 0xf, /* x2 */ (insn >> 32) & 0xf, /* b2 */ (insn >> 28) & 0xf, d2 ); + s->pc += 6; + break; + case 0xeb: + insn = ld_code6(s->pc); + DEBUGINSN + op = insn & 0xff; + r1 = (insn >> 36) & 0xf; + r3 = (insn >> 32) & 0xf; + b2 = (insn >> 28) & 0xf; + d2 = ( (int) ( (((insn >> 16) & 0xfff) | ((insn << 4) & 0xff000)) << 12 ) ) >> 12; + disas_eb(s, op, r1, r3, b2, d2); + s->pc += 6; + break; + case 0xed: + insn = ld_code6(s->pc); + DEBUGINSN + op = insn & 0xff; + r1 = (insn >> 36) & 0xf; + x2 = (insn >> 32) & 0xf; + b2 = (insn >> 28) & 0xf; + d2 = (short)((insn >> 16) & 0xfff); + r1b = (insn >> 12) & 0xf; + disas_ed(s, op, r1, x2, b2, d2, r1b); + s->pc += 6; + break; + default: + LOG_DISAS("unimplemented opcode 0x%x\n", opc); + gen_illegal_opcode(s); + s->pc += 6; + break; + } +} + +static inline void gen_intermediate_code_internal (CPUState *env, + TranslationBlock *tb, + int search_pc) +{ + DisasContext dc; + target_ulong pc_start; + uint64_t next_page_start; + uint16_t *gen_opc_end; + int j, lj = -1; + int num_insns, max_insns; + + pc_start = tb->pc; + + dc.pc = tb->pc; + dc.env = env; + dc.pc = pc_start; + dc.is_jmp = DISAS_NEXT; + dc.tb = tb; + + gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; + + next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + + num_insns = 0; + max_insns = tb->cflags & CF_COUNT_MASK; + if (max_insns == 0) + 
max_insns = CF_COUNT_MASK; + + gen_icount_start(); + + /* using a temp for the condition code allows TCG to optimize away + any condition code calculations that are not actually used */ + cc = tcg_temp_local_new_i32(); + tcg_gen_mov_i32(cc, global_cc); + do { + if (search_pc) { + j = gen_opc_ptr - gen_opc_buf; + if (lj < j) { + lj++; + while (lj < j) + gen_opc_instr_start[lj++] = 0; + } + gen_opc_pc[lj] = dc.pc; + gen_opc_instr_start[lj] = 1; + gen_opc_icount[lj] = num_insns; + } + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) + gen_io_start(); +#if defined S390X_DEBUG_DISAS + LOG_DISAS("pc " TARGET_FMT_lx "\n", + dc.pc); +#endif + disas_s390_insn(env, &dc); + + num_insns++; + } while (!dc.is_jmp && gen_opc_ptr < gen_opc_end && dc.pc < next_page_start + && num_insns < max_insns && !env->singlestep_enabled); + + if (dc.is_jmp != DISAS_TB_JUMP) { + tcg_gen_mov_i32(global_cc, cc); + tcg_temp_free(cc); + } + + if (!dc.is_jmp) { + tcg_gen_st_i64(tcg_const_i64(dc.pc), cpu_env, offsetof(CPUState, psw.addr)); + } + + if (dc.is_jmp == DISAS_SVC) { + tcg_gen_st_i64(tcg_const_i64(dc.pc), cpu_env, offsetof(CPUState, psw.addr)); + TCGv tmp = tcg_const_i32(EXCP_SVC); + gen_helper_exception(tmp); + } + + if (tb->cflags & CF_LAST_IO) + gen_io_end(); + /* Generate the return instruction */ + if (dc.is_jmp != DISAS_TB_JUMP) { + tcg_gen_exit_tb(0); + } + gen_icount_end(tb, num_insns); + *gen_opc_ptr = INDEX_op_end; + if (search_pc) { + j = gen_opc_ptr - gen_opc_buf; + lj++; + while (lj <= j) + gen_opc_instr_start[lj++] = 0; + } else { + tb->size = dc.pc - pc_start; + tb->icount = num_insns; + } +#if defined S390X_DEBUG_DISAS + log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0); + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { + qemu_log("IN: %s\n", lookup_symbol(pc_start)); + log_target_disas(pc_start, dc.pc - pc_start, 1); + qemu_log("\n"); + } +#endif +} + void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb) { + gen_intermediate_code_internal(env, tb, 0); } void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb) { + gen_intermediate_code_internal(env, tb, 1); } void gen_pc_load(CPUState *env, TranslationBlock *tb, Index: qemu-0.14.1/tcg/tcg-op.h =================================================================== --- qemu-0.14.1.orig/tcg/tcg-op.h +++ qemu-0.14.1/tcg/tcg-op.h @@ -366,6 +366,18 @@ static inline void tcg_gen_br(int label) tcg_gen_op1i(INDEX_op_br, label); } +static inline void tcg_gen_sync_i32(TCGv_i32 arg) +{ + tcg_gen_op1_i32(INDEX_op_sync_i32, arg); +} + +#if TCG_TARGET_REG_BITS == 64 +static inline void tcg_gen_sync_i64(TCGv_i64 arg) +{ + tcg_gen_op1_i64(INDEX_op_sync_i64, arg); +} +#endif + static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg) { if (!TCGV_EQUAL_I32(ret, arg)) Index: qemu-0.14.1/tcg/tcg-opc.h =================================================================== --- qemu-0.14.1.orig/tcg/tcg-opc.h +++ qemu-0.14.1/tcg/tcg-opc.h @@ -41,6 +41,7 @@ DEF(call, 0, 1, 2, TCG_OPF_SIDE_EFFECTS) DEF(jmp, 0, 1, 0, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) DEF(br, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) +DEF(sync_i32, 0, 1, 0, 0) DEF(mov_i32, 1, 1, 0, 0) DEF(movi_i32, 1, 0, 1, 0) DEF(setcond_i32, 1, 2, 1, 0) @@ -131,6 +132,7 @@ DEF(nor_i32, 1, 2, 0, 0) #endif #if TCG_TARGET_REG_BITS == 64 +DEF(sync_i64, 0, 1, 0, 0) DEF(mov_i64, 1, 1, 0, 0) DEF(movi_i64, 1, 0, 1, 0) DEF(setcond_i64, 1, 2, 1, 0) Index: qemu-0.14.1/tcg/tcg.c =================================================================== --- qemu-0.14.1.orig/tcg/tcg.c +++ 
qemu-0.14.1/tcg/tcg.c @@ -2022,6 +2022,12 @@ static inline int tcg_gen_code_common(TC // dump_regs(s); #endif switch(opc) { + case INDEX_op_sync_i32: +#if TCG_TARGET_REG_BITS == 64 + case INDEX_op_sync_i64: +#endif + temp_save(s, args[0], s->reserved_regs); + break; case INDEX_op_mov_i32: #if TCG_TARGET_REG_BITS == 64 case INDEX_op_mov_i64:
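Note (illustration, not part of the patch): the 0xe3/0xeb cases above reassemble the signed 20-bit displacement of RXY/RSY-format instructions from its 12-bit low field (DL) and 8-bit high field (DH) and then sign-extend it. The following standalone, host-side C sketch mirrors that bit manipulation so it can be checked in isolation; the sample encoding assumes a standard z/Architecture LG %r1,-8(%r2) instruction and is chosen here purely as an example.

#include <stdint.h>
#include <stdio.h>

/* Reassemble the signed 20-bit displacement of an RXY/RSY instruction,
 * mirroring the expression used for opcodes 0xe3/0xeb in the patch.
 * "insn" is the 48-bit instruction right-aligned in a 64-bit integer. */
static int32_t rxy_displacement(uint64_t insn)
{
    uint32_t dl = (insn >> 16) & 0xfff;        /* DL: low 12 bits of the displacement */
    uint32_t dh = (insn << 4) & 0xff000;       /* DH: high 8 bits, already shifted into place */
    return ((int32_t)((dh | dl) << 12)) >> 12; /* sign-extend from bit 19, as the patch does */
}

int main(void)
{
    /* Assumed example: LG %r1,-8(%r2) -> opcode e3, r1=1, x2=0, b2=2,
     * DL=0xff8, DH=0xff, low opcode byte 04. */
    uint64_t insn = 0xe3102ff8ff04ULL;
    printf("displacement = %d\n", rxy_displacement(insn)); /* prints -8 */
    return 0;
}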