// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build tamago

package runtime

import (
	"internal/runtime/atomic"
	"unsafe"
)

// see testing.testBinary
var testBinary string

// Bloc allows overriding the heap memory start address
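//
// When set to a non-zero value it is used by osinit in place of the default
// heap start; a minimal sketch (hypothetical address):
//
//	runtime.Bloc = 0x90000000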
var Bloc uintptr

// the following functions must be provided externally
func hwinit()
func printk(byte)
func getRandomData([]byte)
func initRNG()
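
// As an illustration only, a board support package might provide printk along
// these lines, in a file that imports _ "unsafe" (uart.Tx is a hypothetical
// console transmit helper):
//
//	//go:linkname printk runtime.printk
//	func printk(c byte) {
//		uart.Tx(c) // send the byte to the board console
//	}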

// the following functions must be provided externally
// (but are already stubbed somewhere else in the runtime)
//func nanotime1() int64

// GetRandomData generates len(r) random bytes from the random source provided
// externally by the linked application.
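//
// A minimal usage sketch from a linked application:
//
//	buf := make([]byte, 16)
//	runtime.GetRandomData(buf)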
func GetRandomData(r []byte) {
	getRandomData(r)
}

// CallOnG0 calls a function (func(off int)) on the g0 stack.
//
// The function arguments must be passed through the following registers
// (rather than on the stack frame):
//
// * R0: fn argument (vector table offset)
// * R1: fn pointer
// * R2: size of stack area reserved for caller registers
// * R3: caller program counter
func CallOnG0()

// WakeG modifies a goroutine's cached timer for time.Sleep (g.timer) so that
// it fires as soon as possible.
//
// The function arguments must be passed through the following registers
// (rather than on the stack frame):
//
// * R0: G pointer
func WakeG()

// stubs for unused/unimplemented functionality
type mOS struct{}
type sigset struct{}
type gsignalStack struct{}

func goenvs() {}
func sigsave(p *sigset) {}
func msigrestore(sigmask sigset) {}
func clearSignalHandlers() {}
func sigblock(exiting bool) {}
func minit() {}
func unminit() {}
func mdestroy(mp *m) {}
func setProcessCPUProfiler(hz int32) {}
func setThreadCPUProfiler(hz int32) {}
func initsig(preinit bool) {}
func osyield() {}
func osyield_no_g() {}

// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
	throw("newosproc: not implemented")
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	mp.gsignal = malg(32 * 1024)
	mp.gsignal.m = mp
}

func osinit() {
	ncpu = 1
	physPageSize = 4096

	if Bloc != 0 {
		bloc = Bloc
		blocMax = bloc
	} else {
		initBloc()
	}
}

func readRandom(r []byte) int {
	initRNG()
	getRandomData(r)
	return len(r)
}

func signame(sig uint32) string {
	return ""
}

//go:linkname os_sigpipe os.sigpipe
func os_sigpipe() {
	throw("too many writes on closed pipe")
}

//go:nosplit
func crash() {
	*(*int32)(nil) = 0
}

//go:linkname syscall
func syscall(number, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
	switch number {
	// SYS_WRITE
	case 1:
		r1 := write(a1, unsafe.Pointer(a2), int32(a3))
		return uintptr(r1), 0, 0
	default:
		throw("unexpected syscall")
	}

	return
}

//go:nosplit
func write1(fd uintptr, buf unsafe.Pointer, count int32) int32 {
	if fd != 1 && fd != 2 {
		throw("unexpected fd, only stdout/stderr are supported")
	}

	c := uintptr(count)

	for i := uintptr(0); i < c; i++ {
		p := (*byte)(unsafe.Pointer(uintptr(buf) + i))
		printk(*p)
	}

	return int32(c)
}

//go:linkname syscall_now syscall.now
func syscall_now() (sec int64, nsec int32) {
	sec, nsec, _ = time_now()
	return
}

//go:nosplit
func walltime() (sec int64, nsec int32) {
	nano := nanotime()
	sec = nano / 1000000000
	nsec = int32(nano % 1000000000)
	return
}

//go:nosplit
func usleep(us uint32) {
	// busy wait until the deadline
	wake := nanotime() + int64(us)*1000
	for nanotime() < wake {
	}
}

//go:nosplit
func usleep_no_g(usec uint32) {
	usleep(usec)
}

// Exit can be provided externally by the linked application to implement
// runtime.exit.
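//
// A minimal sketch of what a linked application might register (Shutdown is a
// hypothetical board-specific helper):
//
//	runtime.Exit = func(code int32) {
//		Shutdown() // e.g. power off or reset the board
//	}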
var Exit func(int32)

func exit(code int32) {
	if Exit != nil {
		Exit(code)
	}

	// reached also when the external handler returns
	print("exit with code ", code, " halting\n")

	for {
		// hang forever
	}
}

func exitThread(wait *atomic.Uint32) {
	// We should never reach exitThread
	throw("exitThread: not implemented")
}

const preemptMSupported = false

func preemptM(mp *m) {
	// No threads, so nothing to do.
}

// Stubs so tests can link correctly. These should never be called.
func open(name *byte, mode, perm int32) int32 { panic("not implemented") }
func closefd(fd int32) int32 { panic("not implemented") }
func read(fd int32, p unsafe.Pointer, n int32) int32 { panic("not implemented") }