Vendor things

This commit is contained in:
John Doty 2024-03-08 11:03:01 -08:00
parent 5deceec006
commit 977e3c17e5
19434 changed files with 10682014 additions and 0 deletions

View file

@ -0,0 +1,69 @@
use crate::detail::align_down;
use crate::reg_context::InitFn;
use crate::stack::Stack;
// Context-switch primitives implemented in the bundled static `asm` library
// (see the AArch64 assembly elsewhere in this crate).
#[link(name = "asm", kind = "static")]
extern "C" {
    /// First "return" target of a fresh task: moves the planted x19/x20
    /// into x0/x1 and tail-jumps to the init function held in x21.
    pub fn bootstrap_green_task();
    /// Hint-load the cache line at `data` into L1.
    pub fn prefetch(data: *const usize);
    /// Save the callee-saved register set into `out_regs`, then restore it
    /// from `in_regs` and resume at the restored link register.
    pub fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers);
}
// Saved AArch64 register set; layout must match the assembly's store
// offsets exactly (repr(C), 16-byte aligned).
#[repr(C, align(16))]
#[derive(Debug)]
pub struct Registers {
    // We save the 13 callee-saved registers:
    // x19--x28, fp (x29), lr (x30), sp
    // and the 8 callee-saved floating point registers:
    // v8--v15
    // Per the assembly: indices 0..=11 hold x19..x30, index 12 holds sp,
    // byte offsets 112..176 hold d8--d15. Only the first 22 words are
    // used; [usize; 32] leaves spare room.
    gpr: [usize; 32],
}
impl Registers {
    /// Zeroed register set; filled in by `initialize_call_frame` or by a
    /// `swap_registers` save.
    pub fn new() -> Registers {
        Registers { gpr: [0; 32] }
    }

    /// Prefetch the saved stack top so the first resume misses less.
    #[inline]
    pub fn prefetch(&self) {
        // gpr[12] is the saved stack pointer slot (byte offset 96 in the asm).
        let ptr = self.gpr[12] as *const usize;
        unsafe {
            prefetch(ptr); // the saved sp itself
            // NOTE(review): `add(8)` advances 8 usizes = 64 bytes (the next
            // cache line), not "sp + 8" bytes.
            prefetch(ptr.add(8));
        }
    }
}
/// Prepare `regs` so that the first `swap_registers` into it lands in
/// `bootstrap_green_task`, which forwards `arg`/`arg2` to `fptr`.
pub fn initialize_call_frame(
    regs: &mut Registers,
    fptr: InitFn,
    arg: usize,
    arg2: *mut usize,
    stack: &Stack,
) {
    // Indices into `gpr`; callee-saved registers start at x19 (index 0).
    const X19: usize = 19 - 19;
    const X20: usize = 20 - 19;
    const X21: usize = 21 - 19;
    const FP: usize = 29 - 19;
    const LR: usize = 30 - 19;
    const SP: usize = 31 - 19;

    // 16-byte align the top of the new stack (AArch64 ABI requirement).
    let sp = align_down(stack.end());

    // These registers are frobbed by bootstrap_green_task into the right
    // location so we can invoke the "real init function", `fptr`:
    // x19 -> x0 (arg), x20 -> x1 (arg2), x21 is the jump target.
    regs.gpr[X19] = arg;
    regs.gpr[X20] = arg2 as usize;
    regs.gpr[X21] = fptr as usize;
    // Aarch64 current stack frame pointer
    regs.gpr[FP] = sp as usize;
    // The first swap "returns" into bootstrap_green_task via the saved lr.
    regs.gpr[LR] = bootstrap_green_task as usize;
    // setup the init stack
    // this is prepared for the swap context
    regs.gpr[SP] = sp as usize;
}

View file

@ -0,0 +1,60 @@
/* AArch64 ELF (Linux) context-switch primitives. */

/* void prefetch(const void *x0): hint one cache line into L1. */
.text
.globl prefetch
.type prefetch,@function
.align 16
prefetch:
    prfm pldl1keep, [x0]
    ret
.size prefetch,.-prefetch

/* First "return" target of a fresh task. initialize_call_frame planted
   arg in x19, arg2 in x20 and the real init function in x21. */
.text
.globl bootstrap_green_task
.type bootstrap_green_task,@function
.align 16
bootstrap_green_task:
    mov x0, x19 // arg0
    mov x1, x20 // arg1
    mov x30, #0 // clear LR so the init function has no frame to return past
    ret x21     // branch to the init function
.size bootstrap_green_task,.-bootstrap_green_task

/* void swap_registers(Registers *out (x0), const Registers *in (x1)):
   store the AAPCS64 callee-saved set (x19..x28, fp, lr, sp, d8..d15) at
   fixed offsets in *out, reload it from *in, then resume at the restored
   lr. Offsets must match the Rust `Registers` layout. */
.text
.globl swap_registers
.type swap_registers,@function
.align 16
swap_registers:
    stp x19, x20, [x0, #0]
    stp x21, x22, [x0, #16]
    stp x23, x24, [x0, #32]
    stp x25, x26, [x0, #48]
    stp x27, x28, [x0, #64]
    stp x29, x30, [x0, #80]
    mov x2, sp          // sp cannot be stored directly; stage through x2
    str x2, [x0, #96]
    stp d8, d9, [x0, #112]
    stp d10, d11, [x0, #128]
    stp d12, d13, [x0, #144]
    stp d14, d15, [x0, #160]
    ldp x19, x20, [x1, #0]
    ldp x21, x22, [x1, #16]
    ldp x23, x24, [x1, #32]
    ldp x25, x26, [x1, #48]
    ldp x27, x28, [x1, #64]
    ldp x29, x30, [x1, #80]
    ldr x2, [x1, #96]
    mov sp, x2
    ldp d8, d9, [x1, #112]
    ldp d10, d11, [x1, #128]
    ldp d12, d13, [x1, #144]
    ldp d14, d15, [x1, #160]
    br x30              // resume the restored task at its saved lr
.size swap_registers,.-swap_registers
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits

View file

@ -0,0 +1,51 @@
/* AArch64 Mach-O (Apple) twin of the ELF file: same register layout,
   symbols carry the Mach-O leading underscore and there are no
   .type/.size directives (ELF-only). */
/* NOTE(review): on Mach-O, `.align n` means 2^n bytes, so `.align 8`
   requests 256-byte alignment — possibly `.align 4` was intended. Confirm
   against the assembler actually used. */
.text
.globl _prefetch
.align 8
_prefetch:
    prfm pldl1keep, [x0]
    ret
.text
.globl _bootstrap_green_task
.align 8
/* Forward the planted x19/x20 to the init function held in x21. */
_bootstrap_green_task:
    mov x0, x19 // arg0
    mov x1, x20 // arg1
    mov x30, #0 // clear LR
    ret x21
.text
.globl _swap_registers
.align 8
/* Save callee-saved set into *x0, restore from *x1, resume at restored lr. */
_swap_registers:
    stp x19, x20, [x0, #0]
    stp x21, x22, [x0, #16]
    stp x23, x24, [x0, #32]
    stp x25, x26, [x0, #48]
    stp x27, x28, [x0, #64]
    stp x29, x30, [x0, #80]
    mov x2, sp
    str x2, [x0, #96]
    stp d8, d9, [x0, #112]
    stp d10, d11, [x0, #128]
    stp d12, d13, [x0, #144]
    stp d14, d15, [x0, #160]
    ldp x19, x20, [x1, #0]
    ldp x21, x22, [x1, #16]
    ldp x23, x24, [x1, #32]
    ldp x25, x26, [x1, #48]
    ldp x27, x28, [x1, #64]
    ldp x29, x30, [x1, #80]
    ldr x2, [x1, #96]
    mov sp, x2
    ldp d8, d9, [x1, #112]
    ldp d10, d11, [x1, #128]
    ldp d12, d13, [x1, #144]
    ldp d14, d15, [x1, #160]
    br x30

View file

@ -0,0 +1,129 @@
.file "asm_x86_64_ms_pe_gas.asm"
/* x86_64 Windows (PE/COFF, GAS AT&T syntax) context-switch primitives.
   MS x64 calling convention: arg0 = %rcx, arg1 = %rdx. */
    .text
    .p2align 4,,15
.globl prefetch_asm
.def prefetch_asm; .scl 2; .type 32; .endef
.seh_proc prefetch_asm
prefetch_asm:
.seh_endprologue
    /* BUGFIX: was `prefetcht2 (%rdi)` — %rdi is the SysV arg register;
       on Windows arg0 arrives in %rcx (the MASM twin uses [rcx]). */
    prefetcht2 (%rcx)
    ret
.seh_endproc
.section .drectve
.ascii " -export:\"prefetch_asm\""

/* First "return" target of a fresh task: forward the planted r12/r13 as
   MS x64 args and jump to the init function held in r14. */
    .text
    .p2align 4,,15
.globl bootstrap_green_task
.def bootstrap_green_task; .scl 2; .type 32; .endef
.seh_proc bootstrap_green_task
bootstrap_green_task:
.seh_endprologue
    mov %r12, %rcx /* setup the function arg */
    mov %r13, %rdx /* setup the function arg */
    and $-16, %rsp /* align the stack pointer */
    mov %r14, (%rsp) /* this is the new return address */
    ret
.seh_endproc
.section .drectve
.ascii " -export:\"bootstrap_green_task\""

/* void swap_registers(Registers *out (%rcx), const Registers *in (%rdx)):
   save callee-saved GPRs + xmm6..xmm15 and the NT_TIB stack bounds into
   *out, restore them from *in, then jump to the restored return address.
   Offsets must match the Rust `Registers { gpr: [usize; 16], _xmm: [Xmm; 10] }`. */
    .text
    .p2align 4,,15
.globl swap_registers
.def swap_registers; .scl 2; .type 32; .endef
.seh_proc swap_registers
swap_registers:
.seh_endprologue
    mov %rbx, (0*8)(%rcx)
    mov %rsp, (1*8)(%rcx)
    mov %rbp, (2*8)(%rcx)
    mov %r12, (4*8)(%rcx)
    mov %r13, (5*8)(%rcx)
    mov %r14, (6*8)(%rcx)
    mov %r15, (7*8)(%rcx)
    mov %rdi, (9*8)(%rcx)  /* rdi/rsi are callee-saved on Windows */
    mov %rsi, (10*8)(%rcx)
    /* align mem: clear the low 4 bits of the low byte so the movapd
       (16-byte aligned) stores land on an aligned address */
    mov %rcx, %r10
    and $0xf0, %r10b
    /* Save non-volatile XMM registers */
    movapd %xmm6, (16*8)(%r10)
    movapd %xmm7, (18*8)(%r10)
    movapd %xmm8, (20*8)(%r10)
    movapd %xmm9, (22*8)(%r10)
    movapd %xmm10, (24*8)(%r10)
    movapd %xmm11, (26*8)(%r10)
    movapd %xmm12, (28*8)(%r10)
    movapd %xmm13, (30*8)(%r10)
    movapd %xmm14, (32*8)(%r10)
    movapd %xmm15, (34*8)(%r10)
    /* load NT_TIB */
    movq %gs:(0x30), %r10
    /* save current stack base */
    movq 0x08(%r10), %rax
    mov %rax, (11*8)(%rcx)
    /* save current stack limit */
    movq 0x10(%r10), %rax
    mov %rax, (12*8)(%rcx)
    /* save current deallocation stack */
    movq 0x1478(%r10), %rax
    mov %rax, (13*8)(%rcx)
    /* save fiber local storage */
    /* movq 0x18(%r10), %rax */
    /* mov %rax, (14*8)(%rcx) */
    /* NOTE(review): in x86 GAS `;` is a statement SEPARATOR, not a comment,
       so the next line actually assembles (stores %rcx into slot 3). It is
       matched by the `;`-prefixed load below, so it is benign — kept as-is
       to preserve behavior, but it was probably meant as a comment. */
    ; mov %rcx, (3*8)(%rcx)
    mov (0*8)(%rdx), %rbx
    mov (1*8)(%rdx), %rsp
    mov (2*8)(%rdx), %rbp
    mov (4*8)(%rdx), %r12
    mov (5*8)(%rdx), %r13
    mov (6*8)(%rdx), %r14
    mov (7*8)(%rdx), %r15
    mov (9*8)(%rdx), %rdi
    mov (10*8)(%rdx), %rsi
    /* align mem */
    mov %rdx, %r10
    and $0xf0, %r10b
    /* Restore non-volatile XMM registers */
    movapd (16*8)(%r10), %xmm6
    movapd (18*8)(%r10), %xmm7
    movapd (20*8)(%r10), %xmm8
    movapd (22*8)(%r10), %xmm9
    movapd (24*8)(%r10), %xmm10
    movapd (26*8)(%r10), %xmm11
    movapd (28*8)(%r10), %xmm12
    movapd (30*8)(%r10), %xmm13
    movapd (32*8)(%r10), %xmm14
    movapd (34*8)(%r10), %xmm15
    /* load NT_TIB */
    movq %gs:(0x30), %r10
    /* restore fiber local storage */
    /* mov (14*8)(%rdx), %rax */
    /* movq %rax, 0x18(%r10) */
    /* restore deallocation stack */
    mov (13*8)(%rdx), %rax
    movq %rax, 0x1478(%r10)
    /* restore stack limit */
    mov (12*8)(%rdx), %rax
    movq %rax, 0x10(%r10)
    /* restore stack base */
    mov (11*8)(%rdx), %rax
    movq %rax, 0x8(%r10)
    /* see NOTE(review) above: this `;` line also assembles */
    ; mov (3*8)(%rdx), %rcx
    pop %rax
    /* BUGFIX: AT&T indirect jump requires `*`; bare `jmp %rax` only
       assembles with a warning. */
    jmp *%rax
.seh_endproc
.section .drectve
.ascii " -export:\"swap_registers\""

View file

@ -0,0 +1,110 @@
; x86_64 Windows context-switch primitives, MASM syntax.
; MS x64 calling convention: arg0 = rcx, arg1 = rdx.
.code

; void prefetch_asm(const void *rcx): hint one cache line into cache.
prefetch_asm PROC FRAME
.endprolog
    prefetcht2 [rcx]
    ret
prefetch_asm ENDP

; First "return" target of a fresh task: forward the planted r12/r13 as
; args and "return" into the init function held in r14.
bootstrap_green_task PROC FRAME
.endprolog
    mov rcx, r12 ; setup the function arg
    mov rdx, r13 ; setup the function arg
    and rsp, -16 ; align the stack pointer
    mov [rsp], r14 ; this is the new return address
    ret
bootstrap_green_task ENDP

; void swap_registers(Registers *out (rcx), const Registers *in (rdx)):
; save callee-saved GPRs + xmm6..xmm15 and the NT_TIB stack bounds into
; *out, restore them from *in, then jump to the restored return address.
swap_registers PROC FRAME
.endprolog
    mov [rcx + 0*8], rbx
    mov [rcx + 1*8], rsp
    mov [rcx + 2*8], rbp
    mov [rcx + 4*8], r12
    mov [rcx + 5*8], r13
    mov [rcx + 6*8], r14
    mov [rcx + 7*8], r15
    mov [rcx + 9*8], rdi  ; rdi/rsi are callee-saved on Windows
    mov [rcx + 10*8], rsi
    mov r10, rcx
    ; NOTE(review): `not 8` clears only bit 3 (8-byte granularity); the
    ; GAS twin masks the low 4 bits (16-byte alignment), which is what the
    ; aligned movapd stores below require. Confirm this mask is intended.
    and r10, not 8
    ; Save non-volatile XMM registers:
    movapd [r10 + 16*8], xmm6
    movapd [r10 + 18*8], xmm7
    movapd [r10 + 20*8], xmm8
    movapd [r10 + 22*8], xmm9
    movapd [r10 + 24*8], xmm10
    movapd [r10 + 26*8], xmm11
    movapd [r10 + 28*8], xmm12
    movapd [r10 + 30*8], xmm13
    movapd [r10 + 32*8], xmm14
    movapd [r10 + 34*8], xmm15
    ; load NT_TIB
    mov r10, gs:[030h]
    ; save current stack base
    mov rax, [r10 + 08h]
    mov [rcx + 11*8], rax
    ; save current stack limit
    mov rax, [r10 + 010h]
    mov [rcx + 12*8], rax
    ; save current deallocation stack
    mov rax, [r10 + 01478h]
    mov [rcx + 13*8], rax
    ; save fiber local storage
    ; mov rax, [r10 + 0x18]
    ; mov [rcx + 14*8], rax
    ; mov [rcx + 3*8], rcx
    mov rbx, [rdx + 0*8]
    mov rsp, [rdx + 1*8]
    mov rbp, [rdx + 2*8]
    mov r12, [rdx + 4*8]
    mov r13, [rdx + 5*8]
    mov r14, [rdx + 6*8]
    mov r15, [rdx + 7*8]
    mov rdi, [rdx + 9*8]
    mov rsi, [rdx + 10*8]
    mov r10, rdx
    and r10, not 8
    ; Restore non-volatile XMM registers:
    movapd xmm6, [r10 + 16*8]
    movapd xmm7, [r10 + 18*8]
    movapd xmm8, [r10 + 20*8]
    movapd xmm9, [r10 + 22*8]
    movapd xmm10, [r10 + 24*8]
    movapd xmm11, [r10 + 26*8]
    movapd xmm12, [r10 + 28*8]
    movapd xmm13, [r10 + 30*8]
    movapd xmm14, [r10 + 32*8]
    movapd xmm15, [r10 + 34*8]
    ; load NT_TIB
    mov r10, gs:[030h]
    ; restore fiber local storage
    ; mov [rdx + 14*8], rax
    ; movq rax, [r10 + 0x18]
    ; restore deallocation stack
    mov rax, [rdx + 13*8]
    mov [r10 + 01478h], rax
    ; restore stack limit
    mov rax, [rdx + 12*8]
    mov [r10 + 010h], rax
    ; restore stack base
    mov rax, [rdx + 11*8]
    mov [r10 + 08h], rax
    ; mov rcx, [rdx + 3*8]
    ; jump to the restored return address
    pop rax
    jmp rax
swap_registers ENDP
END

View file

@ -0,0 +1,48 @@
/* x86_64 ELF (System V) context-switch primitives.
   SysV calling convention: arg0 = %rdi, arg1 = %rsi. */

/* void prefetch(const void *rdi): hint one cache line into cache. */
.text
.globl prefetch
.type prefetch,@function
.align 16
prefetch:
    prefetcht2 (%rdi)
    ret
.size prefetch,.-prefetch

/* First "return" target of a fresh task: initialize_call_frame planted
   arg in r12, arg2 in r13 and the init function in r14. */
.text
.globl bootstrap_green_task
.type bootstrap_green_task,@function
.align 16
bootstrap_green_task:
    mov %r12, %rdi /* setup the function arg */
    mov %r13, %rsi /* setup the function arg */
    and $-16, %rsp /* align the stack pointer */
    mov %r14, (%rsp) /* this is the new return address */
    ret
.size bootstrap_green_task,.-bootstrap_green_task

/* void swap_registers(Registers *out (%rdi), const Registers *in (%rsi)):
   save the SysV callee-saved GPRs into *out, restore them from *in, then
   jump to the restored return address popped off the restored stack. */
.text
.globl swap_registers
.type swap_registers,@function
.align 16
swap_registers:
    mov %rbx, (0*8)(%rdi)
    mov %rsp, (1*8)(%rdi)
    mov %rbp, (2*8)(%rdi)
    mov %r12, (4*8)(%rdi)
    mov %r13, (5*8)(%rdi)
    mov %r14, (6*8)(%rdi)
    mov %r15, (7*8)(%rdi)
    mov (0*8)(%rsi), %rbx
    mov (1*8)(%rsi), %rsp
    mov (2*8)(%rsi), %rbp
    mov (4*8)(%rsi), %r12
    mov (5*8)(%rsi), %r13
    mov (6*8)(%rsi), %r14
    mov (7*8)(%rsi), %r15
    pop %rax
    jmp *%rax
/* BUGFIX: this directive previously read
   `.size bootstrap_green_task,.-bootstrap_green_task`, mis-attributing the
   size of swap_registers to bootstrap_green_task (and leaving
   swap_registers without a size). */
.size swap_registers,.-swap_registers
/* Mark that we don't need executable stack. */
.section .note.GNU-stack,"",%progbits

View file

@ -0,0 +1,39 @@
/* x86_64 Mach-O (Apple) twin of the ELF file: same register layout,
   symbols carry the Mach-O leading underscore; no .type/.size (ELF-only). */
/* NOTE(review): on Mach-O, `.align n` means 2^n bytes, so `.align 8`
   requests 256-byte alignment — possibly `.align 4` was intended. */
.text
.globl _prefetch
.align 8
_prefetch:
    prefetcht2 (%rdi)
    ret
.text
.globl _bootstrap_green_task
.align 8
/* Forward the planted r12/r13 as SysV args; r14 becomes the return target. */
_bootstrap_green_task:
    mov %r12, %rdi /* setup the function arg */
    mov %r13, %rsi /* setup the function arg */
    and $-16, %rsp /* align the stack pointer */
    mov %r14, (%rsp) /* this is the new return address */
    ret
.text
.globl _swap_registers
.align 8
/* Save SysV callee-saved GPRs into *rdi, restore from *rsi, then jump to
   the return address popped off the restored stack. */
_swap_registers:
    mov %rbx, (0*8)(%rdi)
    mov %rsp, (1*8)(%rdi)
    mov %rbp, (2*8)(%rdi)
    mov %r12, (4*8)(%rdi)
    mov %r13, (5*8)(%rdi)
    mov %r14, (6*8)(%rdi)
    mov %r15, (7*8)(%rdi)
    mov (0*8)(%rsi), %rbx
    mov (1*8)(%rsi), %rsp
    mov (2*8)(%rsi), %rbp
    mov (4*8)(%rsi), %r12
    mov (5*8)(%rsi), %r13
    mov (6*8)(%rsi), %r14
    mov (7*8)(%rsi), %r15
    pop %rax
    jmp *%rax

View file

@ -0,0 +1,51 @@
// Register contexts used in various architectures
//
// These structures all represent a context of one task throughout its
// execution. Each struct is a representation of the architecture's register
// set. When swapping between tasks, these register sets are used to save off
// the current registers into one struct, and load them all from another.
//
// Note that this is only used for context switching, which means that some of
// the registers may go unused. For example, for architectures with
// callee/caller saved registers, the context will only reflect the callee-saved
// registers. This is because the caller saved registers are already stored
// elsewhere on the stack (if it was necessary anyway).
//
// Additionally, there may be fields on various architectures which are unused
// entirely because they only reflect what is theoretically possible for a
// "complete register set" to show, but user-space cannot alter these registers.
// An example of this would be the segment selectors for x86.
//
// These structures/functions are roughly in-sync with the source files inside
// of src/rt/arch/$arch. The only currently used function from those folders is
// the `rust_swap_registers` function, but that's only because for now segmented
// stacks are disabled.
// Select the platform-specific backend; each file exposes the same items
// (`initialize_call_frame`, `prefetch`, `swap_registers`, `Registers`),
// re-exported below so callers are platform-agnostic.
#[cfg(all(unix, target_arch = "x86_64"))]
#[path = "x86_64_unix.rs"]
pub mod asm;
#[cfg(all(windows, target_arch = "x86_64"))]
#[path = "x86_64_windows.rs"]
pub mod asm;
#[cfg(all(unix, target_arch = "aarch64"))]
#[path = "aarch64_unix.rs"]
pub mod asm;
pub use self::asm::{initialize_call_frame, prefetch, swap_registers, Registers};
/// Round a stack pointer down to the nearest 16-byte boundary, as the
/// x86_64 and AArch64 ABIs require for a stack top.
#[inline]
fn align_down(sp: *mut usize) -> *mut usize {
    const ALIGNMENT: usize = 16;
    ((sp as usize) & !(ALIGNMENT - 1)) as *mut usize
}
/// Offset `ptr` by `count` elements of `T` (negative counts step backwards).
///
/// Thin wrapper over `ptr.offset`; the caller must keep the result inside
/// (or one past) the same allocation, per `offset`'s contract.
#[inline]
#[allow(unused)]
fn mut_offset<T>(ptr: *mut T, count: isize) -> *mut T {
    unsafe { ptr.offset(count) }
}

View file

@ -0,0 +1,149 @@
use crate::detail::{align_down, mut_offset};
use crate::reg_context::InitFn;
use crate::stack::Stack;
// #[cfg(not(nightly))]
// Context-switch primitives from the bundled static `asm` library
// (x86_64 System V assembly).
#[link(name = "asm", kind = "static")]
extern "C" {
    /// First "return" target of a fresh task: forwards the planted r12/r13
    /// to rdi/rsi and returns into the init function held in r14.
    pub fn bootstrap_green_task();
    /// Hint-load the cache line at `data`.
    pub fn prefetch(data: *const usize);
    /// Save the callee-saved set into `out_regs`, restore from `in_regs`,
    /// and resume at the restored return address.
    pub fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers);
}
/*
#[cfg(nightly)]
mod asm_impl {
use super::Registers;
/// prefetch data
#[inline]
pub unsafe extern "C" fn prefetch(data: *const usize) {
llvm_asm!(
"prefetcht1 $0"
: // no output
: "m"(*data)
:
: "volatile"
);
}
#[naked]
#[inline(never)]
pub unsafe extern "C" fn bootstrap_green_task() {
llvm_asm!(
"
mov %r12, %rdi // setup the function arg
mov %r13, %rsi // setup the function arg
and $$-16, %rsp // align the stack pointer
mov %r14, (%rsp) // this is the new return address
"
: // no output
: // no input
: "memory"
: "volatile"
);
}
#[naked]
#[inline(never)]
pub unsafe extern "C" fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers) {
// The first argument is in %rdi, and the second one is in %rsi
llvm_asm!(
""
:
: "{rdi}"(out_regs), "{rsi}"(in_regs)
:
:
);
// introduce this function to workaround rustc bug! (#6)
#[naked]
unsafe extern "C" fn _swap_reg() {
// Save registers
llvm_asm!(
"
mov %rbx, (0*8)(%rdi)
mov %rsp, (1*8)(%rdi)
mov %rbp, (2*8)(%rdi)
mov %r12, (4*8)(%rdi)
mov %r13, (5*8)(%rdi)
mov %r14, (6*8)(%rdi)
mov %r15, (7*8)(%rdi)
mov (0*8)(%rsi), %rbx
mov (1*8)(%rsi), %rsp
mov (2*8)(%rsi), %rbp
mov (4*8)(%rsi), %r12
mov (5*8)(%rsi), %r13
mov (6*8)(%rsi), %r14
mov (7*8)(%rsi), %r15
"
:
: //"{rdi}"(out_regs), "{rsi}"(in_regs)
: "memory"
: "volatile"
);
}
_swap_reg()
}
}
#[cfg(nightly)]
pub use self::asm_impl::*;
*/
// Saved x86_64 SysV callee-saved register set; slot indices must match the
// offsets used by the assembly (see the RUSTRT_* constants below).
#[repr(C)]
#[derive(Debug)]
pub struct Registers {
    gpr: [usize; 8],
}

impl Registers {
    /// Zeroed register set; filled in by `initialize_call_frame` or by a
    /// `swap_registers` save.
    pub fn new() -> Registers {
        Registers { gpr: [0; 8] }
    }

    /// Prefetch the saved stack top so the first resume misses less.
    #[inline]
    pub fn prefetch(&self) {
        // gpr[1] is the saved RSP slot (RUSTRT_RSP).
        let ptr = self.gpr[1] as *const usize;
        unsafe {
            prefetch(ptr); // RSP
            // NOTE(review): `add(8)` is 8 usizes = 64 bytes (the next cache
            // line), not "RSP + 8" bytes as the original comment implied.
            prefetch(ptr.add(8)); // RSP + 8
        }
    }
}
/// Prepare `regs` and the new stack so that the first `swap_registers`
/// into it pops `bootstrap_green_task` as its return address, which then
/// forwards `arg`/`arg2` to `fptr`.
pub fn initialize_call_frame(
    regs: &mut Registers,
    fptr: InitFn,
    arg: usize,
    arg2: *mut usize,
    stack: &Stack,
) {
    // Redefinitions from rt/arch/x86_64/regs.h
    const RUSTRT_RSP: usize = 1;
    const RUSTRT_RBP: usize = 2;
    const RUSTRT_R12: usize = 4;
    const RUSTRT_R13: usize = 5;
    const RUSTRT_R14: usize = 6;

    // 16-byte align the top of the new stack.
    let sp = align_down(stack.end());

    // These registers are frobbed by bootstrap_green_task into the right
    // location so we can invoke the "real init function", `fptr`:
    // r12 -> rdi (arg), r13 -> rsi (arg2), r14 becomes the jump target.
    regs.gpr[RUSTRT_R12] = arg;
    regs.gpr[RUSTRT_R13] = arg2 as usize;
    regs.gpr[RUSTRT_R14] = fptr as usize;
    // Last base pointer on the stack should be 0
    regs.gpr[RUSTRT_RBP] = 0;
    // setup the init stack
    // this is prepared for the swap context: the restored rsp points at
    // sp-2, where swap_registers' `pop %rax; jmp *%rax` finds bootstrap.
    regs.gpr[RUSTRT_RSP] = mut_offset(sp, -2) as usize;
    unsafe {
        // leave enough space for RET
        *mut_offset(sp, -2) = bootstrap_green_task as usize;
        *mut_offset(sp, -1) = 0;
    }
}

View file

@ -0,0 +1,247 @@
use crate::detail::{align_down, mut_offset};
use crate::reg_context::InitFn;
use crate::stack::Stack;
// #[cfg(not(nightly))]
// Context-switch primitives from the bundled static `asm` library
// (x86_64 Windows assembly; MS x64 calling convention).
#[link(name = "asm", kind = "static")]
extern "C" {
    /// First "return" target of a fresh task: forwards the planted r12/r13
    /// to rcx/rdx and returns into the init function held in r14.
    pub fn bootstrap_green_task();
    /// Hint-load the cache line at `data` (named `prefetch_asm` to avoid
    /// clashing with the safe `prefetch` wrapper below).
    pub fn prefetch_asm(data: *const usize);
    /// Save GPRs, xmm6..xmm15 and the TIB stack bounds into `out_regs`,
    /// restore them from `in_regs`, and resume there.
    pub fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers);
}
/// Safe wrapper around the assembly `prefetch_asm` cache hint.
///
/// Prefetching is advisory only, so exposing it as a safe fn is sound even
/// for arbitrary addresses.
#[inline]
#[allow(dead_code)]
pub fn prefetch(data: *const usize) {
    unsafe {
        prefetch_asm(data);
    }
}
/*
#[cfg(nightly)]
mod asm_impl {
use super::Registers;
/// prefetch data
#[inline]
pub unsafe extern "C" fn prefetch_asm(data: *const usize) {
llvm_asm!(
"prefetcht1 $0"
: // no output
: "m"(*data)
:
: "volatile"
);
}
#[naked]
#[inline(never)]
pub unsafe extern "C" fn bootstrap_green_task() {
llvm_asm!(
"
mov %r12, %rcx // setup the function arg
mov %r13, %rdx // setup the function arg
and $$-16, %rsp // align the stack pointer
mov %r14, (%rsp) // this is the new return address
"
: // no output
: // no input
: "memory"
: "volatile"
);
}
#[naked]
#[inline(never)]
pub unsafe extern "C" fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers) {
// The first argument is in %rcx, and the second one is in %rdx
llvm_asm!(
""
:
: "{rcx}"(out_regs), "{rdx}"(in_regs)
:
:
);
// introduce this function to workaround rustc bug! (#6)
#[naked]
unsafe extern "C" fn _swap_reg() {
// Save registers
llvm_asm!(
"
mov %rbx, (0*8)(%rcx)
mov %rsp, (1*8)(%rcx)
mov %rbp, (2*8)(%rcx)
mov %r12, (4*8)(%rcx)
mov %r13, (5*8)(%rcx)
mov %r14, (6*8)(%rcx)
mov %r15, (7*8)(%rcx)
mov %rdi, (9*8)(%rcx)
mov %rsi, (10*8)(%rcx)
// mov %rcx, %r10
// and $$0xf0, %r10b
// Save non-volatile XMM registers:
movapd %xmm6, (16*8)(%rcx)
movapd %xmm7, (18*8)(%rcx)
movapd %xmm8, (20*8)(%rcx)
movapd %xmm9, (22*8)(%rcx)
movapd %xmm10, (24*8)(%rcx)
movapd %xmm11, (26*8)(%rcx)
movapd %xmm12, (28*8)(%rcx)
movapd %xmm13, (30*8)(%rcx)
movapd %xmm14, (32*8)(%rcx)
movapd %xmm15, (34*8)(%rcx)
/* load NT_TIB */
movq %gs:(0x30), %r10
/* save current stack base */
movq 0x08(%r10), %rax
mov %rax, (11*8)(%rcx)
/* save current stack limit */
movq 0x10(%r10), %rax
mov %rax, (12*8)(%rcx)
/* save current deallocation stack */
movq 0x1478(%r10), %rax
mov %rax, (13*8)(%rcx)
/* save fiber local storage */
// movq 0x18(%r10), %rax
// mov %rax, (14*8)(%rcx)
// mov %rcx, (3*8)(%rcx)
mov (0*8)(%rdx), %rbx
mov (1*8)(%rdx), %rsp
mov (2*8)(%rdx), %rbp
mov (4*8)(%rdx), %r12
mov (5*8)(%rdx), %r13
mov (6*8)(%rdx), %r14
mov (7*8)(%rdx), %r15
mov (9*8)(%rdx), %rdi
mov (10*8)(%rdx), %rsi
// Restore non-volatile XMM registers:
movapd (16*8)(%rdx), %xmm6
movapd (18*8)(%rdx), %xmm7
movapd (20*8)(%rdx), %xmm8
movapd (22*8)(%rdx), %xmm9
movapd (24*8)(%rdx), %xmm10
movapd (26*8)(%rdx), %xmm11
movapd (28*8)(%rdx), %xmm12
movapd (30*8)(%rdx), %xmm13
movapd (32*8)(%rdx), %xmm14
movapd (34*8)(%rdx), %xmm15
/* load NT_TIB */
movq %gs:(0x30), %r10
/* restore fiber local storage */
// mov (14*8)(%rdx), %rax
// movq %rax, 0x18(%r10)
/* restore deallocation stack */
mov (13*8)(%rdx), %rax
movq %rax, 0x1478(%r10)
/* restore stack limit */
mov (12*8)(%rdx), %rax
movq %rax, 0x10(%r10)
/* restore stack base */
mov (11*8)(%rdx), %rax
movq %rax, 0x8(%r10)
// mov (3*8)(%rdx), %rcx
"
// why save the rcx and rdx in stack? this will overwrite something!
// the naked function should only use the asm block, debug version breaks
// since rustc 1.27.0-nightly, we have to use O2 level optimization (#6)
:
: //"{rcx}"(out_regs), "{rdx}"(in_regs)
: "memory"
: "volatile"
);
}
_swap_reg()
}
}
#[cfg(nightly)]
pub use self::asm_impl::*;
*/
// 16-byte placeholder matching one XMM register (four u32 lanes); used
// only to reserve correctly-sized space inside `Registers`.
#[repr(C)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
struct Xmm(u32, u32, u32, u32);

impl Xmm {
    /// Build a placeholder from its four 32-bit lanes.
    pub fn new(a: u32, b: u32, c: u32, d: u32) -> Self {
        Self(a, b, c, d)
    }
}
// windows need to restore xmm6~xmm15, for most cases only use two xmm registers
// Layout must match the assembly: gpr slot indices are the RUSTRT_*
// constants below; the xmm area starts at byte offset 16*8.
#[repr(C)]
#[derive(Debug)]
pub struct Registers {
    gpr: [usize; 16],
    // keep enough for place holder
    _xmm: [Xmm; 10],
}

impl Registers {
    /// Zeroed register set; filled in by `initialize_call_frame` or by a
    /// `swap_registers` save.
    pub fn new() -> Registers {
        Registers {
            gpr: [0; 16],
            _xmm: [Xmm::new(0, 0, 0, 0); 10],
        }
    }

    /// Prefetch the saved stack top so the first resume misses less.
    #[inline]
    pub fn prefetch(&self) {
        // gpr[1] is the saved RSP slot (RUSTRT_RSP).
        let ptr = self.gpr[1] as *const usize;
        unsafe {
            prefetch(ptr); // RSP
            // NOTE(review): `add(8)` is 8 usizes = 64 bytes (the next cache
            // line), not "RSP + 8" bytes.
            prefetch(ptr.add(8)); // RSP + 8
        }
    }
}
/// Prepare `regs` and the new stack so the first `swap_registers` into it
/// pops `bootstrap_green_task` as its return address, which forwards
/// `arg`/`arg2` to `fptr`. Also seeds the NT_TIB stack-bound slots that
/// the assembly writes back into the TIB on every switch.
pub fn initialize_call_frame(
    regs: &mut Registers,
    fptr: InitFn,
    arg: usize,
    arg2: *mut usize,
    stack: &Stack,
) {
    // Redefinitions from rt/arch/x86_64/regs.h
    const RUSTRT_RSP: usize = 1;
    const RUSTRT_RBP: usize = 2;
    const RUSTRT_R12: usize = 4;
    const RUSTRT_R13: usize = 5;
    const RUSTRT_R14: usize = 6;
    const RUSTRT_STACK_BASE: usize = 11;
    const RUSTRT_STACK_LIMIT: usize = 12;
    const RUSTRT_STACK_DEALLOC: usize = 13;

    // 16-byte align the top of the new stack.
    let sp = align_down(stack.end());

    // These registers are frobbed by bootstrap_green_task into the right
    // location so we can invoke the "real init function", `fptr`:
    // r12 -> rcx (arg), r13 -> rdx (arg2), r14 becomes the jump target.
    regs.gpr[RUSTRT_R12] = arg;
    regs.gpr[RUSTRT_R13] = arg2 as usize;
    regs.gpr[RUSTRT_R14] = fptr as usize;
    // Last base pointer on the stack should be 0
    regs.gpr[RUSTRT_RBP] = 0;
    // TIB stack bounds for this green stack; the asm installs them into
    // gs:[0x08]/gs:[0x10]/deallocation-stack on switch-in.
    regs.gpr[RUSTRT_STACK_BASE] = stack.end() as usize;
    regs.gpr[RUSTRT_STACK_LIMIT] = stack.begin() as usize;
    regs.gpr[RUSTRT_STACK_DEALLOC] = 0; //mut_offset(sp, -8192) as usize;
    // setup the init stack
    // this is prepared for the swap context: the restored rsp points at
    // sp-2, where swap_registers' `pop rax; jmp rax` finds bootstrap.
    regs.gpr[RUSTRT_RSP] = mut_offset(sp, -2) as usize;
    unsafe {
        // leave enough space for RET
        *mut_offset(sp, -2) = bootstrap_green_task as usize;
        *mut_offset(sp, -1) = 0;
    }
}

View file

@ -0,0 +1,577 @@
//! # generator
//!
//! Rust generator implementation
//!
use std::any::Any;
use std::fmt;
use std::marker::PhantomData;
use std::panic;
use std::thread;
use crate::reg_context::RegContext;
use crate::rt::{Context, ContextStack, Error};
use crate::scope::Scope;
use crate::stack::{Func, Stack, StackBox};
use crate::yield_::yield_now;
/// The default stack size for generators, in bytes.
// windows has a minimal size as 0x4a8!!!!
pub const DEFAULT_STACK_SIZE: usize = 0x1000;

/// the generator obj type, the functor passed to it must be Send
// Thin owner of a stack-allocated GeneratorImpl; the const LOCAL flag
// distinguishes the Send (`Generator`) and non-Send (`LocalGenerator`)
// aliases while sharing one implementation.
pub struct GeneratorObj<'a, A, T, const LOCAL: bool> {
    gen: StackBox<GeneratorImpl<'a, A, T>>,
}
/// the generator type, the functor passed to it must be Send
pub type Generator<'a, A, T> = GeneratorObj<'a, A, T, false>;

// SAFETY: only when A, T and the functor are all sendable can the
// generator be Send; the `new_scoped*` constructors enforce `F: Send`.
unsafe impl<A: Send, T: Send> Send for Generator<'static, A, T> {}
impl<'a, A, T> Generator<'a, A, T> {
    /// init a heap based generator with scoped closure
    pub fn scoped_init<F: FnOnce(Scope<'a, A, T>) -> T + Send + 'a>(&mut self, f: F)
    where
        T: Send + 'a,
        A: Send + 'a,
    {
        self.gen.scoped_init(f);
    }

    /// init a heap based generator
    // it can be used to re-init a 'done' generator before it gets dropped
    pub fn init_code<F: FnOnce() -> T + Send + 'a>(&mut self, f: F)
    where
        T: Send + 'a,
    {
        self.gen.init_code(f);
    }
}
/// the local generator type, can't Send
// Same machinery as `Generator`, but without `Send` bounds on the closure,
// so it must stay on the creating thread.
pub type LocalGenerator<'a, A, T> = GeneratorObj<'a, A, T, true>;

impl<'a, A, T> LocalGenerator<'a, A, T> {
    /// init a heap based generator with scoped closure
    pub fn scoped_init<F: FnOnce(Scope<'a, A, T>) -> T + 'a>(&mut self, f: F)
    where
        T: 'a,
        A: 'a,
    {
        self.gen.scoped_init(f);
    }
}
// Public surface shared by Generator and LocalGenerator; every method
// forwards to the underlying GeneratorImpl.
impl<'a, A, T, const LOCAL: bool> GeneratorObj<'a, A, T, LOCAL> {
    /// Constructs a Generator from a raw pointer.
    ///
    /// # Safety
    ///
    /// This function is unsafe because improper use may lead to
    /// memory problems. For example, a double-free may occur if the
    /// function is called twice on the same raw pointer.
    #[inline]
    pub unsafe fn from_raw(raw: *mut usize) -> Self {
        GeneratorObj {
            gen: StackBox::from_raw(raw as *mut GeneratorImpl<'a, A, T>),
        }
    }

    /// Consumes the `Generator`, returning a wrapped raw pointer.
    #[inline]
    pub fn into_raw(self) -> *mut usize {
        let ret = self.gen.as_ptr() as *mut usize;
        // forget self so Drop doesn't free memory now owned by the caller
        std::mem::forget(self);
        ret
    }

    /// prefetch the generator into cache
    #[inline]
    pub fn prefetch(&self) {
        self.gen.prefetch();
    }

    /// prepare the para that passed into generator before send
    #[inline]
    pub fn set_para(&mut self, para: A) {
        self.gen.set_para(para);
    }

    /// set the generator local data
    #[inline]
    pub fn set_local_data(&mut self, data: *mut u8) {
        self.gen.set_local_data(data);
    }

    /// get the generator local data
    #[inline]
    pub fn get_local_data(&self) -> *mut u8 {
        self.gen.get_local_data()
    }

    /// get the generator panic data
    #[inline]
    pub fn get_panic_data(&mut self) -> Option<Box<dyn Any + Send>> {
        self.gen.get_panic_data()
    }

    /// resume the generator without touch the para
    /// you should call `set_para` before this method
    #[inline]
    pub fn resume(&mut self) -> Option<T> {
        self.gen.resume()
    }

    /// `raw_send`
    #[inline]
    pub fn raw_send(&mut self, para: Option<A>) -> Option<T> {
        self.gen.raw_send(para)
    }

    /// send interface
    pub fn send(&mut self, para: A) -> T {
        self.gen.send(para)
    }

    /// cancel the generator
    /// this will trigger a Cancel panic to unwind the stack and finish the generator
    pub fn cancel(&mut self) {
        self.gen.cancel()
    }

    /// is finished
    #[inline]
    pub fn is_done(&self) -> bool {
        self.gen.is_done()
    }

    /// get stack total size and used size in word
    pub fn stack_usage(&self) -> (usize, usize) {
        self.gen.stack_usage()
    }
}
// A no-input generator is an iterator: each `next` resumes it once and
// yields until the generator is done (resume returns None).
impl<'a, T, const LOCAL: bool> Iterator for GeneratorObj<'a, (), T, LOCAL> {
    type Item = T;

    fn next(&mut self) -> Option<T> {
        self.resume()
    }
}
// Debug prints only the type parameters; the closure and stack contents
// are opaque.
impl<'a, A, T, const LOCAL: bool> fmt::Debug for GeneratorObj<'a, A, T, LOCAL> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Generator<{}, Output={}, Local={}> {{ ... }}",
            std::any::type_name::<A>(),
            std::any::type_name::<T>(),
            LOCAL
        )
    }
}
/// Generator helper
// Zero-sized constructor namespace: `Gn::<A>::new_scoped(...)` etc.
pub struct Gn<A = ()> {
    dummy: PhantomData<A>,
}
impl<A> Gn<A> {
    /// create a scoped generator with default stack size
    pub fn new_scoped<'a, T, F>(f: F) -> Generator<'a, A, T>
    where
        F: FnOnce(Scope<A, T>) -> T + Send + 'a,
        T: Send + 'a,
        A: Send + 'a,
    {
        Self::new_scoped_opt(DEFAULT_STACK_SIZE, f)
    }

    /// create a scoped local generator with default stack size
    pub fn new_scoped_local<'a, T, F>(f: F) -> LocalGenerator<'a, A, T>
    where
        F: FnOnce(Scope<A, T>) -> T + 'a,
        T: 'a,
        A: 'a,
    {
        Self::new_scoped_opt_local(DEFAULT_STACK_SIZE, f)
    }

    /// create a scoped generator with specified stack size
    pub fn new_scoped_opt<'a, T, F>(size: usize, f: F) -> Generator<'a, A, T>
    where
        F: FnOnce(Scope<A, T>) -> T + Send + 'a,
        T: Send + 'a,
        A: Send + 'a,
    {
        let mut gen = GeneratorImpl::<A, T>::new(Stack::new(size));
        gen.scoped_init(f);
        Generator { gen }
    }

    /// create a scoped local generator with specified stack size
    pub fn new_scoped_opt_local<'a, T, F>(size: usize, f: F) -> LocalGenerator<'a, A, T>
    where
        F: FnOnce(Scope<A, T>) -> T + 'a,
        T: 'a,
        A: 'a,
    {
        let mut gen = GeneratorImpl::<A, T>::new(Stack::new(size));
        gen.scoped_init(f);
        LocalGenerator { gen }
    }
}
impl<A: Any> Gn<A> {
    /// create a new generator with default stack size
    #[cfg_attr(feature = "cargo-clippy", allow(clippy::new_ret_no_self))]
    #[deprecated(since = "0.6.18", note = "please use `scope` version instead")]
    pub fn new<'a, T: Any, F>(f: F) -> Generator<'a, A, T>
    where
        F: FnOnce() -> T + Send + 'a,
    {
        Self::new_opt(DEFAULT_STACK_SIZE, f)
    }

    /// create a new generator with specified stack size
    // the `may` library use this API so we can't deprecated it yet.
    pub fn new_opt<'a, T: Any, F>(size: usize, f: F) -> Generator<'a, A, T>
    where
        F: FnOnce() -> T + Send + 'a,
    {
        let mut gen = GeneratorImpl::<A, T>::new(Stack::new(size));
        // non-scoped path: wire the para/ret channels through `dyn Any`
        gen.init_context();
        gen.init_code(f);
        Generator { gen }
    }
}
/// `GeneratorImpl`
// The actual generator state. It lives *inside* its own stack allocation
// (see `new`), so `stack` must outlive every other field; repr(C) keeps
// the field order/layout stable for that arrangement.
#[repr(C)]
struct GeneratorImpl<'a, A, T> {
    // run time context
    context: Context,
    // stack
    stack: Stack,
    // save the input
    para: Option<A>,
    // save the output
    ret: Option<T>,
    // boxed functor
    f: Option<Func>,
    // phantom lifetime
    phantom: PhantomData<&'a T>,
}
impl<'a, A: Any, T: Any> GeneratorImpl<'a, A, T> {
    /// Wire the context's type-erased para/ret slots to this generator's
    /// own fields (used by the non-scoped `Gn::new_opt` path).
    fn init_context(&mut self) {
        unsafe {
            // SAFETY: self is pinned inside its stack allocation, so these
            // self-referential `dyn Any` pointers stay valid for its lifetime.
            std::ptr::write(
                self.context.para.as_mut_ptr(),
                &mut self.para as &mut dyn Any,
            );
            std::ptr::write(self.context.ret.as_mut_ptr(), &mut self.ret as &mut dyn Any);
        }
    }
}
impl<'a, A, T> GeneratorImpl<'a, A, T> {
    /// create a new generator with specified stack size
    fn new(mut stack: Stack) -> StackBox<Self> {
        // the stack box would finally dealloc the stack!
        // (the impl is allocated at the top of its own stack, then the
        // stack's ownership is moved into it)
        unsafe {
            let mut stack_box = stack.alloc_uninit_box::<GeneratorImpl<'a, A, T>>();
            (*stack_box.as_mut_ptr()).init(GeneratorImpl {
                para: None,
                stack,
                ret: None,
                f: None,
                context: Context::new(),
                phantom: PhantomData,
            });
            stack_box.assume_init()
        }
    }

    /// prefetch the generator into cache
    #[inline]
    pub fn prefetch(&self) {
        self.context.regs.prefetch();
    }

    /// init a heap based generator with scoped closure
    fn scoped_init<F: FnOnce(Scope<'a, A, T>) -> T + 'a>(&mut self, f: F)
    where
        T: 'a,
        A: 'a,
    {
        use std::mem::transmute;
        // the Scope borrows para/ret from self; the transmute erases that
        // borrow so the closure can be stored alongside them
        let scope = unsafe { transmute(Scope::new(&mut self.para, &mut self.ret)) };
        self.init_code(move || f(scope));
    }

    /// init a heap based generator
    // it can be used to re-init a 'done' generator before it gets dropped
    fn init_code<F: FnOnce() -> T + 'a>(&mut self, f: F)
    where
        T: 'a,
    {
        // make sure the last one is finished
        if self.f.is_none() && self.context._ref == 0 {
            self.cancel();
        } else {
            let _ = self.f.take();
        }
        // init ctx parent to itself, this would be the new top
        self.context.parent = &mut self.context;
        // init the ref to 0 means that it's ready to start
        self.context._ref = 0;
        let ret = &mut self.ret as *mut _;
        // alloc the function on stack
        let func = StackBox::new_fn_once(&mut self.stack, move || {
            let r = f();
            unsafe { *ret = Some(r) };
        });
        self.f = Some(func);
        // point the saved registers at gen_init on this generator's stack
        self.context.regs.init_with(
            gen_init,
            0,
            &mut self.f as *mut _ as *mut usize,
            &self.stack,
        );
    }

    /// resume the generator
    #[inline]
    fn resume_gen(&mut self) {
        let env = ContextStack::current();
        // get the current regs
        let cur = &mut env.top().regs;
        // switch to new context, always use the top context's reg
        // for normal generator self.context.parent == self.context
        // for coroutine self.context.parent == top generator context
        debug_assert!(!self.context.parent.is_null());
        let top = unsafe { &mut *self.context.parent };
        // save current generator context on stack
        env.push_context(&mut self.context);
        // swap to the generator
        RegContext::swap(cur, &top.regs);
        // comes back, check the panic status
        // this would propagate the panic until root context
        // if it's a coroutine just stop propagate
        if !self.context.local_data.is_null() {
            return;
        }
        if let Some(err) = self.context.err.take() {
            // pass the error to the parent until root
            panic::resume_unwind(err);
        }
    }

    #[inline]
    fn is_started(&self) -> bool {
        // when the f is consumed we think it's running
        self.f.is_none()
    }

    /// prepare the para that passed into generator before send
    #[inline]
    fn set_para(&mut self, para: A) {
        self.para = Some(para);
    }

    /// set the generator local data
    #[inline]
    fn set_local_data(&mut self, data: *mut u8) {
        self.context.local_data = data;
    }

    /// get the generator local data
    #[inline]
    fn get_local_data(&self) -> *mut u8 {
        self.context.local_data
    }

    /// get the generator panic data
    #[inline]
    fn get_panic_data(&mut self) -> Option<Box<dyn Any + Send>> {
        self.context.err.take()
    }

    /// resume the generator without touch the para
    /// you should call `set_para` before this method
    #[inline]
    fn resume(&mut self) -> Option<T> {
        if self.is_done() {
            return None;
        }
        // every time we call the function, increase the ref count
        // yield will decrease it and return will not
        self.context._ref += 1;
        self.resume_gen();
        self.ret.take()
    }

    /// `raw_send`
    #[inline]
    fn raw_send(&mut self, para: Option<A>) -> Option<T> {
        if self.is_done() {
            return None;
        }
        // this is the passed in value of the send primitive
        // the yield part would read out this value in the next round
        self.para = para;
        // every time we call the function, increase the ref count
        // yield will decrease it and return will not
        self.context._ref += 1;
        self.resume_gen();
        self.ret.take()
    }

    /// send interface
    fn send(&mut self, para: A) -> T {
        let ret = self.raw_send(Some(para));
        ret.expect("send got None return")
    }

    /// cancel the generator without any check
    #[inline]
    fn raw_cancel(&mut self) {
        // tell the func to panic
        // so that we can stop the inner func
        self.context._ref = 2;
        // save the old panic hook, we don't want to print anything for the Cancel
        let old = ::std::panic::take_hook();
        ::std::panic::set_hook(Box::new(|_| {}));
        self.resume_gen();
        ::std::panic::set_hook(old);
    }

    /// cancel the generator
    /// this will trigger a Cancel panic to unwind the stack
    fn cancel(&mut self) {
        if self.is_done() {
            return;
        }
        // consume the fun if it's not started
        if !self.is_started() {
            self.f.take();
            self.context._ref = 1;
        } else {
            self.raw_cancel();
        }
    }

    /// is finished
    #[inline]
    fn is_done(&self) -> bool {
        self.is_started() && (self.context._ref & 0x3) != 0
    }

    /// get stack total size and used size in word
    fn stack_usage(&self) -> (usize, usize) {
        (self.stack.size(), self.stack.get_used_size())
    }
}
impl<'a, A, T> Drop for GeneratorImpl<'a, A, T> {
    /// Ensure the generator body is fully unwound before the stack is freed,
    /// and check for stack overflow via the footprint left on the stack.
    fn drop(&mut self) {
        // when the thread is already panic, do nothing
        // (cancelling would panic again and abort the process)
        if thread::panicking() {
            return;
        }

        if !self.is_started() {
            // not started yet, just drop the gen
            return;
        }

        if !self.is_done() {
            trace!("generator is not done while drop");
            self.raw_cancel()
        }

        assert!(self.is_done());

        let (total_stack, used_stack) = self.stack_usage();
        if used_stack < total_stack {
            // here we should record the stack in the class
            // next time will just use
            // set_stack_size::<F>(used_stack);
        } else {
            // every word of the stack was touched: the guard footprint was
            // overwritten, so the generator most likely overflowed its stack
            error!("stack overflow detected!");
            std::panic::panic_any(Error::StackErr);
        }
    }
}
/// don't print panic info for Done/Cancel
fn catch_unwind_filter<F: FnOnce() -> R + panic::UnwindSafe, R>(f: F) -> std::thread::Result<R> {
use std::sync::Once;
static INIT: std::sync::Once = Once::new();
INIT.call_once(|| {
let prev_hook = panic::take_hook();
panic::set_hook(Box::new(move |info| {
if let Some(e) = info.payload().downcast_ref::<Error>() {
match e {
// this is not an error at all, ignore it
Error::Cancel | Error::Done => return,
_ => {}
}
}
prev_hook(info);
}));
});
panic::catch_unwind(f)
}
/// the init function passed to reg_context
///
/// This is the very first frame executed on the generator's own stack.
/// `f` points at the `Option<Func>` stored on that stack; it never returns
/// normally — it yields back to the parent context forever once finished.
fn gen_init(_: usize, f: *mut usize) -> ! {
    let clo = move || {
        // consume self.f
        let f: &mut Option<Func> = unsafe { &mut *(f as *mut _) };
        let func = f.take().unwrap();
        func.call_once();
    };

    // classify the unwind payload: Done/Cancel are normal control flow,
    // anything else is a genuine user panic that must be propagated
    fn check_err(cause: Box<dyn Any + Send + 'static>) {
        if let Some(e) = cause.downcast_ref::<Error>() {
            match e {
                // this is not an error at all, ignore it
                Error::Cancel | Error::Done => return,
                _ => {}
            }
        }
        error!("set panic inside generator");
        // stash the payload in the context so the parent can re-raise it
        ContextStack::current().top().err = Some(cause);
    }

    // we can't panic inside the generator context
    // need to propagate the panic to the main thread
    if let Err(cause) = catch_unwind_filter(clo) {
        check_err(cause);
    }

    yield_now();

    unreachable!("Should never come back");
}

27
third-party/vendor/generator/src/lib.rs vendored Normal file
View file

@ -0,0 +1,27 @@
//! # generator
//!
//! Rust generator library
//!
#![cfg_attr(nightly, feature(thread_local))]
#![cfg_attr(test, deny(warnings))]
#![deny(missing_docs)]
#![allow(deprecated)]
#[macro_use]
extern crate log;
mod detail;
mod gen_impl;
mod reg_context;
mod rt;
mod scope;
mod stack;
mod yield_;
pub use crate::gen_impl::{Generator, Gn, LocalGenerator, DEFAULT_STACK_SIZE};
pub use crate::rt::{get_local_data, is_generator, Error};
pub use crate::scope::Scope;
pub use crate::yield_::{
co_get_yield, co_set_para, co_yield_with, done, get_yield, yield_, yield_from, yield_with,
};

View file

@ -0,0 +1,106 @@
use crate::detail::{initialize_call_frame, swap_registers, Registers};
use crate::stack::Stack;
/// Saved CPU register state for one suspended execution context.
#[derive(Debug)]
pub struct RegContext {
    /// Hold the registers while the task or scheduler is suspended
    regs: Registers,
}
// first argument is task handle, second is thunk ptr
// The entry point never returns; it must yield back via a context swap.
pub type InitFn = fn(usize, *mut usize) -> !;
impl RegContext {
    /// Create a zeroed register context (not yet runnable).
    pub fn empty() -> RegContext {
        RegContext {
            regs: Registers::new(),
        }
    }

    /// Prefetch the saved stack memory so the upcoming swap is cheaper.
    #[inline]
    pub fn prefetch(&self) {
        self.regs.prefetch();
    }

    /// Create a new context
    #[allow(dead_code)]
    pub fn new(init: InitFn, arg: usize, start: *mut usize, stack: &Stack) -> RegContext {
        let mut ctx = RegContext::empty();
        ctx.init_with(init, arg, start, stack);
        ctx
    }

    /// init the generator register
    #[inline]
    pub fn init_with(&mut self, init: InitFn, arg: usize, start: *mut usize, stack: &Stack) {
        // Save and then immediately load the current context,
        // we will modify it to call the given function when restored back
        initialize_call_frame(&mut self.regs, init, arg, start, stack);
    }

    /// Switch contexts
    ///
    /// Suspend the current execution context and resume another by
    /// saving the registers values of the executing thread to a Context
    /// then loading the registers from a previously saved Context.
    #[inline]
    pub fn swap(out_context: &mut RegContext, in_context: &RegContext) {
        // debug!("register raw swap");
        // SAFETY: relies on the asm swap_registers saving/restoring all
        // callee-saved registers; both contexts must be properly initialized
        unsafe { swap_registers(&mut out_context.regs, &in_context.regs) }
    }

    /// Load the context and switch. This function will never return.
    #[inline]
    #[allow(dead_code)]
    pub fn load(to_context: &RegContext) {
        // the current registers are saved into a throwaway buffer
        let mut cur = Registers::new();
        let regs: &Registers = &to_context.regs;

        unsafe { swap_registers(&mut cur, regs) }
    }
}
#[cfg(test)]
mod test {
    use std::mem::transmute;

    use crate::reg_context::RegContext;
    use crate::stack::Stack;

    const MIN_STACK: usize = 1024;

    // entry point run on the new stack: calls the thunk, then jumps back
    // to the saved caller context and never returns
    fn init_fn(arg: usize, f: *mut usize) -> ! {
        let func: fn() = unsafe { transmute(f) };
        func();

        let ctx: &RegContext = unsafe { transmute(arg) };
        RegContext::load(ctx);

        unreachable!("Should never comeback");
    }

    #[test]
    fn test_swap_context() {
        // flag mutated from the swapped-in context to prove it actually ran
        static mut VAL: bool = false;
        let mut cur = RegContext::empty();

        fn callback() {
            unsafe {
                VAL = true;
            }
        }

        let stk = Stack::new(MIN_STACK);
        let ctx = RegContext::new(
            init_fn,
            unsafe { transmute(&cur) },
            callback as *mut usize,
            &stk,
        );

        RegContext::swap(&mut cur, &ctx);
        unsafe {
            assert!(VAL);
        }
    }
}

304
third-party/vendor/generator/src/rt.rs vendored Normal file
View file

@ -0,0 +1,304 @@
//! # generator run time support
//!
//! generator run time context management
//!
use std::any::Any;
use std::mem::MaybeUninit;
use std::ptr;
use crate::reg_context::RegContext;
thread_local!(
    /// each thread has it's own generator context stack
    static ROOT_CONTEXT: Box<Context> = {
        let mut root = Box::new(Context::new());
        let p = &mut *root as *mut _;
        // the root is its own parent; `is_generator` relies on this sentinel
        root.parent = p; // init top to current
        root
    }
);
// fast access pointer, this is will be init only once
// when ROOT_CONTEXT get initialized. but in debug mode it
// will be zero in generator context since the stack changed
// to a different place, be careful about that.
// (nightly-only fast path; stable builds go through the thread_local)
#[cfg(nightly)]
#[thread_local]
static mut ROOT_CONTEXT_P: *mut Context = ptr::null_mut();
/// yield panic error types
///
/// These values are thrown with `panic_any` to drive generator control
/// flow and to report misuse; `Done`/`Cancel` are not real errors.
#[allow(dead_code)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Error {
    /// Done panic
    Done,
    /// Cancel panic
    Cancel,
    /// Type mismatch panic
    TypeErr,
    /// Stack overflow panic
    StackErr,
    /// Wrong Context panic
    ContextErr,
}
/// generator context
///
/// One node in the per-thread chain of nested generator contexts,
/// linked via raw `child`/`parent` pointers.
#[repr(C)]
#[repr(align(128))]
pub struct Context {
    /// generator regs context
    pub regs: RegContext,
    /// child context
    child: *mut Context,
    /// parent context
    pub parent: *mut Context,
    /// passed in para for send
    // type-erased pointer to an `Option<A>` owned by the generator wrapper
    pub para: MaybeUninit<*mut dyn Any>,
    /// this is just a buffer for the return value
    pub ret: MaybeUninit<*mut dyn Any>,
    /// track generator ref, yield will -1, send will +1
    pub _ref: usize,
    /// context local storage
    pub local_data: *mut u8,
    /// propagate panic
    pub err: Option<Box<dyn Any + Send>>,
}
impl Context {
    /// return a default generator context
    pub fn new() -> Context {
        Context {
            regs: RegContext::empty(),
            para: MaybeUninit::zeroed(),
            ret: MaybeUninit::zeroed(),
            _ref: 1, // none zero means it's not running
            err: None,
            child: ptr::null_mut(),
            parent: ptr::null_mut(),
            local_data: ptr::null_mut(),
        }
    }

    /// judge it's generator context
    #[inline]
    pub fn is_generator(&self) -> bool {
        // the root context is its own parent, so any context whose
        // parent pointer differs from itself is a generator context
        self.parent != self as *const _ as *mut _
    }

    /// get current generator send para
    #[inline]
    pub fn get_para<A>(&mut self) -> Option<A>
    where
        A: Any,
    {
        let para = unsafe {
            let para_ptr = *self.para.as_mut_ptr();
            assert!(!para_ptr.is_null());
            &mut *para_ptr
        };
        // checked downcast: a wrong `A` panics with TypeErr instead of UB
        match para.downcast_mut::<Option<A>>() {
            Some(v) => v.take(),
            None => type_error::<A>("get yield type mismatch error detected"),
        }
    }

    /// get coroutine send para
    // unchecked (debug-only assert) variant used on the coroutine fast path
    #[inline]
    pub fn co_get_para<A>(&mut self) -> Option<A> {
        let para = unsafe {
            let para_ptr = *self.para.as_mut_ptr();
            debug_assert!(!para_ptr.is_null());
            &mut *(para_ptr as *mut Option<A>)
        };
        para.take()
    }

    /// set current generator send para
    // #[inline]
    // pub fn set_para<A>(&self, data: A)
    // where
    //     A: Any,
    // {
    //     let para = unsafe { &mut *self.para };
    //     match para.downcast_mut::<Option<A>>() {
    //         Some(v) => *v = Some(data),
    //         None => type_error::<A>("set yield type mismatch error detected"),
    //     }
    // }

    /// set coroutine send para
    /// without check the data type for coroutine performance reason
    #[inline]
    pub fn co_set_para<A>(&mut self, data: A) {
        let para = unsafe {
            let para_ptr = *self.para.as_mut_ptr();
            debug_assert!(!para_ptr.is_null());
            &mut *(para_ptr as *mut Option<A>)
        };
        *para = Some(data);
    }

    /// set current generator return value
    #[inline]
    pub fn set_ret<T>(&mut self, v: T)
    where
        T: Any,
    {
        let ret = unsafe {
            let ret_ptr = *self.ret.as_mut_ptr();
            assert!(!ret_ptr.is_null());
            &mut *ret_ptr
        };
        // checked downcast, mirrors get_para
        match ret.downcast_mut::<Option<T>>() {
            Some(r) => *r = Some(v),
            None => type_error::<T>("yield type mismatch error detected"),
        }
    }

    /// set coroutine return value
    /// without check the data type for coroutine performance reason
    #[inline]
    pub fn co_set_ret<T>(&mut self, v: T) {
        let ret = unsafe {
            let ret_ptr = *self.ret.as_mut_ptr();
            debug_assert!(!ret_ptr.is_null());
            &mut *(ret_ptr as *mut Option<T>)
        };
        *ret = Some(v);
    }
}
/// Coroutine managing environment
///
/// A lightweight handle to this thread's root context; the actual chain
/// of nested contexts hangs off `root.parent`.
pub struct ContextStack {
    root: *mut Context,
}
// one-time initialization of the nightly fast-path pointer to the
// thread-local root context
#[cfg(nightly)]
#[inline(never)]
unsafe fn init_root_p() {
    ROOT_CONTEXT_P = ROOT_CONTEXT.with(|r| &**r as *const _ as *mut Context);
}
impl ContextStack {
    #[cfg(nightly)]
    #[inline(never)]
    pub fn current() -> ContextStack {
        unsafe {
            if ROOT_CONTEXT_P.is_null() {
                init_root_p();
            }
            ContextStack {
                root: ROOT_CONTEXT_P,
            }
        }
    }

    #[cfg(not(nightly))]
    #[inline(never)]
    pub fn current() -> ContextStack {
        let root = ROOT_CONTEXT.with(|r| &**r as *const _ as *mut Context);
        ContextStack { root }
    }

    /// get the top context
    // `root.parent` always points at the innermost (currently running) context
    #[inline]
    pub fn top(&self) -> &'static mut Context {
        let root = unsafe { &mut *self.root };
        unsafe { &mut *root.parent }
    }

    /// get the coroutine context
    #[inline]
    pub fn co_ctx(&self) -> Option<&'static mut Context> {
        let root = unsafe { &mut *self.root };

        // search from top
        // a coroutine context is identified by a non-null local_data pointer
        let mut ctx = unsafe { &mut *root.parent };
        while ctx as *const _ != root as *const _ {
            if !ctx.local_data.is_null() {
                return Some(ctx);
            }
            ctx = unsafe { &mut *ctx.parent };
        }
        // not find any coroutine
        None
    }

    /// push the context to the thread context list
    // NOTE: `ctx.parent` is expected to carry the saved "new top" from the
    // matching pop_context call
    #[inline]
    pub fn push_context(&self, ctx: *mut Context) {
        let root = unsafe { &mut *self.root };
        let ctx = unsafe { &mut *ctx };
        let top = unsafe { &mut *root.parent };
        let new_top = ctx.parent;

        // link top and new ctx
        top.child = ctx;
        ctx.parent = top;

        // save the new top
        root.parent = new_top;
    }

    /// pop the context from the thread context list and return it's parent context
    #[inline]
    pub fn pop_context(&self, ctx: *mut Context) -> &'static mut Context {
        let root = unsafe { &mut *self.root };
        let ctx = unsafe { &mut *ctx };
        let parent = unsafe { &mut *ctx.parent };

        // save the old top in ctx's parent
        // (push_context later restores it from there)
        ctx.parent = root.parent;
        // unlink ctx and it's parent
        parent.child = ptr::null_mut();

        // save the new top
        root.parent = parent;

        parent
    }
}
#[inline]
fn type_error<A>(msg: &str) -> ! {
error!("{msg}, expected type: {}", std::any::type_name::<A>());
std::panic::panic_any(Error::TypeErr)
}
/// check the current context if it's generator
/// True when the calling code is running inside some generator
/// (the root context has at least one child).
#[inline]
pub fn is_generator() -> bool {
    let env = ContextStack::current();
    let root = unsafe { &mut *env.root };
    !root.child.is_null()
}
/// get the current context local data
/// only coroutine support local data
/// Walks the context chain from the innermost context towards the root and
/// returns the first non-null `local_data`; null if none is found.
#[inline]
pub fn get_local_data() -> *mut u8 {
    let env = ContextStack::current();
    let root = unsafe { &mut *env.root };

    // search from top
    let mut ctx = unsafe { &mut *root.parent };
    while ctx as *const _ != root as *const _ {
        if !ctx.local_data.is_null() {
            return ctx.local_data;
        }
        ctx = unsafe { &mut *ctx.parent };
    }

    ptr::null_mut()
}
#[cfg(test)]
mod test {
    use super::is_generator;

    #[test]
    fn test_is_context() {
        // this is the root context
        // plain test code runs on the thread root, never inside a generator
        assert!(!is_generator());
    }
}

View file

@ -0,0 +1,90 @@
//! # yield
//!
//! generator yield implementation
//!
use std::sync::atomic;
use crate::gen_impl::Generator;
use crate::rt::{Context, ContextStack, Error};
use crate::yield_::raw_yield_now;
/// passed in scope type
/// it not use the context to pass data, but keep it's own data ref
/// this struct provide both compile type info and runtime data
pub struct Scope<'a, A, T> {
    // slot the caller writes send-parameters into
    para: &'a mut Option<A>,
    // slot this generator writes yielded values into
    ret: &'a mut Option<T>,
}
impl<'a, A, T> Scope<'a, A, T> {
    /// create a new scope object
    pub(crate) fn new(para: &'a mut Option<A>, ret: &'a mut Option<T>) -> Self {
        Scope { para, ret }
    }

    /// set current generator return value
    #[inline]
    fn set_ret(&mut self, v: T) {
        *self.ret = Some(v);
    }

    /// raw yield without catch passed in para
    /// Stores `v`, swaps back to the parent context, and — when resumed
    /// with a cancel request — unwinds via an `Error::Cancel` panic.
    #[inline]
    fn raw_yield(&mut self, env: &ContextStack, context: &mut Context, v: T) {
        // check the context
        if !context.is_generator() {
            panic!("yield from none generator context");
        }

        self.set_ret(v);
        context._ref -= 1;
        raw_yield_now(env, context);

        // here we just panic to exit the func
        // (_ref != 1 after resume means raw_cancel asked us to unwind)
        if context._ref != 1 {
            std::panic::panic_any(Error::Cancel);
        }
    }

    /// yield something without catch passed in para
    #[inline]
    pub fn yield_with(&mut self, v: T) {
        let env = ContextStack::current();
        let context = env.top();
        self.raw_yield(&env, context, v);
    }

    /// get current generator send para
    #[inline]
    pub fn get_yield(&mut self) -> Option<A> {
        self.para.take()
    }

    /// yield and get the send para
    // it's totally safe that we can refer to the function block
    // since we will come back later
    #[inline]
    pub fn yield_(&mut self, v: T) -> Option<A> {
        self.yield_with(v);
        // keep the para read ordered after the context switch
        atomic::compiler_fence(atomic::Ordering::Acquire);
        self.get_yield()
    }

    /// `yield_from`
    /// the from generator must has the same type as itself
    /// Drives `g` to completion, re-yielding each of its values from this
    /// scope; returns the last send-para received (or None if `g` stalls).
    pub fn yield_from(&mut self, mut g: Generator<A, T>) -> Option<A> {
        let env = ContextStack::current();
        let context = env.top();
        let mut p = self.get_yield();
        while !g.is_done() {
            match g.raw_send(p) {
                None => return None,
                Some(r) => self.raw_yield(&env, context, r),
            }
            p = self.get_yield();
        }
        drop(g); // explicitly consume g
        p
    }
}

View file

@ -0,0 +1,424 @@
//! # generator stack
//!
//!
use std::error::Error;
use std::fmt::{self, Display};
use std::io;
use std::mem::MaybeUninit;
use std::os::raw::c_void;
use std::ptr;
#[cfg(all(unix, target_arch = "x86_64"))]
#[path = "unix.rs"]
pub mod sys;
#[cfg(all(unix, target_arch = "aarch64"))]
#[path = "unix.rs"]
pub mod sys;
#[cfg(all(windows, target_arch = "x86_64"))]
#[path = "windows.rs"]
pub mod sys;
// must align with StackBoxHeader
const ALIGN: usize = std::mem::size_of::<StackBoxHeader>();
// header size expressed in usize words (stack offsets are word-based)
const HEADER_SIZE: usize = std::mem::size_of::<StackBoxHeader>() / std::mem::size_of::<usize>();
// Bookkeeping record placed on the stack directly above each StackBox payload.
struct StackBoxHeader {
    // track the stack
    stack: Stack,
    // track how big the data is (in usize)
    data_size: usize,
    // non zero dealloc the stack
    need_drop: usize,
}
/// A pointer type for stack allocation.
/// Owns a value placed at the top of a generator `Stack`; dropping it
/// releases the stack space (and possibly the stack itself, see header).
pub struct StackBox<T> {
    // the stack memory
    ptr: ptr::NonNull<T>,
}
impl<T> StackBox<T> {
    /// create uninit stack box
    /// Reserves aligned space for `T` plus a `StackBoxHeader` at the top of
    /// `stack` by bumping the stack's word offset; the value itself is not
    /// initialized (call `init` afterwards).
    fn new_uninit(stack: &mut Stack, need_drop: usize) -> MaybeUninit<Self> {
        let offset = unsafe { &mut *stack.get_offset() };
        // alloc the data
        let layout = std::alloc::Layout::new::<T>();
        let align = std::cmp::max(layout.align(), ALIGN);
        // size of T rounded up to `align`, expressed in words
        let size = ((layout.size() + align - 1) & !(align - 1)) / std::mem::size_of::<usize>();
        let u_align = align / std::mem::size_of::<usize>();
        // padding so the payload lands on an `align` boundary
        // NOTE(review): when (*offset + size) is already aligned this adds a
        // full extra u_align words of padding — wasteful but harmless; confirm
        let pad_size = u_align - (*offset + size) % u_align;
        let data_size = size + pad_size;
        *offset += data_size;
        let ptr = unsafe { ptr::NonNull::new_unchecked(stack.end() as *mut T) };

        // init the header (placed above the payload; stack grows downward,
        // so bumping the offset again moves `stack.end()` to the header slot)
        *offset += HEADER_SIZE;
        unsafe {
            let mut header = ptr::NonNull::new_unchecked(stack.end() as *mut StackBoxHeader);
            let header = header.as_mut();
            header.data_size = data_size;
            header.need_drop = need_drop;
            header.stack = stack.shadow_clone();
            std::mem::MaybeUninit::new(StackBox { ptr })
        }
    }

    // locate the header stored HEADER_SIZE words below the payload pointer
    fn get_header(&self) -> &StackBoxHeader {
        unsafe {
            let header = (self.ptr.as_ptr() as *mut usize).offset(0 - HEADER_SIZE as isize);
            &*(header as *const StackBoxHeader)
        }
    }

    /// move data into the box
    pub(crate) unsafe fn init(&mut self, data: T) {
        ptr::write(self.ptr.as_ptr(), data);
    }

    // get the stack ptr
    pub(crate) fn as_ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Constructs a StackBox from a raw pointer.
    ///
    /// # Safety
    ///
    /// This function is unsafe because improper use may lead to
    /// memory problems. For example, a double-free may occur if the
    /// function is called twice on the same raw pointer.
    #[inline]
    pub(crate) unsafe fn from_raw(raw: *mut T) -> Self {
        StackBox {
            ptr: ptr::NonNull::new_unchecked(raw),
        }
    }

    // Consumes the `StackBox`, returning a wrapped raw pointer.
    // #[inline]
    // pub(crate) fn into_raw(b: StackBox<T>) -> *mut T {
    //     let ret = b.ptr.as_ptr();
    //     std::mem::forget(b);
    //     ret
    // }
}
// Type-erased one-shot closure stored on a generator stack, with the
// function pointers needed to call or drop it without knowing its type.
pub struct Func {
    // payload pointer; null after call_once so Drop won't double-drop
    data: *mut (),
    // reserved words to release from the stack on drop
    size: usize,
    // pointer to the owning stack's word offset
    offset: *mut usize,
    func: fn(*mut ()),
    drop: fn(*mut ()),
}
impl Func {
    /// Invoke the stored closure exactly once, consuming this `Func`.
    pub fn call_once(mut self) {
        // Null out the payload pointer before calling so the Drop impl
        // (which runs when `self` goes out of scope here) skips its
        // drop-in-place path — the closure consumes itself in `func`.
        let data = std::mem::replace(&mut self.data, ptr::null_mut());
        (self.func)(data);
    }
}
impl Drop for Func {
    /// Drop the closure if it was never called, then release its
    /// reserved words from the owning stack.
    fn drop(&mut self) {
        if !self.data.is_null() {
            (self.drop)(self.data);
        }
        // give the stack space back by rolling the offset down
        unsafe { *self.offset -= self.size };
    }
}
impl<F: FnOnce()> StackBox<F> {
    // type-erased trampoline: read the closure out of the stack and run it
    fn call_once(data: *mut ()) {
        unsafe {
            let data = data as *mut F;
            let f = data.read();
            f();
        }
    }

    // type-erased destructor for a never-called closure
    fn drop_inner(data: *mut ()) {
        unsafe {
            let data = data as *mut F;
            ptr::drop_in_place(data);
        }
    }

    /// create a functor on the stack
    /// Moves `data` onto `stack` and returns a type-erased `Func` handle
    /// that can call or drop it later.
    pub(crate) fn new_fn_once(stack: &mut Stack, data: F) -> Func {
        unsafe {
            let mut d = Self::new_uninit(stack, 0);
            (*d.as_mut_ptr()).init(data);
            let d = d.assume_init();
            let header = d.get_header();
            let f = Func {
                data: d.ptr.as_ptr() as *mut (),
                size: header.data_size + HEADER_SIZE,
                offset: stack.get_offset(),
                func: Self::call_once,
                drop: Self::drop_inner,
            };
            // the Func now owns the slot; forget the box so StackBox::drop
            // doesn't release the stack space a second time
            std::mem::forget(d);
            f
        }
    }
}
impl<T> std::ops::Deref for StackBox<T> {
    type Target = T;

    /// Borrow the value stored on the generator stack.
    fn deref(&self) -> &T {
        // SAFETY: `ptr` points at a value placed by `init` and remains
        // valid for the lifetime of this box.
        unsafe { &*self.ptr.as_ptr() }
    }
}
impl<T> std::ops::DerefMut for StackBox<T> {
    /// Mutably borrow the value stored on the generator stack.
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: `ptr` points at a value placed by `init`, and `&mut self`
        // guarantees exclusive access. `NonNull::as_mut` already yields
        // `&mut T`; the original's `&mut *...as_mut()` was a redundant
        // reborrow, removed here.
        unsafe { self.ptr.as_mut() }
    }
}
impl<T> Drop for StackBox<T> {
    /// Release the slot (payload + header words) back to the stack, drop
    /// the value in place, and — for the owning box — free the stack memory.
    fn drop(&mut self) {
        let header = self.get_header();
        unsafe {
            // roll the stack offset back past this allocation
            *header.stack.get_offset() -= header.data_size + HEADER_SIZE;
            ptr::drop_in_place(self.ptr.as_ptr());
            if header.need_drop != 0 {
                // this box was created with need_drop != 0 (see
                // alloc_uninit_box), so it owns the whole stack
                header.stack.drop_stack();
            }
        }
    }
}
/// Error type returned by stack allocation methods.
#[derive(Debug)]
pub enum StackError {
    /// Contains the maximum amount of memory allowed to be allocated as stack space.
    ExceedsMaximumSize(usize),

    /// Returned if some kind of I/O error happens during allocation.
    IoError(io::Error),
}
impl Display for StackError {
    /// Human-readable message; delegates to the inner error for I/O failures.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            StackError::ExceedsMaximumSize(size) => write!(
                fmt,
                "Requested more than max size of {size} bytes for a stack"
            ),
            StackError::IoError(ref e) => e.fmt(fmt),
        }
    }
}
impl Error for StackError {
    /// The underlying cause: the wrapped I/O error, when there is one.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            StackError::ExceedsMaximumSize(_) => None,
            StackError::IoError(e) => Some(e),
        }
    }
}
/// Represents any kind of stack memory.
///
/// `FixedSizeStack` as well as `ProtectedFixedSizeStack`
/// can be used to allocate actual stack space.
#[derive(Debug)]
pub struct SysStack {
    // highest address (stacks grow downward from here)
    top: *mut c_void,
    // lowest usable address
    bottom: *mut c_void,
}
impl SysStack {
    /// Creates a (non-owning) representation of some stack memory.
    ///
    /// It is unsafe because it is your responsibility to make sure that `top` and `bottom` are valid
    /// addresses.
    #[inline]
    pub unsafe fn new(top: *mut c_void, bottom: *mut c_void) -> SysStack {
        debug_assert!(top >= bottom);

        SysStack { top, bottom }
    }

    /// Returns the top of the stack from which on it grows downwards towards bottom().
    #[inline]
    pub fn top(&self) -> *mut c_void {
        self.top
    }

    /// Returns the bottom of the stack and thus it's end.
    #[inline]
    pub fn bottom(&self) -> *mut c_void {
        self.bottom
    }

    /// Returns the size of the stack between top() and bottom().
    #[inline]
    pub fn len(&self) -> usize {
        self.top as usize - self.bottom as usize
    }

    /// Returns the minimal stack size allowed by the current platform.
    #[inline]
    pub fn min_size() -> usize {
        sys::min_stack_size()
    }

    /// Allocates a new stack of `size`.
    /// With `protected` set, an extra guard page is reserved and protected
    /// below the usable area.
    fn allocate(mut size: usize, protected: bool) -> Result<SysStack, StackError> {
        let page_size = sys::page_size();
        let min_stack_size = sys::min_stack_size();
        let max_stack_size = sys::max_stack_size();
        // guard page doubles the extra space when protection is requested
        let add_shift = i32::from(protected);
        let add = page_size << add_shift;

        if size < min_stack_size {
            size = min_stack_size;
        }

        // NOTE(review): this rounds (size - 1) DOWN to a page multiple, so a
        // page-aligned `size` shrinks by one page before `add` is applied —
        // looks intentional for this crate's word-based sizing, but confirm
        size = (size - 1) & !(page_size.overflowing_sub(1).0);

        if let Some(size) = size.checked_add(add) {
            if size <= max_stack_size {
                let mut ret = unsafe { sys::allocate_stack(size) };

                if protected {
                    if let Ok(stack) = ret {
                        ret = unsafe { sys::protect_stack(&stack) };
                    }
                }

                return ret.map_err(StackError::IoError);
            }
        }

        Err(StackError::ExceedsMaximumSize(max_stack_size - add))
    }
}
// SAFETY: SysStack is a pair of raw addresses describing an mmap'd/VirtualAlloc'd
// region; moving the descriptor between threads is fine — NOTE(review): callers
// must still ensure only one thread uses the memory at a time.
unsafe impl Send for SysStack {}
/// generator stack
/// this struct will not dealloc the memory
/// instead StackBox<> would track it's usage and dealloc it
pub struct Stack {
    buf: SysStack,
}
impl Stack {
    /// Allocate a new stack of `size`. If size = 0, this is a `dummy_stack`
    ///
    /// `size` is in words; an odd `size` enables a full-stack footprint
    /// (every word pre-filled) for precise usage tracking.
    pub fn new(size: usize) -> Stack {
        let track = (size & 1) != 0;
        let mut bytes = size * std::mem::size_of::<usize>();
        // the minimal size
        let min_size = SysStack::min_size();

        if bytes < min_size {
            bytes = min_size;
        }

        let buf = SysStack::allocate(bytes, true).expect("failed to alloc sys stack");

        let stk = Stack { buf };

        // if size is not even we do the full foot print test
        let count = if track {
            stk.size()
        } else {
            // we only check the last few words
            8
        };

        unsafe {
            // paint the low end with the 0xEE..EE magic pattern;
            // get_used_size later scans how much of it survived
            let buf = stk.buf.bottom as *mut usize;
            ptr::write_bytes(buf, 0xEE, count);
        }

        // init the stack box usage
        // (offset word lives at the very top of the stack, see get_offset)
        let offset = stk.get_offset();
        unsafe { *offset = 1 };

        stk
    }

    /// get used stack size
    /// Counts untouched magic words from the bottom; everything above them
    /// was overwritten by real stack usage.
    pub fn get_used_size(&self) -> usize {
        let mut offset: usize = 0;
        unsafe {
            // build the full-word 0xEEEE..EE pattern to compare against
            let mut magic: usize = 0xEE;
            ptr::write_bytes(&mut magic, 0xEE, 1);
            let mut ptr = self.buf.bottom as *mut usize;
            while *ptr == magic {
                offset += 1;
                ptr = ptr.offset(1);
            }
        }
        let cap = self.size();
        cap - offset
    }

    /// get the stack cap
    #[inline]
    pub fn size(&self) -> usize {
        self.buf.len() / std::mem::size_of::<usize>()
    }

    /// Point to the high end of the allocated stack
    /// (i.e. top minus the words already reserved via the offset)
    pub fn end(&self) -> *mut usize {
        let offset = self.get_offset();
        unsafe { (self.buf.top as *mut usize).offset(0 - *offset as isize) }
    }

    /// Point to the low end of the allocated stack
    #[allow(dead_code)]
    pub fn begin(&self) -> *mut usize {
        self.buf.bottom as *mut _
    }

    /// alloc buffer on this stack
    pub fn alloc_uninit_box<T>(&mut self) -> MaybeUninit<StackBox<T>> {
        // the first obj should set need drop to non zero
        StackBox::<T>::new_uninit(self, 1)
    }

    // get offset
    // the reserved-words counter is stored in the topmost word of the stack
    fn get_offset(&self) -> *mut usize {
        unsafe { (self.buf.top as *mut usize).offset(-1) }
    }

    // dealloc the stack
    // includes the guard page placed below `bottom` by SysStack::allocate
    fn drop_stack(&self) {
        if self.buf.len() == 0 {
            return;
        }
        let page_size = sys::page_size();
        let guard = (self.buf.bottom as usize - page_size) as *mut c_void;
        let size_with_guard = self.buf.len() + page_size;
        unsafe {
            sys::deallocate_stack(guard, size_with_guard);
        }
    }

    // non-owning copy used by StackBoxHeader to reach the offset later
    fn shadow_clone(&self) -> Self {
        Stack {
            buf: SysStack {
                top: self.buf.top,
                bottom: self.buf.bottom,
            },
        }
    }
}
impl fmt::Debug for Stack {
    /// Shows the underlying memory range plus the current word offset.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let offset = self.get_offset();
        write!(f, "Stack<{:?}, Offset={}>", self.buf, unsafe { *offset })
    }
}

View file

@ -0,0 +1,123 @@
use std::io;
use std::mem;
use std::os::raw::c_void;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::usize;
use super::SysStack;
// platforms without a MAP_STACK mmap flag get a no-op value
#[cfg(any(
    target_os = "openbsd",
    target_os = "macos",
    target_os = "ios",
    target_os = "android",
    target_os = "illumos",
    target_os = "solaris"
))]
const MAP_STACK: libc::c_int = 0;

// everywhere else, pass the real MAP_STACK hint to mmap
#[cfg(not(any(
    target_os = "openbsd",
    target_os = "macos",
    target_os = "ios",
    target_os = "android",
    target_os = "illumos",
    target_os = "solaris"
)))]
const MAP_STACK: libc::c_int = libc::MAP_STACK;
/// mmap a private anonymous region of `size` bytes usable as a stack.
///
/// # Safety
/// `size` should be a page multiple; the returned region is unguarded
/// until `protect_stack` is applied.
pub unsafe fn allocate_stack(size: usize) -> io::Result<SysStack> {
    const NULL: *mut libc::c_void = 0 as *mut libc::c_void;
    const PROT: libc::c_int = libc::PROT_READ | libc::PROT_WRITE;
    const TYPE: libc::c_int = libc::MAP_PRIVATE | libc::MAP_ANON | MAP_STACK;

    let ptr = libc::mmap(NULL, size, PROT, TYPE, -1, 0);

    if ptr == libc::MAP_FAILED {
        Err(io::Error::last_os_error())
    } else {
        // top = highest address, bottom = mmap base
        Ok(SysStack::new(
            (ptr as usize + size) as *mut c_void,
            ptr as *mut c_void,
        ))
    }
}
/// Turn the lowest page of `stack` into an inaccessible guard page and
/// return the stack shrunk to exclude it.
///
/// # Safety
/// `stack` must describe a live mapping whose length is a page multiple.
pub unsafe fn protect_stack(stack: &SysStack) -> io::Result<SysStack> {
    let page_size = page_size();

    debug_assert!(stack.len() % page_size == 0 && stack.len() != 0);

    let ret = {
        let bottom = stack.bottom() as *mut libc::c_void;
        libc::mprotect(bottom, page_size, libc::PROT_NONE)
    };

    if ret != 0 {
        Err(io::Error::last_os_error())
    } else {
        // usable bottom moves up one page past the guard
        let bottom = (stack.bottom() as usize + page_size) as *mut c_void;
        Ok(SysStack::new(stack.top(), bottom))
    }
}
/// Unmap a stack previously created by `allocate_stack` (including its
/// guard page, if any). Errors from munmap are ignored.
pub unsafe fn deallocate_stack(ptr: *mut c_void, size: usize) {
    libc::munmap(ptr as *mut libc::c_void, size);
}
/// System page size, cached in a relaxed atomic after the first sysconf call.
/// (Benign race: concurrent first calls just compute the same value twice.)
pub fn page_size() -> usize {
    static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);

    let mut ret = PAGE_SIZE.load(Ordering::Relaxed);

    if ret == 0 {
        unsafe {
            ret = libc::sysconf(libc::_SC_PAGESIZE) as usize;
        }

        PAGE_SIZE.store(ret, Ordering::Relaxed);
    }

    ret
}
/// Smallest stack this platform layer will allocate: one page.
pub fn min_stack_size() -> usize {
    // Previously libc::SIGSTKSZ has been used for this, but it proved to be very unreliable,
    // because the resulting values varied greatly between platforms.
    page_size()
}
/// Maximum stack size allowed, taken from RLIMIT_STACK's hard limit and
/// cached after the first successful query.
#[cfg(not(target_os = "fuchsia"))]
pub fn max_stack_size() -> usize {
    // NOTE(review): this static caches the max stack size, not the page size —
    // the name is misleading but local to this function
    static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);

    let mut ret = PAGE_SIZE.load(Ordering::Relaxed);

    if ret == 0 {
        let mut limit = mem::MaybeUninit::uninit();
        let limitret = unsafe { libc::getrlimit(libc::RLIMIT_STACK, limit.as_mut_ptr()) };
        let limit = unsafe { limit.assume_init() };

        if limitret == 0 {
            ret = if limit.rlim_max == libc::RLIM_INFINITY
                || limit.rlim_max > (usize::MAX as libc::rlim_t)
            {
                usize::MAX
            } else {
                limit.rlim_max as usize
            };

            PAGE_SIZE.store(ret, Ordering::Relaxed);
        } else {
            // getrlimit failed: fall back to 1 GiB
            // NOTE(review): this fallback is deliberately not cached, so it
            // is retried on every call
            ret = 1024 * 1024 * 1024;
        }
    }

    ret
}
#[cfg(target_os = "fuchsia")]
pub fn max_stack_size() -> usize {
    // Fuchsia doesn't have a platform defined hard cap.
    usize::MAX
}

View file

@ -0,0 +1,82 @@
use std::io;
use std::mem;
use std::os::raw::c_void;
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::usize;
use windows::Win32::System::Memory::*;
use windows::Win32::System::SystemInformation::*;
use super::SysStack;
/// Reserve and commit `size` bytes of read/write memory for use as a stack.
///
/// # Safety
/// `size` should be a page multiple; the region is unguarded until
/// `protect_stack` is applied.
pub unsafe fn allocate_stack(size: usize) -> io::Result<SysStack> {
    let ptr = VirtualAlloc(
        Some(ptr::null()),
        size,
        MEM_COMMIT | MEM_RESERVE,
        PAGE_READWRITE,
    );

    if ptr.is_null() {
        Err(io::Error::last_os_error())
    } else {
        // top = highest address, bottom = allocation base
        Ok(SysStack::new(
            (ptr as usize + size) as *mut c_void,
            ptr as *mut c_void,
        ))
    }
}
/// Mark the lowest page of `stack` as a guard page (read-only + PAGE_GUARD)
/// and return the stack shrunk to exclude it.
///
/// # Safety
/// `stack` must describe a live allocation whose length is a page multiple.
pub unsafe fn protect_stack(stack: &SysStack) -> io::Result<SysStack> {
    let page_size = page_size();
    let mut old_prot = PAGE_PROTECTION_FLAGS(0);

    debug_assert!(stack.len() % page_size == 0 && stack.len() != 0);

    let ret = VirtualProtect(
        stack.bottom(),
        page_size,
        PAGE_READONLY | PAGE_GUARD,
        &mut old_prot,
    );

    if !ret.as_bool() {
        Err(io::Error::last_os_error())
    } else {
        // usable bottom moves up one page past the guard
        let bottom = (stack.bottom() as usize + page_size) as *mut c_void;
        Ok(SysStack::new(stack.top(), bottom))
    }
}
/// Release a stack allocation; VirtualFree with MEM_RELEASE ignores the
/// size, so the second parameter is unused.
pub unsafe fn deallocate_stack(ptr: *mut c_void, _: usize) {
    VirtualFree(ptr, 0, MEM_RELEASE);
}
/// System page size, cached in a relaxed atomic after the first query.
pub fn page_size() -> usize {
    static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);

    let mut ret = PAGE_SIZE.load(Ordering::Relaxed);

    if ret == 0 {
        ret = unsafe {
            let mut info = mem::zeroed();
            GetSystemInfo(&mut info);
            info.dwPageSize as usize
        };

        PAGE_SIZE.store(ret, Ordering::Relaxed);
    }

    ret
}
// Windows does not seem to provide a stack limit API
// so the floor is simply one page
pub fn min_stack_size() -> usize {
    page_size()
}
// Windows does not seem to provide a stack limit API
// so no cap is enforced here
pub fn max_stack_size() -> usize {
    usize::MAX
}

View file

@ -0,0 +1,162 @@
//! # yield
//!
//! generator yield implementation
//!
use std::any::Any;
use std::sync::atomic;
use crate::gen_impl::Generator;
use crate::reg_context::RegContext;
use crate::rt::{is_generator, Context, ContextStack, Error};
/// it's a special return instruction that yield nothing
/// but only terminate the generator safely
#[macro_export]
macro_rules! done {
    () => {{
        return $crate::done();
    }};
}
/// don't use it directly, use done!() macro instead
/// would panic if use in none generator context
// diverges via an `Error::Done` panic that the runtime treats as a
// normal, silent termination of the generator
#[doc(hidden)]
#[inline]
pub fn done<T>() -> T {
    assert!(is_generator(), "done is only possible in a generator");
    std::panic::panic_any(Error::Done)
}
/// switch back to parent context
/// (no value is produced; execution resumes here on the next send/resume)
#[inline]
pub fn yield_now() {
    let env = ContextStack::current();
    let cur = env.top();
    raw_yield_now(&env, cur);
}
/// Pop the current context off the thread's context chain and swap
/// registers back to its parent. Returns when this context is resumed.
#[inline]
pub fn raw_yield_now(env: &ContextStack, cur: &mut Context) {
    let parent = env.pop_context(cur as *mut _);
    RegContext::swap(&mut cur.regs, &parent.regs);
}
/// raw yield without catch passed in para
/// Stores `v` as the yielded value and switches to the parent; when
/// resumed under a cancel request, unwinds with `Error::Cancel`.
#[inline]
fn raw_yield<T: Any>(env: &ContextStack, context: &mut Context, v: T) {
    // check the context
    if !context.is_generator() {
        panic!("yield from none generator context");
    }

    context.set_ret(v);
    context._ref -= 1;
    raw_yield_now(env, context);

    // here we just panic to exit the func
    // (_ref != 1 after resume means raw_cancel asked us to unwind)
    if context._ref != 1 {
        std::panic::panic_any(Error::Cancel);
    }
}
/// yield something without catch passed in para
#[inline]
#[deprecated(since = "0.6.18", note = "please use `scope` version instead")]
pub fn yield_with<T: Any>(v: T) {
    let env = ContextStack::current();
    let context = env.top();
    raw_yield(&env, context, v);
}
/// get the passed in para
/// Reads the value most recently supplied through `send` for the
/// innermost generator; `None` if nothing was sent.
#[inline]
#[deprecated(since = "0.6.18", note = "please use `scope` version instead")]
pub fn get_yield<A: Any>() -> Option<A> {
    let context = ContextStack::current().top();
    raw_get_yield(context)
}
/// get the passed in para from context
///
/// Panics with `Error::ContextErr` when called outside a generator, since
/// only generator contexts carry a para slot. (The original wrapped this
/// error path in a redundant doubled `{{ ... }}` block, removed here.)
#[inline]
fn raw_get_yield<A: Any>(context: &mut Context) -> Option<A> {
    // check the context
    if !context.is_generator() {
        error!("get yield from none generator context");
        std::panic::panic_any(Error::ContextErr);
    }

    context.get_para()
}
/// yield and get the send para
// here yield need to return a static lifetime value, which is Any required
// this is fine, but it's totally safe that we can refer to the function block
// since we will come back later
#[inline]
#[deprecated(since = "0.6.18", note = "please use `scope` version instead")]
pub fn yield_<A: Any, T: Any>(v: T) -> Option<A> {
    let env = ContextStack::current();
    let context = env.top();
    raw_yield(&env, context, v);
    // keep the para read ordered after the context switch
    atomic::compiler_fence(atomic::Ordering::Acquire);
    raw_get_yield(context)
}
/// `yield_from`
/// Drives `g` to completion, re-yielding each of its values from the
/// current generator; returns the last send-para received.
#[deprecated(since = "0.6.18", note = "please use `scope` version instead")]
pub fn yield_from<A: Any, T: Any>(mut g: Generator<A, T>) -> Option<A> {
    let env = ContextStack::current();
    let context = env.top();
    let mut p = context.get_para();
    while !g.is_done() {
        match g.raw_send(p) {
            None => return None,
            Some(r) => raw_yield(&env, context, r),
        }
        p = context.get_para();
    }
    drop(g); // explicitly consume g
    p
}
/// coroutine yield
/// Fast-path yield for coroutine contexts (found via `co_ctx`); stores the
/// value unchecked and swaps from the innermost context to the coroutine's
/// parent.
pub fn co_yield_with<T: Any>(v: T) {
    let env = ContextStack::current();
    let context = env.co_ctx().unwrap();

    // check the context, already checked in co_ctx()
    // if !context.is_generator() {
    //     info!("yield from none coroutine context");
    //     // do nothing, just return
    //     return;
    // }

    // here we just panic to exit the func
    // (a pending cancel means we must unwind instead of yielding)
    if context._ref != 1 {
        std::panic::panic_any(Error::Cancel);
    }

    context.co_set_ret(v);
    context._ref -= 1;

    let parent = env.pop_context(context);
    let top = unsafe { &mut *context.parent };
    // here we should use the top regs
    RegContext::swap(&mut top.regs, &parent.regs);
}
/// coroutine get passed in yield para
///
/// Returns `None` when not running inside a coroutine context or when no
/// para has been sent.
pub fn co_get_yield<A: Any>() -> Option<A> {
    // only a coroutine context (one carrying local data) has a para slot
    match ContextStack::current().co_ctx() {
        Some(ctx) => ctx.co_get_para(),
        None => None,
    }
}
/// set current coroutine para in user space
/// Silently does nothing when not inside a coroutine context.
pub fn co_set_para<A: Any>(para: A) {
    if let Some(ctx) = ContextStack::current().co_ctx() {
        ctx.co_set_para(para)
    }
}