Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
47 commits
Select commit Hold shift + click to select a range
c171983
Merge pull request #7 from intel/master
wenyongh May 17, 2019
3b9cc32
Merge pull request #8 from intel/master
wenyongh May 17, 2019
a333655
Merge pull request #9 from intel/master
wenyongh May 20, 2019
81d4669
Merge pull request #10 from intel/master
wenyongh May 23, 2019
8d65395
Implement memory profiler, optimize memory usage, modify code indent
wenyongh May 23, 2019
0d60a69
Merge pull request #11 from intel/master
wenyongh May 23, 2019
f32965b
Implement memory.grow and limit heap space base offset to 1G; modify …
wenyongh May 31, 2019
0fede7b
Merge pull request #12 from intel/master
wenyongh May 31, 2019
ee19aff
Merge pull request #13 from intel/master
wenyongh Jun 8, 2019
1772183
Add a new extension library: connection
wenyongh Jun 11, 2019
624c58f
Fix bug of reading magic number and version in big endian platform
wenyongh Jun 14, 2019
c27a72d
Merge pull request #14 from intel/master
wenyongh Jun 14, 2019
5c0c30e
Merge pull request #15 from intel/master
wenyongh Jun 14, 2019
781f2ec
Merge pull request #16 from intel/master
wenyongh Jul 10, 2019
c290de7
Re-org platform APIs: move most platform APIs from iwasm to shared-lib
wenyongh Jul 22, 2019
186800a
Merge pull request #17 from intel/master
wenyongh Jul 22, 2019
d6d0e02
Merge pull request #18 from intel/master
wenyongh Aug 1, 2019
a59616e
Merge pull request #19 from intel/master
wenyongh Aug 2, 2019
d518e64
Enhance wasm loader to fix some security issues
wenyongh Aug 8, 2019
311ce9a
Merge pull request #20 from intel/master
wenyongh Aug 8, 2019
7a08c3d
Merge pull request #21 from intel/master
wenyongh Aug 14, 2019
6a318c4
Fix issue about illegal load of EXC_RETURN into PC on stm32 board
wenyongh Aug 14, 2019
ca601f5
Merge pull request #22 from intel/master
wenyongh Aug 14, 2019
e119204
Updates that let a restricted version of the interpreter run in SGX
wenyongh Aug 19, 2019
7993bc1
Enable native/app address validation and conversion for wasm app
wenyongh Aug 21, 2019
6dc3e98
Remove wasm_application_exectue_* APIs from wasm_export.h which makes…
wenyongh Aug 21, 2019
49429b1
Refine binary size and fix several minor issues
wenyongh Aug 27, 2019
5ed0b0c
Merge pull request #23 from intel/master
wenyongh Aug 27, 2019
76ed0fc
Merge pull request #24 from intel/master
wenyongh Aug 27, 2019
ad5f4fc
Add more strict security checks for libc wrapper API's
wenyongh Aug 28, 2019
4c284ad
Merge pull request #25 from intel/master
wenyongh Aug 28, 2019
34676da
Merge pull request #26 from intel/master
wenyongh Aug 28, 2019
cdaa0de
Use one libc wrapper copy for sgx and other platforms; remove bh_prin…
wenyongh Aug 28, 2019
2f766ab
Merge pull request #27 from intel/master
wenyongh Aug 28, 2019
cfd2c2d
Enhance security of libc strcpy/sprintf wrapper function
wenyongh Aug 29, 2019
1ac087c
Merge pull request #28 from intel/master
wenyongh Sep 6, 2019
95dbaac
Merge pull request #29 from intel/master
wenyongh Sep 8, 2019
1b2dbeb
Merge pull request #30 from intel/master
wenyongh Sep 10, 2019
64d75fa
Fix issue of call native for x86_64/arm/mips, add module inst paramet…
wenyongh Sep 10, 2019
8ece291
Merge pull request #31 from intel/master
wenyongh Sep 10, 2019
4bbf343
Merge pull request #32 from intel/master
wenyongh Sep 10, 2019
ccf65e6
Merge pull request #33 from intel/master
wenyongh Sep 11, 2019
46fa009
Remove get_module_inst() and fix issue of call native
wenyongh Sep 11, 2019
3969ee5
Merge pull request #34 from intel/master
wenyongh Sep 11, 2019
3a696f6
Refine wgl lib: remove module_inst parameter from widget functions; m…
wenyongh Sep 16, 2019
09e8b61
Merge pull request #35 from intel/master
wenyongh Sep 16, 2019
cd896e1
Refine interpreter call native process, refine memory boundary check
wenyongh Sep 19, 2019
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
110 changes: 54 additions & 56 deletions core/iwasm/runtime/vmcore-wasm/invokeNative_em64.s
Original file line number Diff line number Diff line change
@@ -1,72 +1,70 @@
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Ivan Volosyuk
//
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.text
.align 2
.globl invokeNative
.type invokeNative, @function
invokeNative:
/* rdi - memory */
/* rsi - n fp args */
/* rdx - n mem args */
/* rcx - function ptr */
/* rdi - function ptr */
/* rsi - argv */
/* rdx - n_stacks */

push %rbp
mov %rsp, %rbp

/* cycle to fill all fp args */
movq 8(%rdi), %xmm0
movq 16(%rdi), %xmm1
movq 24(%rdi), %xmm2
movq 32(%rdi), %xmm3
movq 40(%rdi), %xmm4
movq 48(%rdi), %xmm5
movq 56(%rdi), %xmm6
movq 64(%rdi), %xmm7

mov %rsp, %r10 /* Check that stack is aligned on */
and $8, %r10 /* 16 bytes. This code may be removed */
jz no_abort /* when we are sure that compiler always */
int3 /* calls us with aligned stack */
no_abort:
mov %rdx, %r10 /* Align stack on 16 bytes before pushing */
and $1, %r10 /* stack arguments in case we have an odd */
shl $3, %r10 /* number of stack arguments */
sub %r10, %rsp
mov %rdx, %r10
mov %rsp, %r11 /* Check that stack is aligned on */
and $8, %r11 /* 16 bytes. This code may be removed */
je check_stack_succ /* when we are sure that compiler always */
int3 /* calls us with aligned stack */
check_stack_succ:
mov %r10, %r11 /* Align stack on 16 bytes before pushing */
and $1, %r11 /* stack arguments in case we have an odd */
shl $3, %r11 /* number of stack arguments */
sub %r11, %rsp
/* store memory args */
movq %rcx, %r10 /* func ptr */
movq %rdx, %rcx /* counter */
lea 8+64+48-8(%rdi,%rcx,8), %rdx
sub %rsp, %rdx
movq %rdi, %r11 /* func ptr */
movq %r10, %rcx /* counter */
lea 64+48-8(%rsi,%rcx,8), %r10
sub %rsp, %r10
cmpq $0, %rcx
jz cycle_end
cycle:
push 0(%rsp,%rdx)
loop cycle
cycle_end:
movq 80(%rdi), %rsi
movq 88(%rdi), %rdx
movq 96(%rdi), %rcx
movq 104(%rdi), %r8
movq 112(%rdi), %r9
je push_args_end
push_args:
push 0(%rsp,%r10)
loop push_args
push_args_end:
/* fill all fp args */
movq 0x00(%rsi), %xmm0
movq 0x08(%rsi), %xmm1
movq 0x10(%rsi), %xmm2
movq 0x18(%rsi), %xmm3
movq 0x20(%rsi), %xmm4
movq 0x28(%rsi), %xmm5
movq 0x30(%rsi), %xmm6
movq 0x38(%rsi), %xmm7

movq 72(%rdi), %rdi
/* fill all int args */
movq 0x40(%rsi), %rdi
movq 0x50(%rsi), %rdx
movq 0x58(%rsi), %rcx
movq 0x60(%rsi), %r8
movq 0x68(%rsi), %r9
movq 0x48(%rsi), %rsi

call *%r10
call *%r11
leave
ret

66 changes: 24 additions & 42 deletions core/iwasm/runtime/vmcore-wasm/invokeNative_ia32.s
Original file line number Diff line number Diff line change
@@ -1,56 +1,38 @@
// Copyright (C) 2019 Intel Corporation. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: Ivan Volosyuk
//
.text
.align 2
.globl invokeNative
.type invokeNative, @function
invokeNative:

push %ebp
movl %esp, %ebp
push %ecx
movl 8(%ebp), %eax /* eax = argv */
movl 12(%ebp), %ecx /* ecx = argc */
movl 16(%ebp), %ecx /* ecx = argc */
movl 12(%ebp), %edx /* edx = argv */
test %ecx, %ecx
je restore_ecx /* if ecx == 0, skip pushing arguments */
leal -4(%eax,%ecx,4), %eax /* eax = eax + ecx * 4 - 4 */
subl %esp, %eax /* eax = eax - esp */
jz skip_push_args /* if ecx == 0, skip pushing arguments */
leal -4(%edx,%ecx,4), %edx /* edx = edx + ecx * 4 - 4 */
subl %esp, %edx /* edx = edx - esp */
1:
push 0(%esp,%eax)
push 0(%esp,%edx)
loop 1b /* loop ecx counts */
restore_ecx:
movl -4(%ebp), %ecx /* restore ecx */
movl 16(%ebp), %eax /* eax = func_ptr */
call *%eax
skip_push_args:
movl 8(%ebp), %edx /* edx = func_ptr */
call *%edx
leave
ret

48 changes: 26 additions & 22 deletions core/iwasm/runtime/vmcore-wasm/wasm_interp.c
Original file line number Diff line number Diff line change
Expand Up @@ -75,17 +75,17 @@ GET_F64_FROM_ADDR (uint32 *addr)
#endif /* WASM_CPU_SUPPORTS_UNALIGNED_64BIT_ACCESS != 0 */

#if WASM_ENABLE_EXT_MEMORY_SPACE != 0
#define CHECK_EXT_MEMORY_SPACE() \
#define CHECK_EXT_MEMORY_SPACE() \
else if (module->ext_mem_data \
&& module->ext_mem_base_offset <= offset1 \
&& offset1 < module->ext_mem_base_offset \
+ module->ext_mem_size) { \
/* If offset1 is in valid range, maddr must also be in valid range, \
no need to check it again. */ \
maddr = module->ext_mem_data \
+ (offset1 - module->ext_mem_base_offset); \
if (maddr < module->ext_mem_data) \
goto out_of_bounds; \
maddr1 = maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD]; \
if (maddr1 > module->ext_mem_data_end) \
if (maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD] > \
module->ext_mem_data_end) \
goto out_of_bounds; \
}
#else
Expand All @@ -94,26 +94,25 @@ GET_F64_FROM_ADDR (uint32 *addr)

#define CHECK_MEMORY_OVERFLOW() do { \
uint32 offset1 = offset + addr; \
uint8 *maddr1; \
if (flags != 2) \
LOG_VERBOSE("unaligned load/store in wasm interp, flag is: %d.\n", flags);\
if (offset1 < offset) \
goto out_of_bounds; \
if (offset1 < heap_base_offset) { \
/* if (flags != 2) \
LOG_VERBOSE("unaligned load/store in wasm interp, flag: %d.\n", flags); */\
/* The WASM spec doesn't require that the dynamic address operand must be \
unsigned, so we don't check whether integer overflow or not here. */ \
/* if (offset1 < offset) \
goto out_of_bounds; */ \
if (offset1 < memory_data_size) { \
/* If offset1 is in valid range, maddr must also be in valid range, \
no need to check it again. */ \
maddr = memory->memory_data + offset1; \
if (maddr < memory->base_addr) \
goto out_of_bounds; \
maddr1 = maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD]; \
if (maddr1 > memory->end_addr) \
if (maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD] > memory->end_addr) \
goto out_of_bounds; \
} \
else if (offset1 < memory->heap_base_offset \
+ (memory->heap_data_end - memory->heap_data)) { \
else if (offset1 > heap_base_offset \
&& offset1 < heap_base_offset + heap_data_size) { \
/* If offset1 is in valid range, maddr must also be in valid range, \
no need to check it again. */ \
maddr = memory->heap_data + offset1 - memory->heap_base_offset; \
if (maddr < memory->heap_data) \
goto out_of_bounds; \
maddr1 = maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD]; \
if (maddr1 > memory->heap_data_end) \
if (maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD] > memory->heap_data_end) \
goto out_of_bounds; \
} \
CHECK_EXT_MEMORY_SPACE() \
Expand Down Expand Up @@ -684,7 +683,11 @@ wasm_interp_call_func_bytecode(WASMThread *self,
{
WASMModuleInstance *module = self->module_inst;
WASMMemoryInstance *memory = module->default_memory;
int32 heap_base_offset = memory ? memory->heap_base_offset : 0;
uint32 memory_data_size = memory
? NumBytesPerPage * memory->cur_page_count : 0;
uint32 heap_base_offset = memory ? memory->heap_base_offset : 0;
uint32 heap_data_size = memory
? memory->heap_data_end - memory->heap_data : 0;
WASMTableInstance *table = module->default_table;
uint8 opcode_IMPDEP2 = WASM_OP_IMPDEP2;
WASMInterpFrame *frame = NULL;
Expand Down Expand Up @@ -1302,6 +1305,7 @@ wasm_interp_call_func_bytecode(WASMThread *self,
PUSH_I32(prev_page_count);
/* update the memory instance ptr */
memory = module->default_memory;
memory_data_size = NumBytesPerPage * memory->cur_page_count;
}

(void)reserved;
Expand Down
46 changes: 23 additions & 23 deletions core/iwasm/runtime/vmcore-wasm/wasm_runtime.c
Original file line number Diff line number Diff line change
Expand Up @@ -1459,13 +1459,13 @@ word_copy(uint32 *dest, uint32 *src, unsigned num)
#if !defined(__x86_64__) && !defined(__amd_64__)

typedef void (*GenericFunctionPointer)();
int64 invokeNative(uint32 *args, uint32 sz, GenericFunctionPointer f);
int64 invokeNative(GenericFunctionPointer f, uint32 *args, uint32 sz);

typedef float64 (*Float64FuncPtr)(uint32*, uint32, GenericFunctionPointer);
typedef float32 (*Float32FuncPtr)(uint32*, uint32, GenericFunctionPointer);
typedef int64 (*Int64FuncPtr)(uint32*, uint32, GenericFunctionPointer);
typedef int32 (*Int32FuncPtr)(uint32*, uint32, GenericFunctionPointer);
typedef void (*VoidFuncPtr)(uint32*, uint32, GenericFunctionPointer);
typedef float64 (*Float64FuncPtr)(GenericFunctionPointer f, uint32*, uint32);
typedef float32 (*Float32FuncPtr)(GenericFunctionPointer f, uint32*, uint32);
typedef int64 (*Int64FuncPtr)(GenericFunctionPointer f, uint32*, uint32);
typedef int32 (*Int32FuncPtr)(GenericFunctionPointer f, uint32*, uint32);
typedef void (*VoidFuncPtr)(GenericFunctionPointer f, uint32*, uint32);

static Int64FuncPtr invokeNative_Int64 = (Int64FuncPtr)invokeNative;
static Int32FuncPtr invokeNative_Int32 = (Int32FuncPtr)invokeNative;
Expand Down Expand Up @@ -1528,21 +1528,21 @@ wasm_runtime_invoke_native(void *func_ptr, WASMType *func_type,

argc1 = j;
if (func_type->result_count == 0) {
invokeNative_Void(argv1, argc1, func_ptr);
invokeNative_Void(func_ptr, argv1, argc1);
}
else {
switch (func_type->types[func_type->param_count]) {
case VALUE_TYPE_I32:
ret[0] = invokeNative_Int32(argv1, argc1, func_ptr);
ret[0] = invokeNative_Int32(func_ptr, argv1, argc1);
break;
case VALUE_TYPE_I64:
PUT_I64_TO_ADDR(ret, invokeNative_Int64(argv1, argc1, func_ptr));
PUT_I64_TO_ADDR(ret, invokeNative_Int64(func_ptr, argv1, argc1));
break;
case VALUE_TYPE_F32:
*(float32*)ret = invokeNative_Float32(argv1, argc1, func_ptr);
*(float32*)ret = invokeNative_Float32(func_ptr, argv1, argc1);
break;
case VALUE_TYPE_F64:
PUT_F64_TO_ADDR(ret, invokeNative_Float64(argv1, argc1, func_ptr));
PUT_F64_TO_ADDR(ret, invokeNative_Float64(func_ptr, argv1, argc1));
break;
default:
wasm_assert(0);
Expand All @@ -1558,13 +1558,13 @@ wasm_runtime_invoke_native(void *func_ptr, WASMType *func_type,
#else /* else of !defined(__x86_64__) && !defined(__amd_64__) */

typedef void (*GenericFunctionPointer)();
int64 invokeNative(uint64 *args, uint64 n_fps, uint64 n_stacks, GenericFunctionPointer f);
int64 invokeNative(GenericFunctionPointer f, uint64 *args, uint64 n_stacks);

typedef float64 (*Float64FuncPtr)(uint64*, uint64, uint64, GenericFunctionPointer);
typedef float32 (*Float32FuncPtr)(uint64*, uint64, uint64, GenericFunctionPointer);
typedef int64 (*Int64FuncPtr)(uint64*,uint64, uint64, GenericFunctionPointer);
typedef int32 (*Int32FuncPtr)(uint64*, uint64, uint64, GenericFunctionPointer);
typedef void (*VoidFuncPtr)(uint64*, uint64, uint64, GenericFunctionPointer);
typedef float64 (*Float64FuncPtr)(GenericFunctionPointer, uint64*, uint64);
typedef float32 (*Float32FuncPtr)(GenericFunctionPointer, uint64*, uint64);
typedef int64 (*Int64FuncPtr)(GenericFunctionPointer, uint64*,uint64);
typedef int32 (*Int32FuncPtr)(GenericFunctionPointer, uint64*, uint64);
typedef void (*VoidFuncPtr)(GenericFunctionPointer, uint64*, uint64);

static Float64FuncPtr invokeNative_Float64 = (Float64FuncPtr)invokeNative;
static Float32FuncPtr invokeNative_Float32 = (Float32FuncPtr)invokeNative;
Expand Down Expand Up @@ -1604,7 +1604,7 @@ wasm_runtime_invoke_native(void *func_ptr, WASMType *func_type,
}
}

fps = argv1 + 1;
fps = argv1;
ints = fps + MAX_REG_FLOATS;
stacks = ints + MAX_REG_INTS;

Expand Down Expand Up @@ -1645,21 +1645,21 @@ wasm_runtime_invoke_native(void *func_ptr, WASMType *func_type,
}

if (func_type->result_count == 0) {
invokeNative_Void(argv1, n_fps, n_stacks, func_ptr);
invokeNative_Void(func_ptr, argv1, n_stacks);
}
else {
switch (func_type->types[func_type->param_count]) {
case VALUE_TYPE_I32:
ret[0] = invokeNative_Int32(argv1, n_fps, n_stacks, func_ptr);
ret[0] = invokeNative_Int32(func_ptr, argv1, n_stacks);
break;
case VALUE_TYPE_I64:
PUT_I64_TO_ADDR(ret, invokeNative_Int64(argv1, n_fps, n_stacks, func_ptr));
PUT_I64_TO_ADDR(ret, invokeNative_Int64(func_ptr, argv1, n_stacks));
break;
case VALUE_TYPE_F32:
*(float32*)ret = invokeNative_Float32(argv1, n_fps, n_stacks, func_ptr);
*(float32*)ret = invokeNative_Float32(func_ptr, argv1, n_stacks);
break;
case VALUE_TYPE_F64:
PUT_F64_TO_ADDR(ret, invokeNative_Float64(argv1, n_fps, n_stacks, func_ptr));
PUT_F64_TO_ADDR(ret, invokeNative_Float64(func_ptr, argv1, n_stacks));
break;
default:
wasm_assert(0);
Expand Down