Diffstat (limited to 'py')
-rw-r--r--  py/asmthumb.c         13
-rw-r--r--  py/builtin.c          16
-rw-r--r--  py/builtin.h           2
-rw-r--r--  py/builtintables.c     5
-rw-r--r--  py/compile.c         147
-rw-r--r--  py/emit.h              6
-rw-r--r--  py/emitbc.c            4
-rw-r--r--  py/emitcpy.c           4
-rw-r--r--  py/emitglue.c         11
-rw-r--r--  py/emitglue.h          3
-rw-r--r--  py/emitinlinethumb.c   2
-rw-r--r--  py/emitnative.c      365
-rw-r--r--  py/modcmath.c          8
-rw-r--r--  py/mpconfig.h          8
-rw-r--r--  py/obj.c              10
-rw-r--r--  py/obj.h               3
-rw-r--r--  py/objcomplex.c       10
-rw-r--r--  py/objfun.c           61
-rw-r--r--  py/objint.c            2
-rw-r--r--  py/objlist.c           3
-rw-r--r--  py/objrange.c         64
-rw-r--r--  py/objreversed.c      74
-rw-r--r--  py/objstr.c           26
-rw-r--r--  py/objtuple.c          7
-rw-r--r--  py/parse.c             2
-rw-r--r--  py/py.mk               4
-rw-r--r--  py/qstrdefs.h         49
-rw-r--r--  py/runtime.c          52
-rw-r--r--  py/runtime.h           7
-rw-r--r--  py/runtime0.h         17
-rw-r--r--  py/scope.c            43
31 files changed, 719 insertions(+), 309 deletions(-)
diff --git a/py/asmthumb.c b/py/asmthumb.c
index a01599454..75ce168f7 100644
--- a/py/asmthumb.c
+++ b/py/asmthumb.c
@@ -496,17 +496,14 @@ void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp
asm_thumb_op16(as, 0x4780 | (REG_R9 << 3)); // blx reg
*/
- if (0) {
- // load ptr to function into register using immediate, then branch
- // not relocatable
- asm_thumb_mov_reg_i32(as, reg_temp, (mp_uint_t)fun_ptr);
- asm_thumb_op16(as, OP_BLX(reg_temp));
- } else if (1) {
+ if (fun_id < 32) {
+ // load ptr to function from table, indexed by fun_id (must be in range 0-31); 4 bytes
asm_thumb_op16(as, OP_FORMAT_9_10(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, reg_temp, REG_R7, fun_id));
asm_thumb_op16(as, OP_BLX(reg_temp));
} else {
- // use SVC
- asm_thumb_op16(as, OP_SVC(fun_id));
+ // load ptr to function into register using immediate; 6 bytes
+ asm_thumb_mov_reg_i32(as, reg_temp, (mp_uint_t)fun_ptr);
+ asm_thumb_op16(as, OP_BLX(reg_temp));
}
}
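
The guard above trades reach for size: Thumb format-9 LDR has a 5-bit word offset, so only the first 32 entries of the function table (whose base the emitted code keeps in R7) are reachable with the short 4-byte sequence; any other target falls back to the 6-byte immediate load, which embeds the absolute address and is therefore not relocatable. A minimal C sketch of the equivalent semantics (helper name hypothetical, not part of the patch):

    typedef void (*mp_fun_ptr_t)(void);

    // fun_table models the table whose base the emitted code holds in REG_R7
    static void bl_ind_semantics(const mp_fun_ptr_t *fun_table, unsigned fun_id,
                                 mp_fun_ptr_t fun_ptr) {
        if (fun_id < 32) {
            // LDR rT, [R7, #4*fun_id]; BLX rT -- 4 bytes of code
            fun_table[fun_id]();
        } else {
            // load 32-bit immediate into rT; BLX rT -- 6 bytes of code
            fun_ptr();
        }
    }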
diff --git a/py/builtin.c b/py/builtin.c
index 88a724fcd..1924e6080 100644
--- a/py/builtin.c
+++ b/py/builtin.c
@@ -99,7 +99,7 @@ STATIC mp_obj_t mp_builtin___repl_print__(mp_obj_t o) {
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin___repl_print___obj, mp_builtin___repl_print__);
-mp_obj_t mp_builtin_abs(mp_obj_t o_in) {
+STATIC mp_obj_t mp_builtin_abs(mp_obj_t o_in) {
if (MP_OBJ_IS_SMALL_INT(o_in)) {
mp_int_t val = MP_OBJ_SMALL_INT_VALUE(o_in);
if (val < 0) {
@@ -284,17 +284,6 @@ STATIC mp_obj_t mp_builtin_iter(mp_obj_t o_in) {
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_iter_obj, mp_builtin_iter);
-STATIC mp_obj_t mp_builtin_len(mp_obj_t o_in) {
- mp_obj_t len = mp_obj_len_maybe(o_in);
- if (len == NULL) {
- nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, "object of type '%s' has no len()", mp_obj_get_type_str(o_in)));
- } else {
- return len;
- }
-}
-
-MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_len_obj, mp_builtin_len);
-
STATIC mp_obj_t mp_builtin_max(uint n_args, const mp_obj_t *args) {
if (n_args == 1) {
// given an iterable
@@ -569,6 +558,7 @@ STATIC mp_obj_t mp_builtin_hasattr(mp_obj_t object_in, mp_obj_t attr_in) {
MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_hasattr_obj, mp_builtin_hasattr);
-// These two are defined in terms of MicroPython API functions right away
+// These are defined in terms of MicroPython API functions right away
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_len_obj, mp_obj_len);
MP_DEFINE_CONST_FUN_OBJ_0(mp_builtin_globals_obj, mp_globals_get);
MP_DEFINE_CONST_FUN_OBJ_0(mp_builtin_locals_obj, mp_locals_get);
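
The hand-written mp_builtin_len can be dropped because MP_DEFINE_CONST_FUN_OBJ_1 wraps any C function with signature mp_obj_t f(mp_obj_t) directly, and mp_obj_len already raises the TypeError itself. The general pattern, sketched with a hypothetical function for illustration:

    STATIC mp_obj_t my_ident(mp_obj_t o_in) {
        return o_in; // identity, for illustration only
    }
    MP_DEFINE_CONST_FUN_OBJ_1(my_ident_obj, my_ident);
    // my_ident_obj can now be referenced from a builtins or module table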
diff --git a/py/builtin.h b/py/builtin.h
index 425cfec09..af428d0c0 100644
--- a/py/builtin.h
+++ b/py/builtin.h
@@ -26,6 +26,7 @@
mp_obj_t mp_builtin___import__(uint n_args, mp_obj_t *args);
mp_obj_t mp_builtin_open(uint n_args, const mp_obj_t *args);
+mp_obj_t mp_builtin_len(mp_obj_t o);
MP_DECLARE_CONST_FUN_OBJ(mp_builtin___build_class___obj);
MP_DECLARE_CONST_FUN_OBJ(mp_builtin___import___obj);
@@ -88,3 +89,4 @@ extern struct _dummy_t mp_sys_stderr_obj;
// extmod modules
extern const mp_obj_module_t mp_module_uctypes;
+extern const mp_obj_module_t mp_module_zlibd;
diff --git a/py/builtintables.c b/py/builtintables.c
index ff530b93b..08b6b1649 100644
--- a/py/builtintables.c
+++ b/py/builtintables.c
@@ -66,6 +66,7 @@ STATIC const mp_map_elem_t mp_builtin_object_table[] = {
{ MP_OBJ_NEW_QSTR(MP_QSTR_property), (mp_obj_t)&mp_type_property },
#endif
{ MP_OBJ_NEW_QSTR(MP_QSTR_range), (mp_obj_t)&mp_type_range },
+ { MP_OBJ_NEW_QSTR(MP_QSTR_reversed), (mp_obj_t)&mp_type_reversed },
#if MICROPY_PY_BUILTINS_SET
{ MP_OBJ_NEW_QSTR(MP_QSTR_set), (mp_obj_t)&mp_type_set },
#endif
@@ -137,6 +138,7 @@ STATIC const mp_map_elem_t mp_builtin_object_table[] = {
{ MP_OBJ_NEW_QSTR(MP_QSTR_StopIteration), (mp_obj_t)&mp_type_StopIteration },
{ MP_OBJ_NEW_QSTR(MP_QSTR_SyntaxError), (mp_obj_t)&mp_type_SyntaxError },
{ MP_OBJ_NEW_QSTR(MP_QSTR_SystemError), (mp_obj_t)&mp_type_SystemError },
+ { MP_OBJ_NEW_QSTR(MP_QSTR_SystemExit), (mp_obj_t)&mp_type_SystemExit },
{ MP_OBJ_NEW_QSTR(MP_QSTR_TypeError), (mp_obj_t)&mp_type_TypeError },
{ MP_OBJ_NEW_QSTR(MP_QSTR_ValueError), (mp_obj_t)&mp_type_ValueError },
{ MP_OBJ_NEW_QSTR(MP_QSTR_ZeroDivisionError), (mp_obj_t)&mp_type_ZeroDivisionError },
@@ -195,6 +197,9 @@ STATIC const mp_map_elem_t mp_builtin_module_table[] = {
#if MICROPY_PY_UCTYPES
{ MP_OBJ_NEW_QSTR(MP_QSTR_uctypes), (mp_obj_t)&mp_module_uctypes },
#endif
+#if MICROPY_PY_ZLIBD
+ { MP_OBJ_NEW_QSTR(MP_QSTR_zlibd), (mp_obj_t)&mp_module_zlibd },
+#endif
// extra builtin modules as defined by a port
MICROPY_PORT_BUILTIN_MODULES
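
mp_module_zlibd itself lives outside this listing; a hedged sketch of the usual module boilerplate it presumably follows (the decompress entry and its function object are assumptions for illustration):

    STATIC const mp_map_elem_t mp_module_zlibd_globals_table[] = {
        { MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR_zlibd) },
        { MP_OBJ_NEW_QSTR(MP_QSTR_decompress), (mp_obj_t)&mod_zlibd_decompress_obj }, // hypothetical
    };
    STATIC MP_DEFINE_CONST_DICT(mp_module_zlibd_globals, mp_module_zlibd_globals_table);

    const mp_obj_module_t mp_module_zlibd = {
        .base = { &mp_type_module },
        .name = MP_QSTR_zlibd,
        .globals = (mp_obj_dict_t*)&mp_module_zlibd_globals,
    };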
diff --git a/py/compile.c b/py/compile.c
index e89554a4f..10bfb1f3c 100644
--- a/py/compile.c
+++ b/py/compile.c
@@ -48,8 +48,6 @@
// TODO need to mangle __attr names
-#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_THUMB)
-
typedef enum {
PN_none = 0,
#define DEF_RULE(rule, comp, kind, ...) PN_##rule,
@@ -354,7 +352,7 @@ STATIC mp_parse_node_t fold_constants(compiler_t *comp, mp_parse_node_t pn, mp_m
}
STATIC void compile_trailer_paren_helper(compiler_t *comp, mp_parse_node_t pn_arglist, bool is_method_call, int n_positional_extra);
-void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_kind_t kind);
+STATIC void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_kind_t kind);
STATIC void compile_node(compiler_t *comp, mp_parse_node_t pn);
STATIC uint comp_next_label(compiler_t *comp) {
@@ -729,9 +727,9 @@ STATIC void c_if_cond(compiler_t *comp, mp_parse_node_t pn, bool jump_if, int la
}
typedef enum { ASSIGN_STORE, ASSIGN_AUG_LOAD, ASSIGN_AUG_STORE } assign_kind_t;
-void c_assign(compiler_t *comp, mp_parse_node_t pn, assign_kind_t kind);
+STATIC void c_assign(compiler_t *comp, mp_parse_node_t pn, assign_kind_t kind);
-void c_assign_power(compiler_t *comp, mp_parse_node_struct_t *pns, assign_kind_t assign_kind) {
+STATIC void c_assign_power(compiler_t *comp, mp_parse_node_struct_t *pns, assign_kind_t assign_kind) {
if (assign_kind != ASSIGN_AUG_STORE) {
compile_node(comp, pns->nodes[0]);
}
@@ -792,7 +790,7 @@ cannot_assign:
}
// we need to allow for a caller passing in 1 initial node (node_head) followed by an array of nodes (nodes_tail)
-void c_assign_tuple(compiler_t *comp, mp_parse_node_t node_head, uint num_tail, mp_parse_node_t *nodes_tail) {
+STATIC void c_assign_tuple(compiler_t *comp, mp_parse_node_t node_head, uint num_tail, mp_parse_node_t *nodes_tail) {
uint num_head = (node_head == MP_PARSE_NODE_NULL) ? 0 : 1;
// look for star expression
@@ -832,7 +830,7 @@ void c_assign_tuple(compiler_t *comp, mp_parse_node_t node_head, uint num_tail,
}
// assigns top of stack to pn
-void c_assign(compiler_t *comp, mp_parse_node_t pn, assign_kind_t assign_kind) {
+STATIC void c_assign(compiler_t *comp, mp_parse_node_t pn, assign_kind_t assign_kind) {
tail_recursion:
if (MP_PARSE_NODE_IS_NULL(pn)) {
assert(0);
@@ -947,7 +945,7 @@ void c_assign(compiler_t *comp, mp_parse_node_t pn, assign_kind_t assign_kind) {
// if n_pos_defaults > 0 then there is a tuple on the stack with the positional defaults
// if n_kw_defaults > 0 then there is a dictionary on the stack with the keyword defaults
// if both exist, the tuple is above the dictionary (ie the first pop gets the tuple)
-void close_over_variables_etc(compiler_t *comp, scope_t *this_scope, int n_pos_defaults, int n_kw_defaults) {
+STATIC void close_over_variables_etc(compiler_t *comp, scope_t *this_scope, int n_pos_defaults, int n_kw_defaults) {
assert(n_pos_defaults >= 0);
assert(n_kw_defaults >= 0);
@@ -982,7 +980,7 @@ void close_over_variables_etc(compiler_t *comp, scope_t *this_scope, int n_pos_d
}
}
-void compile_funcdef_param(compiler_t *comp, mp_parse_node_t pn) {
+STATIC void compile_funcdef_param(compiler_t *comp, mp_parse_node_t pn) {
if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_typedargslist_star)) {
comp->have_star = true;
/* don't need to distinguish bare from named star
@@ -1254,7 +1252,7 @@ void compile_funcdef(compiler_t *comp, mp_parse_node_struct_t *pns) {
EMIT_ARG(store_id, fname);
}
-void c_del_stmt(compiler_t *comp, mp_parse_node_t pn) {
+STATIC void c_del_stmt(compiler_t *comp, mp_parse_node_t pn) {
if (MP_PARSE_NODE_IS_ID(pn)) {
EMIT_ARG(delete_id, MP_PARSE_NODE_LEAF_ARG(pn));
} else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_power)) {
@@ -1406,7 +1404,7 @@ void compile_raise_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
// q_base holds the base of the name
// eg a -> q_base=a
// a.b.c -> q_base=a
-void do_import_name(compiler_t *comp, mp_parse_node_t pn, qstr *q_base) {
+STATIC void do_import_name(compiler_t *comp, mp_parse_node_t pn, qstr *q_base) {
bool is_as = false;
if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_dotted_as_name)) {
mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
@@ -1466,7 +1464,7 @@ void do_import_name(compiler_t *comp, mp_parse_node_t pn, qstr *q_base) {
}
}
-void compile_dotted_as_name(compiler_t *comp, mp_parse_node_t pn) {
+STATIC void compile_dotted_as_name(compiler_t *comp, mp_parse_node_t pn) {
EMIT_ARG(load_const_small_int, 0); // level 0 import
EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // not importing from anything
qstr q_base;
@@ -1745,10 +1743,11 @@ void compile_while_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
EMIT_ARG(label_assign, break_label);
}
+#if !MICROPY_EMIT_CPYTHON
// TODO preload end and step onto stack if they are not constants
// Note that, as per semantics of for .. range, the final failing value should not be stored in the loop variable
// And, if the loop never runs, the loop variable should never be assigned
-void compile_for_stmt_optimised_range(compiler_t *comp, mp_parse_node_t pn_var, mp_parse_node_t pn_start, mp_parse_node_t pn_end, mp_parse_node_t pn_step, mp_parse_node_t pn_body, mp_parse_node_t pn_else) {
+STATIC void compile_for_stmt_optimised_range(compiler_t *comp, mp_parse_node_t pn_var, mp_parse_node_t pn_start, mp_parse_node_t pn_end, mp_parse_node_t pn_step, mp_parse_node_t pn_body, mp_parse_node_t pn_else) {
START_BREAK_CONTINUE_BLOCK
// note that we don't need to pop anything when breaking from an optimise for loop
@@ -1801,6 +1800,7 @@ void compile_for_stmt_optimised_range(compiler_t *comp, mp_parse_node_t pn_var,
EMIT_ARG(label_assign, break_label);
}
+#endif
void compile_for_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
#if !MICROPY_EMIT_CPYTHON
@@ -1884,7 +1884,7 @@ void compile_for_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
EMIT_ARG(label_assign, end_label);
}
-void compile_try_except(compiler_t *comp, mp_parse_node_t pn_body, int n_except, mp_parse_node_t *pn_excepts, mp_parse_node_t pn_else) {
+STATIC void compile_try_except(compiler_t *comp, mp_parse_node_t pn_body, int n_except, mp_parse_node_t *pn_excepts, mp_parse_node_t pn_else) {
// setup code
uint l1 = comp_next_label(comp);
uint success_label = comp_next_label(comp);
@@ -1976,7 +1976,7 @@ void compile_try_except(compiler_t *comp, mp_parse_node_t pn_body, int n_except,
EMIT_ARG(label_assign, l2);
}
-void compile_try_finally(compiler_t *comp, mp_parse_node_t pn_body, int n_except, mp_parse_node_t *pn_except, mp_parse_node_t pn_else, mp_parse_node_t pn_finally) {
+STATIC void compile_try_finally(compiler_t *comp, mp_parse_node_t pn_body, int n_except, mp_parse_node_t *pn_except, mp_parse_node_t pn_else, mp_parse_node_t pn_finally) {
uint l_finally_block = comp_next_label(comp);
EMIT_ARG(setup_finally, l_finally_block);
@@ -2028,7 +2028,7 @@ void compile_try_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
}
}
-void compile_with_stmt_helper(compiler_t *comp, int n, mp_parse_node_t *nodes, mp_parse_node_t body) {
+STATIC void compile_with_stmt_helper(compiler_t *comp, int n, mp_parse_node_t *nodes, mp_parse_node_t body) {
if (n == 0) {
// no more pre-bits, compile the body of the with
compile_node(comp, body);
@@ -2178,7 +2178,7 @@ void compile_expr_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
}
}
-void c_binary_op(compiler_t *comp, mp_parse_node_struct_t *pns, mp_binary_op_t binary_op) {
+STATIC void c_binary_op(compiler_t *comp, mp_parse_node_struct_t *pns, mp_binary_op_t binary_op) {
int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
compile_node(comp, pns->nodes[0]);
for (int i = 1; i < num_nodes; i += 1) {
@@ -2562,7 +2562,7 @@ void compile_atom_string(compiler_t *comp, mp_parse_node_struct_t *pns) {
}
// pns needs to have 2 nodes, first is lhs of comprehension, second is PN_comp_for node
-void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_kind_t kind) {
+STATIC void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_kind_t kind) {
assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 2);
assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_comp_for));
mp_parse_node_struct_t *pns_comp_for = (mp_parse_node_struct_t*)pns->nodes[1];
@@ -2857,7 +2857,7 @@ STATIC compile_function_t compile_function[] = {
#undef DEF_RULE
};
-void compile_node(compiler_t *comp, mp_parse_node_t pn) {
+STATIC void compile_node(compiler_t *comp, mp_parse_node_t pn) {
if (MP_PARSE_NODE_IS_NULL(pn)) {
// pass
} else if (MP_PARSE_NODE_IS_SMALL_INT(pn)) {
@@ -2902,11 +2902,10 @@ void compile_node(compiler_t *comp, mp_parse_node_t pn) {
}
}
-void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn, pn_kind_t pn_name, pn_kind_t pn_star, pn_kind_t pn_dbl_star, bool allow_annotations) {
+STATIC void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn, pn_kind_t pn_name, pn_kind_t pn_star, pn_kind_t pn_dbl_star) {
// TODO verify that *k and **k are last etc
qstr param_name = MP_QSTR_NULL;
uint param_flag = ID_FLAG_IS_PARAM;
- mp_parse_node_t pn_annotation = MP_PARSE_NODE_NULL;
if (MP_PARSE_NODE_IS_ID(pn)) {
param_name = MP_PARSE_NODE_LEAF_ARG(pn);
if (comp->have_star) {
@@ -2921,24 +2920,6 @@ void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn, pn_ki
mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
if (MP_PARSE_NODE_STRUCT_KIND(pns) == pn_name) {
param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
- //int node_index = 1; unused
- if (allow_annotations) {
- if (!MP_PARSE_NODE_IS_NULL(pns->nodes[1])) {
- // this parameter has an annotation
- pn_annotation = pns->nodes[1];
- }
- //node_index = 2; unused
- }
- /* this is obsolete now that num dict/default params are calculated in compile_funcdef_param
- if (!MP_PARSE_NODE_IS_NULL(pns->nodes[node_index])) {
- // this parameter has a default value
- if (comp->have_star) {
- comp->scope_cur->num_dict_params += 1;
- } else {
- comp->scope_cur->num_default_params += 1;
- }
- }
- */
if (comp->have_star) {
// comes after a star, so counts as a keyword-only parameter
comp->scope_cur->num_kwonly_args += 1;
@@ -2957,12 +2938,11 @@ void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn, pn_ki
// named star
comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARARGS;
param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
- } else if (allow_annotations && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_tfpdef)) {
+ } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_tfpdef)) {
// named star with possible annotation
comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARARGS;
pns = (mp_parse_node_struct_t*)pns->nodes[0];
param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
- pn_annotation = pns->nodes[1];
} else {
// shouldn't happen
assert(0);
@@ -2970,10 +2950,6 @@ void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn, pn_ki
} else if (MP_PARSE_NODE_STRUCT_KIND(pns) == pn_dbl_star) {
param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
param_flag = ID_FLAG_IS_PARAM | ID_FLAG_IS_DBL_STAR_PARAM;
- if (allow_annotations && !MP_PARSE_NODE_IS_NULL(pns->nodes[1])) {
- // this parameter has an annotation
- pn_annotation = pns->nodes[1];
- }
comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARKEYWORDS;
} else {
// TODO anything to implement?
@@ -2982,9 +2958,6 @@ void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn, pn_ki
}
if (param_name != MP_QSTR_NULL) {
- if (!MP_PARSE_NODE_IS_NULL(pn_annotation)) {
- // TODO this parameter has an annotation
- }
bool added;
id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, param_name, &added);
if (!added) {
@@ -2997,14 +2970,61 @@ void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn, pn_ki
}
STATIC void compile_scope_func_param(compiler_t *comp, mp_parse_node_t pn) {
- compile_scope_func_lambda_param(comp, pn, PN_typedargslist_name, PN_typedargslist_star, PN_typedargslist_dbl_star, true);
+ compile_scope_func_lambda_param(comp, pn, PN_typedargslist_name, PN_typedargslist_star, PN_typedargslist_dbl_star);
}
STATIC void compile_scope_lambda_param(compiler_t *comp, mp_parse_node_t pn) {
- compile_scope_func_lambda_param(comp, pn, PN_varargslist_name, PN_varargslist_star, PN_varargslist_dbl_star, false);
+ compile_scope_func_lambda_param(comp, pn, PN_varargslist_name, PN_varargslist_star, PN_varargslist_dbl_star);
+}
+
+STATIC void compile_scope_func_annotations(compiler_t *comp, mp_parse_node_t pn) {
+ if (!MP_PARSE_NODE_IS_STRUCT(pn)) {
+ // no annotation
+ return;
+ }
+
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+ if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_typedargslist_name) {
+ // named parameter with possible annotation
+ // fallthrough
+ } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_typedargslist_star) {
+ if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_tfpdef)) {
+ // named star with possible annotation
+ pns = (mp_parse_node_struct_t*)pns->nodes[0];
+ // fallthrough
+ } else {
+ // no annotation
+ return;
+ }
+ } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_typedargslist_dbl_star) {
+ // double star with possible annotation
+ // fallthrough
+ } else {
+ // no annotation
+ return;
+ }
+
+ mp_parse_node_t pn_annotation = pns->nodes[1];
+
+ if (!MP_PARSE_NODE_IS_NULL(pn_annotation)) {
+ #if MICROPY_EMIT_NATIVE
+ qstr param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+ id_info_t *id_info = scope_find(comp->scope_cur, param_name);
+ assert(id_info != NULL);
+
+ if (comp->scope_cur->emit_options == MP_EMIT_OPT_VIPER) {
+ if (MP_PARSE_NODE_IS_ID(pn_annotation)) {
+ qstr arg_type = MP_PARSE_NODE_LEAF_ARG(pn_annotation);
+ EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_ARG, id_info->local_num, arg_type);
+ } else {
+ compile_syntax_error(comp, pn_annotation, "parameter annotation must be an identifier");
+ }
+ }
+ #endif // MICROPY_EMIT_NATIVE
+ }
}
-void compile_scope_comp_iter(compiler_t *comp, mp_parse_node_t pn_iter, mp_parse_node_t pn_inner_expr, int l_top, int for_depth) {
+STATIC void compile_scope_comp_iter(compiler_t *comp, mp_parse_node_t pn_iter, mp_parse_node_t pn_inner_expr, int l_top, int for_depth) {
tail_recursion:
if (MP_PARSE_NODE_IS_NULL(pn_iter)) {
// no more nested if/for; compile inner expression
@@ -3128,10 +3148,29 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
if (comp->pass == MP_PASS_SCOPE) {
comp->have_star = false;
apply_to_single_or_list(comp, pns->nodes[1], PN_typedargslist, compile_scope_func_param);
+ } else {
+ // compile annotations; only needed on later compiler passes
+
+ // argument annotations
+ apply_to_single_or_list(comp, pns->nodes[1], PN_typedargslist, compile_scope_func_annotations);
+
+ // pns->nodes[2] is return/whole function annotation
+ mp_parse_node_t pn_annotation = pns->nodes[2];
+ if (!MP_PARSE_NODE_IS_NULL(pn_annotation)) {
+ #if MICROPY_EMIT_NATIVE
+ if (scope->emit_options == MP_EMIT_OPT_VIPER) {
+ // nodes[2] can be null or a test-expr
+ if (MP_PARSE_NODE_IS_ID(pn_annotation)) {
+ qstr ret_type = MP_PARSE_NODE_LEAF_ARG(pn_annotation);
+ EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_RETURN, 0, ret_type);
+ } else {
+ compile_syntax_error(comp, pn_annotation, "return annotation must be an identifier");
+ }
+ }
+ #endif // MICROPY_EMIT_NATIVE
+ }
}
- // pns->nodes[2] is return/whole function annotation
-
compile_node(comp, pns->nodes[3]); // 3 is function body
// emit return if it wasn't the last opcode
if (!EMIT(last_emit_was_return_value)) {
@@ -3589,7 +3628,7 @@ mp_obj_t mp_compile(mp_parse_node_t pn, qstr source_file, uint emit_opt, bool is
comp->emit_method_table = &emit_native_thumb_method_table;
#endif
comp->emit = emit_native;
- comp->emit_method_table->set_native_types(comp->emit, s->emit_options == MP_EMIT_OPT_VIPER);
+ EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_ENABLE, s->emit_options == MP_EMIT_OPT_VIPER, 0);
// native emitters need an extra pass to compute stack size
compile_scope(comp, s, MP_PASS_STACK_SIZE);
diff --git a/py/emit.h b/py/emit.h
index c760344c1..9a709a5c9 100644
--- a/py/emit.h
+++ b/py/emit.h
@@ -46,10 +46,14 @@ typedef enum {
#define MP_EMIT_BREAK_FROM_FOR (0x8000)
+#define MP_EMIT_NATIVE_TYPE_ENABLE (0)
+#define MP_EMIT_NATIVE_TYPE_RETURN (1)
+#define MP_EMIT_NATIVE_TYPE_ARG (2)
+
typedef struct _emit_t emit_t;
typedef struct _emit_method_table_t {
- void (*set_native_types)(emit_t *emit, bool do_native_types);
+ void (*set_native_type)(emit_t *emit, mp_uint_t op, mp_uint_t arg1, qstr arg2);
void (*start_pass)(emit_t *emit, pass_kind_t pass, scope_t *scope);
void (*end_pass)(emit_t *emit);
bool (*last_emit_was_return_value)(emit_t *emit);
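
The single set_native_type hook replaces the old boolean-only setter and is multiplexed on its first argument. The three call shapes the compiler uses in this patch:

    // enable/disable viper mode (arg1 = flag, arg2 unused)
    EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_ENABLE, do_viper_types, 0);
    // set the return type from an annotation (arg1 unused, arg2 = type-name qstr)
    EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_RETURN, 0, MP_QSTR_int);
    // set a parameter's type (arg1 = local number, arg2 = type-name qstr)
    EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_ARG, local_num, MP_QSTR_uint);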
diff --git a/py/emitbc.c b/py/emitbc.c
index 161e8c7f6..a1eacb298 100644
--- a/py/emitbc.c
+++ b/py/emitbc.c
@@ -265,7 +265,7 @@ STATIC void emit_write_bytecode_byte_signed_label(emit_t* emit, byte b1, uint la
c[2] = bytecode_offset >> 8;
}
-STATIC void emit_bc_set_native_types(emit_t *emit, bool do_native_types) {
+STATIC void emit_bc_set_native_type(emit_t *emit, mp_uint_t op, mp_uint_t arg1, qstr arg2) {
}
STATIC void emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
@@ -872,7 +872,7 @@ STATIC void emit_bc_end_except_handler(emit_t *emit) {
}
const emit_method_table_t emit_bc_method_table = {
- emit_bc_set_native_types,
+ emit_bc_set_native_type,
emit_bc_start_pass,
emit_bc_end_pass,
emit_bc_last_emit_was_return_value,
diff --git a/py/emitcpy.c b/py/emitcpy.c
index c0d15a871..4d0c08522 100644
--- a/py/emitcpy.c
+++ b/py/emitcpy.c
@@ -63,7 +63,7 @@ emit_t *emit_cpython_new(uint max_num_labels) {
return emit;
}
-STATIC void emit_cpy_set_native_types(emit_t *emit, bool do_native_types) {
+STATIC void emit_cpy_set_native_type(emit_t *emit, mp_uint_t op, mp_uint_t arg1, qstr arg2) {
}
STATIC void emit_cpy_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
@@ -822,7 +822,7 @@ STATIC void emit_cpy_setup_loop(emit_t *emit, uint label) {
}
const emit_method_table_t emit_cpython_method_table = {
- emit_cpy_set_native_types,
+ emit_cpy_set_native_type,
emit_cpy_start_pass,
emit_cpy_end_pass,
emit_cpy_last_emit_was_return_value,
diff --git a/py/emitglue.c b/py/emitglue.c
index 91570bc63..5be54a6fc 100644
--- a/py/emitglue.c
+++ b/py/emitglue.c
@@ -86,12 +86,14 @@ void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, byte *code, uint len, uint
#endif
}
-void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun, uint len, int n_args) {
+#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_THUMB
+void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun, uint len, int n_args, mp_uint_t type_sig) {
assert(kind == MP_CODE_NATIVE_PY || kind == MP_CODE_NATIVE_VIPER || kind == MP_CODE_NATIVE_ASM);
rc->kind = kind;
rc->scope_flags = 0;
rc->n_pos_args = n_args;
rc->u_native.fun = fun;
+ rc->u_native.type_sig = type_sig;
#ifdef DEBUG_PRINT
DEBUG_printf("assign native: kind=%d fun=%p len=%u n_args=%d\n", kind, fun, len, n_args);
@@ -111,6 +113,7 @@ void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void
#endif
#endif
}
+#endif
mp_obj_t mp_make_function_from_raw_code(mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args) {
DEBUG_OP_printf("make_function_from_raw_code %p\n", rc);
@@ -128,13 +131,19 @@ mp_obj_t mp_make_function_from_raw_code(mp_raw_code_t *rc, mp_obj_t def_args, mp
case MP_CODE_BYTECODE:
fun = mp_obj_new_fun_bc(rc->scope_flags, rc->arg_names, rc->n_pos_args, rc->n_kwonly_args, def_args, def_kw_args, rc->u_byte.code);
break;
+ #if MICROPY_EMIT_NATIVE
case MP_CODE_NATIVE_PY:
fun = mp_make_function_n(rc->n_pos_args, rc->u_native.fun);
break;
case MP_CODE_NATIVE_VIPER:
+ fun = mp_obj_new_fun_viper(rc->n_pos_args, rc->u_native.fun, rc->u_native.type_sig);
+ break;
+ #endif
+ #if MICROPY_EMIT_INLINE_THUMB
case MP_CODE_NATIVE_ASM:
fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->u_native.fun);
break;
+ #endif
default:
// raw code was never set (this should not happen)
assert(0);
diff --git a/py/emitglue.h b/py/emitglue.h
index c6cbb6283..f8363465e 100644
--- a/py/emitglue.h
+++ b/py/emitglue.h
@@ -48,6 +48,7 @@ typedef struct _mp_code_t {
} u_byte;
struct {
void *fun;
+ mp_uint_t type_sig; // for viper, compressed as 2-bit types; ret is in the 2 LSBs, then arg0, arg1, etc
} u_native;
};
} mp_raw_code_t;
@@ -55,7 +56,7 @@ typedef struct _mp_code_t {
mp_raw_code_t *mp_emit_glue_new_raw_code(void);
void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, byte *code, uint len, uint n_pos_args, uint n_kwonly_args, qstr *arg_names, uint scope_flags);
-void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *f, uint len, int n_args);
+void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *f, uint len, int n_args, mp_uint_t type_sig);
mp_obj_t mp_make_function_from_raw_code(mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args);
mp_obj_t mp_make_closure_from_raw_code(mp_raw_code_t *rc, uint n_closed_over, const mp_obj_t *args);
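
The packing matches the loop added in emit_native_end_pass below: the return type occupies the two least-significant bits and each argument's type follows, two bits per slot. Hypothetical helpers (not in the patch) to make the layout concrete:

    static mp_uint_t viper_pack_type_sig(mp_uint_t ret_type, const mp_uint_t *arg_types, mp_uint_t n_args) {
        mp_uint_t sig = ret_type & 3; // ret in bits 0-1
        for (mp_uint_t i = 0; i < n_args; i++) {
            sig |= (arg_types[i] & 3) << (i * 2 + 2); // arg i in bits 2i+2..2i+3
        }
        return sig;
    }

    static mp_uint_t viper_ret_type(mp_uint_t type_sig) { return type_sig & 3; }
    static mp_uint_t viper_arg_type(mp_uint_t type_sig, mp_uint_t i) { return (type_sig >> (i * 2 + 2)) & 3; }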
diff --git a/py/emitinlinethumb.c b/py/emitinlinethumb.c
index 3d0efd564..89e128c08 100644
--- a/py/emitinlinethumb.c
+++ b/py/emitinlinethumb.c
@@ -99,7 +99,7 @@ STATIC bool emit_inline_thumb_end_pass(emit_inline_asm_t *emit) {
if (emit->pass == MP_PASS_EMIT) {
void *f = asm_thumb_get_code(emit->as);
- mp_emit_glue_assign_native(emit->scope->raw_code, MP_CODE_NATIVE_ASM, f, asm_thumb_get_code_size(emit->as), emit->scope->num_pos_args);
+ mp_emit_glue_assign_native(emit->scope->raw_code, MP_CODE_NATIVE_ASM, f, asm_thumb_get_code_size(emit->as), emit->scope->num_pos_args, 0);
}
return emit->success;
diff --git a/py/emitnative.c b/py/emitnative.c
index dde582d09..8385f9905 100644
--- a/py/emitnative.c
+++ b/py/emitnative.c
@@ -126,10 +126,11 @@ typedef enum {
} stack_info_kind_t;
typedef enum {
+ VTYPE_PYOBJ = MP_NATIVE_TYPE_OBJ,
+ VTYPE_BOOL = MP_NATIVE_TYPE_BOOL,
+ VTYPE_INT = MP_NATIVE_TYPE_INT,
+ VTYPE_UINT = MP_NATIVE_TYPE_UINT,
VTYPE_UNBOUND,
- VTYPE_PYOBJ,
- VTYPE_BOOL,
- VTYPE_INT,
VTYPE_PTR,
VTYPE_PTR_NONE,
VTYPE_BUILTIN_V_INT,
@@ -149,6 +150,8 @@ struct _emit_t {
bool do_viper_types;
+ vtype_kind_t return_vtype;
+
uint local_vtype_alloc;
vtype_kind_t *local_vtype;
@@ -190,8 +193,30 @@ void EXPORT_FUN(free)(emit_t *emit) {
m_del_obj(emit_t, emit);
}
-STATIC void emit_native_set_viper_types(emit_t *emit, bool do_viper_types) {
- emit->do_viper_types = do_viper_types;
+STATIC void emit_native_set_native_type(emit_t *emit, mp_uint_t op, mp_uint_t arg1, qstr arg2) {
+ switch (op) {
+ case MP_EMIT_NATIVE_TYPE_ENABLE:
+ emit->do_viper_types = arg1;
+ break;
+
+ default: {
+ vtype_kind_t type;
+ switch (arg2) {
+ case MP_QSTR_object: type = VTYPE_PYOBJ; break;
+ case MP_QSTR_bool: type = VTYPE_BOOL; break;
+ case MP_QSTR_int: type = VTYPE_INT; break;
+ case MP_QSTR_uint: type = VTYPE_UINT; break;
+ default: printf("ViperTypeError: unknown type %s\n", qstr_str(arg2)); return;
+ }
+ if (op == MP_EMIT_NATIVE_TYPE_RETURN) {
+ emit->return_vtype = type;
+ } else {
+ assert(arg1 < emit->local_vtype_alloc);
+ emit->local_vtype[arg1] = type;
+ }
+ break;
+ }
+ }
}
STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
@@ -214,23 +239,21 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop
emit->stack_info = m_new(stack_info_t, emit->stack_info_alloc);
}
- if (emit->do_viper_types) {
- // TODO set types of arguments based on type signature
- for (int i = 0; i < emit->local_vtype_alloc; i++) {
- emit->local_vtype[i] = VTYPE_UNBOUND;
- }
- for (int i = 0; i < emit->stack_info_alloc; i++) {
- emit->stack_info[i].kind = STACK_VALUE;
- emit->stack_info[i].vtype = VTYPE_UNBOUND;
- }
- } else {
- for (int i = 0; i < emit->local_vtype_alloc; i++) {
- emit->local_vtype[i] = VTYPE_PYOBJ;
- }
- for (int i = 0; i < emit->stack_info_alloc; i++) {
- emit->stack_info[i].kind = STACK_VALUE;
- emit->stack_info[i].vtype = VTYPE_PYOBJ;
- }
+ // set default type for return and arguments
+ emit->return_vtype = VTYPE_PYOBJ;
+ for (mp_uint_t i = 0; i < emit->scope->num_pos_args; i++) {
+ emit->local_vtype[i] = VTYPE_PYOBJ;
+ }
+
+ // local variables begin unbound, and have unknown type
+ for (mp_uint_t i = emit->scope->num_pos_args; i < emit->local_vtype_alloc; i++) {
+ emit->local_vtype[i] = VTYPE_UNBOUND;
+ }
+
+ // values on stack begin unbound
+ for (mp_uint_t i = 0; i < emit->stack_info_alloc; i++) {
+ emit->stack_info[i].kind = STACK_VALUE;
+ emit->stack_info[i].vtype = VTYPE_UNBOUND;
}
#if N_X64
@@ -310,11 +333,20 @@ STATIC void emit_native_end_pass(emit_t *emit) {
if (emit->pass == MP_PASS_EMIT) {
#if N_X64
void *f = asm_x64_get_code(emit->as);
- mp_emit_glue_assign_native(emit->scope->raw_code, emit->do_viper_types ? MP_CODE_NATIVE_VIPER : MP_CODE_NATIVE_PY, f, asm_x64_get_code_size(emit->as), emit->scope->num_pos_args);
+ mp_uint_t f_len = asm_x64_get_code_size(emit->as);
#elif N_THUMB
void *f = asm_thumb_get_code(emit->as);
- mp_emit_glue_assign_native(emit->scope->raw_code, emit->do_viper_types ? MP_CODE_NATIVE_VIPER : MP_CODE_NATIVE_PY, f, asm_thumb_get_code_size(emit->as), emit->scope->num_pos_args);
+ mp_uint_t f_len = asm_thumb_get_code_size(emit->as);
#endif
+
+ // compute type signature
+ // TODO check that viper types here convert correctly to valid types for emit glue
+ mp_uint_t type_sig = emit->return_vtype & 3;
+ for (mp_uint_t i = 0; i < emit->scope->num_pos_args; i++) {
+ type_sig |= (emit->local_vtype[i] & 3) << (i * 2 + 2);
+ }
+
+ mp_emit_glue_assign_native(emit->scope->raw_code, emit->do_viper_types ? MP_CODE_NATIVE_VIPER : MP_CODE_NATIVE_PY, f, f_len, emit->scope->num_pos_args, type_sig);
}
}
@@ -498,130 +530,138 @@ STATIC void emit_post_push_reg_reg_reg_reg(emit_t *emit, vtype_kind_t vtypea, in
emit_post_push_reg(emit, vtyped, regd);
}
-// vtype of all n_pop objects is VTYPE_PYOBJ
-// does not use any temporary registers (but may use reg_dest before loading it with stack pointer)
-// TODO this needs some thinking for viper code
-STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, int reg_dest, int n_pop) {
- need_reg_all(emit);
- for (int i = 0; i < n_pop; i++) {
- stack_info_t *si = &emit->stack_info[emit->stack_size - 1 - i];
- // must push any imm's to stack
- // must convert them to VTYPE_PYOBJ for viper code
- if (si->kind == STACK_IMM) {
- si->kind = STACK_VALUE;
- switch (si->vtype) {
- case VTYPE_PYOBJ:
- ASM_MOV_IMM_TO_LOCAL_USING(si->u_imm, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
- break;
- case VTYPE_BOOL:
- si->vtype = VTYPE_PYOBJ;
- if (si->u_imm == 0) {
- ASM_MOV_IMM_TO_LOCAL_USING((mp_uint_t)mp_const_false, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
- } else {
- ASM_MOV_IMM_TO_LOCAL_USING((mp_uint_t)mp_const_true, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
- }
- break;
- case VTYPE_INT:
- si->vtype = VTYPE_PYOBJ;
- ASM_MOV_IMM_TO_LOCAL_USING((si->u_imm << 1) | 1, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
- break;
- default:
- // not handled
- assert(0);
- }
- }
- assert(si->kind == STACK_VALUE);
- assert(si->vtype == VTYPE_PYOBJ);
- }
- adjust_stack(emit, -n_pop);
- ASM_MOV_LOCAL_ADDR_TO_REG(emit->stack_start + emit->stack_size, reg_dest);
-}
-
-// vtype of all n_push objects is VTYPE_PYOBJ
-STATIC void emit_get_stack_pointer_to_reg_for_push(emit_t *emit, int reg_dest, int n_push) {
- need_reg_all(emit);
- for (int i = 0; i < n_push; i++) {
- emit->stack_info[emit->stack_size + i].kind = STACK_VALUE;
- emit->stack_info[emit->stack_size + i].vtype = VTYPE_PYOBJ;
- }
- ASM_MOV_LOCAL_ADDR_TO_REG(emit->stack_start + emit->stack_size, reg_dest);
- adjust_stack(emit, n_push);
-}
-
-STATIC void emit_call(emit_t *emit, mp_fun_kind_t fun_kind, void *fun) {
+STATIC void emit_call(emit_t *emit, mp_fun_kind_t fun_kind) {
need_reg_all(emit);
#if N_X64
- asm_x64_call_ind(emit->as, fun, REG_RAX);
+ asm_x64_call_ind(emit->as, mp_fun_table[fun_kind], REG_RAX);
#elif N_THUMB
asm_thumb_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
#endif
}
-STATIC void emit_call_with_imm_arg(emit_t *emit, mp_fun_kind_t fun_kind, void *fun, mp_int_t arg_val, int arg_reg) {
+STATIC void emit_call_with_imm_arg(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg) {
need_reg_all(emit);
ASM_MOV_IMM_TO_REG(arg_val, arg_reg);
#if N_X64
- asm_x64_call_ind(emit->as, fun, REG_RAX);
+ asm_x64_call_ind(emit->as, mp_fun_table[fun_kind], REG_RAX);
#elif N_THUMB
asm_thumb_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
#endif
}
// the first arg is stored in the code aligned on a mp_uint_t boundary
-STATIC void emit_call_with_imm_arg_aligned(emit_t *emit, mp_fun_kind_t fun_kind, void *fun, mp_int_t arg_val, int arg_reg) {
+STATIC void emit_call_with_imm_arg_aligned(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg) {
need_reg_all(emit);
ASM_MOV_ALIGNED_IMM_TO_REG(arg_val, arg_reg);
#if N_X64
- asm_x64_call_ind(emit->as, fun, REG_RAX);
+ asm_x64_call_ind(emit->as, mp_fun_table[fun_kind], REG_RAX);
#elif N_THUMB
asm_thumb_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
#endif
}
-STATIC void emit_call_with_2_imm_args(emit_t *emit, mp_fun_kind_t fun_kind, void *fun, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2) {
+STATIC void emit_call_with_2_imm_args(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2) {
need_reg_all(emit);
ASM_MOV_IMM_TO_REG(arg_val1, arg_reg1);
ASM_MOV_IMM_TO_REG(arg_val2, arg_reg2);
#if N_X64
- asm_x64_call_ind(emit->as, fun, REG_RAX);
+ asm_x64_call_ind(emit->as, mp_fun_table[fun_kind], REG_RAX);
#elif N_THUMB
asm_thumb_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
#endif
}
// the first arg is stored in the code aligned on a mp_uint_t boundary
-STATIC void emit_call_with_3_imm_args_and_first_aligned(emit_t *emit, mp_fun_kind_t fun_kind, void *fun, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2, mp_int_t arg_val3, int arg_reg3) {
+STATIC void emit_call_with_3_imm_args_and_first_aligned(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2, mp_int_t arg_val3, int arg_reg3) {
need_reg_all(emit);
ASM_MOV_ALIGNED_IMM_TO_REG(arg_val1, arg_reg1);
ASM_MOV_IMM_TO_REG(arg_val2, arg_reg2);
ASM_MOV_IMM_TO_REG(arg_val3, arg_reg3);
#if N_X64
- asm_x64_call_ind(emit->as, fun, REG_RAX);
+ asm_x64_call_ind(emit->as, mp_fun_table[fun_kind], REG_RAX);
#elif N_THUMB
asm_thumb_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
#endif
}
-STATIC void emit_native_load_id(emit_t *emit, qstr qstr) {
- // check for built-ins
- if (strcmp(qstr_str(qstr), "v_int") == 0) {
- assert(0);
- emit_native_pre(emit);
- //emit_post_push_blank(emit, VTYPE_BUILTIN_V_INT);
+// vtype of all n_pop objects is VTYPE_PYOBJ
+// Will convert any items that are not VTYPE_PYOBJ to this type and put them back on the stack.
+// If any conversions of non-immediate values are needed, then it uses REG_ARG_1, REG_ARG_2 and REG_RET.
+// Otherwise, it does not use any temporary registers (but may use reg_dest before loading it with stack pointer).
+STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_dest, mp_uint_t n_pop) {
+ need_reg_all(emit);
- // not a built-in, so do usual thing
- } else {
- emit_common_load_id(emit, &EXPORT_FUN(method_table), emit->scope, qstr);
+ // First, store any immediate values to their respective place on the stack.
+ for (mp_uint_t i = 0; i < n_pop; i++) {
+ stack_info_t *si = &emit->stack_info[emit->stack_size - 1 - i];
+ // must push any imm's to stack
+ // must convert them to VTYPE_PYOBJ for viper code
+ if (si->kind == STACK_IMM) {
+ si->kind = STACK_VALUE;
+ switch (si->vtype) {
+ case VTYPE_PYOBJ:
+ ASM_MOV_IMM_TO_LOCAL_USING(si->u_imm, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+ break;
+ case VTYPE_BOOL:
+ if (si->u_imm == 0) {
+ ASM_MOV_IMM_TO_LOCAL_USING((mp_uint_t)mp_const_false, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+ } else {
+ ASM_MOV_IMM_TO_LOCAL_USING((mp_uint_t)mp_const_true, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+ }
+ si->vtype = VTYPE_PYOBJ;
+ break;
+ case VTYPE_INT:
+ case VTYPE_UINT:
+ ASM_MOV_IMM_TO_LOCAL_USING((si->u_imm << 1) | 1, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+ si->vtype = VTYPE_PYOBJ;
+ break;
+ default:
+ // not handled
+ assert(0);
+ }
+ }
+
+ // verify that this value is on the stack
+ assert(si->kind == STACK_VALUE);
}
+
+ // Second, convert any non-VTYPE_PYOBJ to that type.
+ for (mp_uint_t i = 0; i < n_pop; i++) {
+ stack_info_t *si = &emit->stack_info[emit->stack_size - 1 - i];
+ if (si->vtype != VTYPE_PYOBJ) {
+ mp_uint_t local_num = emit->stack_start + emit->stack_size - 1 - i;
+ ASM_MOV_LOCAL_TO_REG(local_num, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, si->vtype, REG_ARG_2); // arg2 = type
+ ASM_MOV_REG_TO_LOCAL(REG_RET, local_num);
+ si->vtype = VTYPE_PYOBJ;
+ }
+ }
+
+ // Adjust the stack for a pop of n_pop items, and load the stack pointer into reg_dest.
+ adjust_stack(emit, -n_pop);
+ ASM_MOV_LOCAL_ADDR_TO_REG(emit->stack_start + emit->stack_size, reg_dest);
+}
+
+// vtype of all n_push objects is VTYPE_PYOBJ
+STATIC void emit_get_stack_pointer_to_reg_for_push(emit_t *emit, mp_uint_t reg_dest, mp_uint_t n_push) {
+ need_reg_all(emit);
+ for (mp_uint_t i = 0; i < n_push; i++) {
+ emit->stack_info[emit->stack_size + i].kind = STACK_VALUE;
+ emit->stack_info[emit->stack_size + i].vtype = VTYPE_PYOBJ;
+ }
+ ASM_MOV_LOCAL_ADDR_TO_REG(emit->stack_start + emit->stack_size, reg_dest);
+ adjust_stack(emit, n_push);
+}
+
+STATIC void emit_native_load_id(emit_t *emit, qstr qstr) {
+ emit_common_load_id(emit, &EXPORT_FUN(method_table), emit->scope, qstr);
}
STATIC void emit_native_store_id(emit_t *emit, qstr qstr) {
- // TODO check for built-ins and disallow
emit_common_store_id(emit, &EXPORT_FUN(method_table), emit->scope, qstr);
}
STATIC void emit_native_delete_id(emit_t *emit, qstr qstr) {
- // TODO check for built-ins and disallow
emit_common_delete_id(emit, &EXPORT_FUN(method_table), emit->scope, qstr);
}
@@ -644,7 +684,7 @@ STATIC void emit_native_import_name(emit_t *emit, qstr qst) {
emit_pre_pop_reg_reg(emit, &vtype_fromlist, REG_ARG_2, &vtype_level, REG_ARG_3); // arg2 = fromlist, arg3 = level
assert(vtype_fromlist == VTYPE_PYOBJ);
assert(vtype_level == VTYPE_PYOBJ);
- emit_call_with_imm_arg(emit, MP_F_IMPORT_NAME, mp_import_name, qst, REG_ARG_1); // arg1 = import name
+ emit_call_with_imm_arg(emit, MP_F_IMPORT_NAME, qst, REG_ARG_1); // arg1 = import name
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
@@ -654,7 +694,7 @@ STATIC void emit_native_import_from(emit_t *emit, qstr qst) {
vtype_kind_t vtype_module;
emit_access_stack(emit, 1, &vtype_module, REG_ARG_1); // arg1 = module
assert(vtype_module == VTYPE_PYOBJ);
- emit_call_with_imm_arg(emit, MP_F_IMPORT_FROM, mp_import_from, qst, REG_ARG_2); // arg2 = import name
+ emit_call_with_imm_arg(emit, MP_F_IMPORT_FROM, qst, REG_ARG_2); // arg2 = import name
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
@@ -663,7 +703,7 @@ STATIC void emit_native_import_star(emit_t *emit) {
vtype_kind_t vtype_module;
emit_pre_pop_reg(emit, &vtype_module, REG_ARG_1); // arg1 = module
assert(vtype_module == VTYPE_PYOBJ);
- emit_call(emit, MP_F_IMPORT_ALL, mp_import_all);
+ emit_call(emit, MP_F_IMPORT_ALL);
emit_post(emit);
}
@@ -705,14 +745,14 @@ STATIC void emit_native_load_const_int(emit_t *emit, qstr qst) {
DEBUG_printf("load_const_int %s\n", qstr_str(qst));
// for viper: load integer, check fits in 32 bits
emit_native_pre(emit);
- emit_call_with_imm_arg(emit, MP_F_LOAD_CONST_INT, mp_load_const_int, qst, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_LOAD_CONST_INT, qst, REG_ARG_1);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
STATIC void emit_native_load_const_dec(emit_t *emit, qstr qstr) {
// for viper, a float/complex is just a Python object
emit_native_pre(emit);
- emit_call_with_imm_arg(emit, MP_F_LOAD_CONST_DEC, mp_load_const_dec, qstr, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_LOAD_CONST_DEC, qstr, REG_ARG_1);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
@@ -725,9 +765,9 @@ STATIC void emit_native_load_const_str(emit_t *emit, qstr qstr, bool bytes) {
emit_post_push_imm(emit, VTYPE_PTR, (mp_uint_t)qstr_str(qstr));
} else {
if (bytes) {
- emit_call_with_imm_arg(emit, 0, mp_load_const_bytes, qstr, REG_ARG_1); // TODO need to add function to runtime table
+ emit_call_with_imm_arg(emit, MP_F_LOAD_CONST_BYTES, qstr, REG_ARG_1);
} else {
- emit_call_with_imm_arg(emit, MP_F_LOAD_CONST_STR, mp_load_const_str, qstr, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_LOAD_CONST_STR, qstr, REG_ARG_1);
}
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
@@ -775,13 +815,13 @@ STATIC void emit_native_load_deref(emit_t *emit, qstr qstr, int local_num) {
STATIC void emit_native_load_name(emit_t *emit, qstr qstr) {
emit_native_pre(emit);
- emit_call_with_imm_arg(emit, MP_F_LOAD_NAME, mp_load_name, qstr, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_LOAD_NAME, qstr, REG_ARG_1);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
STATIC void emit_native_load_global(emit_t *emit, qstr qstr) {
emit_native_pre(emit);
- emit_call_with_imm_arg(emit, MP_F_LOAD_GLOBAL, mp_load_global, qstr, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_LOAD_GLOBAL, qstr, REG_ARG_1);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
@@ -793,7 +833,7 @@ STATIC void emit_native_load_attr(emit_t *emit, qstr qstr) {
vtype_kind_t vtype_base;
emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
assert(vtype_base == VTYPE_PYOBJ);
- emit_call_with_imm_arg(emit, MP_F_LOAD_ATTR, mp_load_attr, qstr, REG_ARG_2); // arg2 = attribute name
+ emit_call_with_imm_arg(emit, MP_F_LOAD_ATTR, qstr, REG_ARG_2); // arg2 = attribute name
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
@@ -802,12 +842,12 @@ STATIC void emit_native_load_method(emit_t *emit, qstr qstr) {
emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
assert(vtype_base == VTYPE_PYOBJ);
emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr
- emit_call_with_imm_arg(emit, MP_F_LOAD_METHOD, mp_load_method, qstr, REG_ARG_2); // arg2 = method name
+ emit_call_with_imm_arg(emit, MP_F_LOAD_METHOD, qstr, REG_ARG_2); // arg2 = method name
}
STATIC void emit_native_load_build_class(emit_t *emit) {
emit_native_pre(emit);
- emit_call(emit, MP_F_LOAD_BUILD_CLASS, mp_load_build_class);
+ emit_call(emit, MP_F_LOAD_BUILD_CLASS);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
@@ -815,11 +855,10 @@ STATIC void emit_native_load_subscr(emit_t *emit) {
vtype_kind_t vtype_lhs, vtype_rhs;
emit_pre_pop_reg_reg(emit, &vtype_rhs, REG_ARG_2, &vtype_lhs, REG_ARG_1);
if (vtype_lhs == VTYPE_PYOBJ && vtype_rhs == VTYPE_PYOBJ) {
- emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, mp_obj_subscr, (mp_uint_t)MP_OBJ_SENTINEL, REG_ARG_3);
+ emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, (mp_uint_t)MP_OBJ_SENTINEL, REG_ARG_3);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
} else {
printf("ViperTypeError: can't do subscr of types %d and %d\n", vtype_lhs, vtype_rhs);
- assert(0);
}
}
@@ -867,13 +906,21 @@ STATIC void emit_native_store_name(emit_t *emit, qstr qstr) {
vtype_kind_t vtype;
emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
assert(vtype == VTYPE_PYOBJ);
- emit_call_with_imm_arg(emit, MP_F_STORE_NAME, mp_store_name, qstr, REG_ARG_1); // arg1 = name
+ emit_call_with_imm_arg(emit, MP_F_STORE_NAME, qstr, REG_ARG_1); // arg1 = name
emit_post(emit);
}
STATIC void emit_native_store_global(emit_t *emit, qstr qstr) {
- // not implemented
- assert(0);
+ vtype_kind_t vtype = peek_vtype(emit);
+ if (vtype == VTYPE_PYOBJ) {
+ emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
+ } else {
+ emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, vtype, REG_ARG_2); // arg2 = type
+ ASM_MOV_REG_TO_REG(REG_RET, REG_ARG_2);
+ }
+ emit_call_with_imm_arg(emit, MP_F_STORE_GLOBAL, qstr, REG_ARG_1); // arg1 = name
+ emit_post(emit);
}
STATIC void emit_native_store_attr(emit_t *emit, qstr qstr) {
@@ -881,7 +928,7 @@ STATIC void emit_native_store_attr(emit_t *emit, qstr qstr) {
emit_pre_pop_reg_reg(emit, &vtype_base, REG_ARG_1, &vtype_val, REG_ARG_3); // arg1 = base, arg3 = value
assert(vtype_base == VTYPE_PYOBJ);
assert(vtype_val == VTYPE_PYOBJ);
- emit_call_with_imm_arg(emit, MP_F_STORE_ATTR, mp_store_attr, qstr, REG_ARG_2); // arg2 = attribute name
+ emit_call_with_imm_arg(emit, MP_F_STORE_ATTR, qstr, REG_ARG_2); // arg2 = attribute name
emit_post(emit);
}
@@ -895,7 +942,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
assert(vtype_index == VTYPE_PYOBJ);
assert(vtype_base == VTYPE_PYOBJ);
assert(vtype_value == VTYPE_PYOBJ);
- emit_call(emit, MP_F_OBJ_SUBSCR, mp_obj_subscr);
+ emit_call(emit, MP_F_OBJ_SUBSCR);
}
STATIC void emit_native_delete_fast(emit_t *emit, qstr qstr, int local_num) {
@@ -925,7 +972,7 @@ STATIC void emit_native_delete_attr(emit_t *emit, qstr qstr) {
vtype_kind_t vtype_base;
emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
assert(vtype_base == VTYPE_PYOBJ);
- emit_call_with_2_imm_args(emit, MP_F_STORE_ATTR, mp_store_attr, qstr, REG_ARG_2, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3); // arg2 = attribute name, arg3 = value (null for delete)
+ emit_call_with_2_imm_args(emit, MP_F_STORE_ATTR, qstr, REG_ARG_2, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3); // arg2 = attribute name, arg3 = value (null for delete)
emit_post(emit);
}
@@ -934,7 +981,7 @@ STATIC void emit_native_delete_subscr(emit_t *emit) {
emit_pre_pop_reg_reg(emit, &vtype_index, REG_ARG_2, &vtype_base, REG_ARG_1); // index, base
assert(vtype_index == VTYPE_PYOBJ);
assert(vtype_base == VTYPE_PYOBJ);
- emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, mp_obj_subscr, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3);
+ emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3);
}
STATIC void emit_native_dup_top(emit_t *emit) {
@@ -988,7 +1035,7 @@ STATIC void emit_native_jump_helper(emit_t *emit, uint label, bool pop) {
}
} else if (vtype == VTYPE_PYOBJ) {
emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
- emit_call(emit, MP_F_OBJ_IS_TRUE, mp_obj_is_true);
+ emit_call(emit, MP_F_OBJ_IS_TRUE);
if (!pop) {
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
@@ -1072,7 +1119,7 @@ STATIC void emit_native_setup_except(emit_t *emit, uint label) {
// need to commit stack because we may jump elsewhere
need_stack_settled(emit);
emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_1, sizeof(nlr_buf_t) / sizeof(mp_uint_t)); // arg1 = pointer to nlr buf
- emit_call(emit, 0, nlr_push); // TODO need to add function to runtime table
+ emit_call(emit, MP_F_NLR_PUSH);
#if N_X64
asm_x64_test_r8_with_r8(emit->as, REG_RET, REG_RET);
asm_x64_jcc_label(emit->as, JCC_JNZ, label);
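
nlr_push/nlr_pop, now reachable through the MP_F_NLR_PUSH and MP_F_NLR_POP table entries, are MicroPython's setjmp-style non-local return mechanism; the emitted code mirrors the standard C usage pattern:

    nlr_buf_t nlr;
    if (nlr_push(&nlr) == 0) {
        // protected code runs here; on normal completion, unwind the handler
        nlr_pop();
    } else {
        // an exception propagated; nlr.ret_val holds the raised object
    }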
@@ -1098,7 +1145,7 @@ STATIC void emit_native_get_iter(emit_t *emit) {
vtype_kind_t vtype;
emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
assert(vtype == VTYPE_PYOBJ);
- emit_call(emit, MP_F_GETITER, mp_getiter);
+ emit_call(emit, MP_F_GETITER);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
@@ -1107,7 +1154,7 @@ STATIC void emit_native_for_iter(emit_t *emit, uint label) {
vtype_kind_t vtype;
emit_access_stack(emit, 1, &vtype, REG_ARG_1);
assert(vtype == VTYPE_PYOBJ);
- emit_call(emit, MP_F_ITERNEXT, mp_iternext);
+ emit_call(emit, MP_F_ITERNEXT);
ASM_MOV_IMM_TO_REG((mp_uint_t)MP_OBJ_STOP_ITERATION, REG_TEMP1);
#if N_X64
asm_x64_cmp_r64_with_r64(emit->as, REG_RET, REG_TEMP1);
@@ -1128,7 +1175,7 @@ STATIC void emit_native_for_iter_end(emit_t *emit) {
STATIC void emit_native_pop_block(emit_t *emit) {
emit_native_pre(emit);
- emit_call(emit, 0, nlr_pop); // TODO need to add function to runtime table
+ emit_call(emit, MP_F_NLR_POP);
adjust_stack(emit, -(mp_int_t)(sizeof(nlr_buf_t) / sizeof(mp_uint_t)));
emit_post(emit);
}
@@ -1136,7 +1183,7 @@ STATIC void emit_native_pop_block(emit_t *emit) {
STATIC void emit_native_pop_except(emit_t *emit) {
/*
emit_native_pre(emit);
- emit_call(emit, 0, nlr_pop); // TODO need to add function to runtime table
+ emit_call(emit, MP_F_NLR_POP);
adjust_stack(emit, -(mp_int_t)(sizeof(nlr_buf_t) / sizeof(mp_uint_t)));
emit_post(emit);
*/
@@ -1150,7 +1197,7 @@ STATIC void emit_native_unary_op(emit_t *emit, mp_unary_op_t op) {
vtype_kind_t vtype;
emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
assert(vtype == VTYPE_PYOBJ);
- emit_call_with_imm_arg(emit, MP_F_UNARY_OP, mp_unary_op, op, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_UNARY_OP, op, REG_ARG_1);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
}
@@ -1183,11 +1230,11 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
assert(0);
}
} else if (vtype_lhs == VTYPE_PYOBJ && vtype_rhs == VTYPE_PYOBJ) {
- emit_call_with_imm_arg(emit, MP_F_BINARY_OP, mp_binary_op, op, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_BINARY_OP, op, REG_ARG_1);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
} else {
printf("ViperTypeError: can't do binary op between types %d and %d\n", vtype_lhs, vtype_rhs);
- assert(0);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
}
@@ -1196,14 +1243,14 @@ STATIC void emit_native_build_tuple(emit_t *emit, int n_args) {
// if wrapped in byte_array, or something, allocates memory and fills it
emit_native_pre(emit);
emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // pointer to items
- emit_call_with_imm_arg(emit, MP_F_BUILD_TUPLE, mp_obj_new_tuple, n_args, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_BUILD_TUPLE, n_args, REG_ARG_1);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new tuple
}
STATIC void emit_native_build_list(emit_t *emit, int n_args) {
emit_native_pre(emit);
emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // pointer to items
- emit_call_with_imm_arg(emit, MP_F_BUILD_LIST, mp_obj_new_list, n_args, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_BUILD_LIST, n_args, REG_ARG_1);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new list
}
@@ -1214,13 +1261,13 @@ STATIC void emit_native_list_append(emit_t *emit, int list_index) {
emit_access_stack(emit, list_index, &vtype_list, REG_ARG_1);
assert(vtype_list == VTYPE_PYOBJ);
assert(vtype_item == VTYPE_PYOBJ);
- emit_call(emit, MP_F_LIST_APPEND, mp_obj_list_append);
+ emit_call(emit, MP_F_LIST_APPEND);
emit_post(emit);
}
STATIC void emit_native_build_map(emit_t *emit, int n_args) {
emit_native_pre(emit);
- emit_call_with_imm_arg(emit, MP_F_BUILD_MAP, mp_obj_new_dict, n_args, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_BUILD_MAP, n_args, REG_ARG_1);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new map
}
@@ -1230,7 +1277,7 @@ STATIC void emit_native_store_map(emit_t *emit) {
assert(vtype_key == VTYPE_PYOBJ);
assert(vtype_value == VTYPE_PYOBJ);
assert(vtype_map == VTYPE_PYOBJ);
- emit_call(emit, MP_F_STORE_MAP, mp_obj_dict_store);
+ emit_call(emit, MP_F_STORE_MAP);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // map
}
@@ -1242,14 +1289,14 @@ STATIC void emit_native_map_add(emit_t *emit, int map_index) {
assert(vtype_map == VTYPE_PYOBJ);
assert(vtype_key == VTYPE_PYOBJ);
assert(vtype_value == VTYPE_PYOBJ);
- emit_call(emit, MP_F_STORE_MAP, mp_obj_dict_store);
+ emit_call(emit, MP_F_STORE_MAP);
emit_post(emit);
}
STATIC void emit_native_build_set(emit_t *emit, int n_args) {
emit_native_pre(emit);
emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // pointer to items
- emit_call_with_imm_arg(emit, MP_F_BUILD_SET, mp_obj_new_set, n_args, REG_ARG_1);
+ emit_call_with_imm_arg(emit, MP_F_BUILD_SET, n_args, REG_ARG_1);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new set
}
@@ -1260,7 +1307,7 @@ STATIC void emit_native_set_add(emit_t *emit, int set_index) {
emit_access_stack(emit, set_index, &vtype_set, REG_ARG_1);
assert(vtype_set == VTYPE_PYOBJ);
assert(vtype_item == VTYPE_PYOBJ);
- emit_call(emit, MP_F_STORE_SET, mp_obj_set_store);
+ emit_call(emit, MP_F_STORE_SET);
emit_post(emit);
}
@@ -1271,7 +1318,7 @@ STATIC void emit_native_build_slice(emit_t *emit, int n_args) {
emit_pre_pop_reg_reg(emit, &vtype_stop, REG_ARG_2, &vtype_start, REG_ARG_1); // arg1 = start, arg2 = stop
assert(vtype_start == VTYPE_PYOBJ);
assert(vtype_stop == VTYPE_PYOBJ);
- emit_call_with_imm_arg(emit, MP_F_NEW_SLICE, mp_obj_new_slice, (mp_uint_t)mp_const_none, REG_ARG_3); // arg3 = step
+ emit_call_with_imm_arg(emit, MP_F_NEW_SLICE, (mp_uint_t)mp_const_none, REG_ARG_3); // arg3 = step
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
} else {
assert(n_args == 3);
@@ -1280,7 +1327,7 @@ STATIC void emit_native_build_slice(emit_t *emit, int n_args) {
assert(vtype_start == VTYPE_PYOBJ);
assert(vtype_stop == VTYPE_PYOBJ);
assert(vtype_step == VTYPE_PYOBJ);
- emit_call(emit, MP_F_NEW_SLICE, mp_obj_new_slice);
+ emit_call(emit, MP_F_NEW_SLICE);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
}
@@ -1291,7 +1338,7 @@ STATIC void emit_native_unpack_sequence(emit_t *emit, int n_args) {
emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = seq
assert(vtype_base == VTYPE_PYOBJ);
emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, n_args); // arg3 = dest ptr
- emit_call_with_imm_arg(emit, MP_F_UNPACK_SEQUENCE, mp_unpack_sequence, n_args, REG_ARG_2); // arg2 = n_args
+ emit_call_with_imm_arg(emit, MP_F_UNPACK_SEQUENCE, n_args, REG_ARG_2); // arg2 = n_args
}
STATIC void emit_native_unpack_ex(emit_t *emit, int n_left, int n_right) {
@@ -1300,20 +1347,20 @@ STATIC void emit_native_unpack_ex(emit_t *emit, int n_left, int n_right) {
emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = seq
assert(vtype_base == VTYPE_PYOBJ);
emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, n_left + n_right + 1); // arg3 = dest ptr
- emit_call_with_imm_arg(emit, MP_F_UNPACK_EX, mp_unpack_ex, n_left | (n_right << 8), REG_ARG_2); // arg2 = n_left + n_right
+ emit_call_with_imm_arg(emit, MP_F_UNPACK_EX, n_left | (n_right << 8), REG_ARG_2); // arg2 = n_left | (n_right << 8)
}
STATIC void emit_native_make_function(emit_t *emit, scope_t *scope, uint n_pos_defaults, uint n_kw_defaults) {
// call runtime, with type info for args, or don't support dict/default params, or only support Python objects for them
emit_native_pre(emit);
if (n_pos_defaults == 0 && n_kw_defaults == 0) {
- emit_call_with_3_imm_args_and_first_aligned(emit, MP_F_MAKE_FUNCTION_FROM_RAW_CODE, mp_make_function_from_raw_code, (mp_uint_t)scope->raw_code, REG_ARG_1, (mp_uint_t)MP_OBJ_NULL, REG_ARG_2, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3);
+ emit_call_with_3_imm_args_and_first_aligned(emit, MP_F_MAKE_FUNCTION_FROM_RAW_CODE, (mp_uint_t)scope->raw_code, REG_ARG_1, (mp_uint_t)MP_OBJ_NULL, REG_ARG_2, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3);
} else {
vtype_kind_t vtype_def_tuple, vtype_def_dict;
emit_pre_pop_reg_reg(emit, &vtype_def_dict, REG_ARG_3, &vtype_def_tuple, REG_ARG_2);
assert(vtype_def_tuple == VTYPE_PYOBJ);
assert(vtype_def_dict == VTYPE_PYOBJ);
- emit_call_with_imm_arg_aligned(emit, MP_F_MAKE_FUNCTION_FROM_RAW_CODE, mp_make_function_from_raw_code, (mp_uint_t)scope->raw_code, REG_ARG_1);
+ emit_call_with_imm_arg_aligned(emit, MP_F_MAKE_FUNCTION_FROM_RAW_CODE, (mp_uint_t)scope->raw_code, REG_ARG_1);
}
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
@@ -1333,20 +1380,20 @@ STATIC void emit_native_call_function(emit_t *emit, int n_positional, int n_keyw
vtype_kind_t vtype_fun;
emit_pre_pop_reg(emit, &vtype_fun, REG_ARG_1); // the function
assert(vtype_fun == VTYPE_PYOBJ);
- emit_call(emit, MP_F_CALL_FUNCTION_0, mp_call_function_0);
+ emit_call(emit, MP_F_CALL_FUNCTION_0);
} else if (n_positional == 1) {
vtype_kind_t vtype_fun, vtype_arg1;
emit_pre_pop_reg_reg(emit, &vtype_arg1, REG_ARG_2, &vtype_fun, REG_ARG_1); // the single argument, the function
assert(vtype_fun == VTYPE_PYOBJ);
assert(vtype_arg1 == VTYPE_PYOBJ);
- emit_call(emit, MP_F_CALL_FUNCTION_1, mp_call_function_1);
+ emit_call(emit, MP_F_CALL_FUNCTION_1);
} else if (n_positional == 2) {
vtype_kind_t vtype_fun, vtype_arg1, vtype_arg2;
emit_pre_pop_reg_reg_reg(emit, &vtype_arg2, REG_ARG_3, &vtype_arg1, REG_ARG_2, &vtype_fun, REG_ARG_1); // the second argument, the first argument, the function
assert(vtype_fun == VTYPE_PYOBJ);
assert(vtype_arg1 == VTYPE_PYOBJ);
assert(vtype_arg2 == VTYPE_PYOBJ);
- emit_call(emit, MP_F_CALL_FUNCTION_2, mp_call_function_2);
+ emit_call(emit, MP_F_CALL_FUNCTION_2);
} else {
*/
@@ -1357,7 +1404,7 @@ STATIC void emit_native_call_function(emit_t *emit, int n_positional, int n_keyw
vtype_kind_t vtype_fun;
emit_pre_pop_reg(emit, &vtype_fun, REG_ARG_1); // the function
assert(vtype_fun == VTYPE_PYOBJ);
- emit_call_with_imm_arg(emit, MP_F_CALL_FUNCTION_N_KW_FOR_NATIVE, mp_call_function_n_kw_for_native, n_positional | (n_keyword << 8), REG_ARG_2);
+ emit_call_with_imm_arg(emit, MP_F_NATIVE_CALL_FUNCTION_N_KW, n_positional | (n_keyword << 8), REG_ARG_2);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
@@ -1370,31 +1417,35 @@ STATIC void emit_native_call_method(emit_t *emit, int n_positional, int n_keywor
emit_pre_pop_reg_reg(emit, &vtype_self, REG_ARG_2, &vtype_meth, REG_ARG_1); // the self object (or NULL), the method
assert(vtype_meth == VTYPE_PYOBJ);
assert(vtype_self == VTYPE_PYOBJ);
- emit_call(emit, MP_F_CALL_METHOD_1, mp_call_method_1);
+ emit_call(emit, MP_F_CALL_METHOD_1);
} else if (n_positional == 1) {
vtype_kind_t vtype_meth, vtype_self, vtype_arg1;
emit_pre_pop_reg_reg_reg(emit, &vtype_arg1, REG_ARG_3, &vtype_self, REG_ARG_2, &vtype_meth, REG_ARG_1); // the first argument, the self object (or NULL), the method
assert(vtype_meth == VTYPE_PYOBJ);
assert(vtype_self == VTYPE_PYOBJ);
assert(vtype_arg1 == VTYPE_PYOBJ);
- emit_call(emit, MP_F_CALL_METHOD_2, mp_call_method_2);
+ emit_call(emit, MP_F_CALL_METHOD_2);
} else {
*/
emit_native_pre(emit);
emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 2 + n_positional + 2 * n_keyword); // pointer to items, including meth and self
- emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, mp_call_method_n_kw, n_positional, REG_ARG_1, n_keyword, REG_ARG_2);
+ emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, n_positional, REG_ARG_1, n_keyword, REG_ARG_2);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
STATIC void emit_native_return_value(emit_t *emit) {
DEBUG_printf("return_value\n");
- // easy. since we don't know who we return to, just return the raw value.
- // runtime needs then to know our type signature, but I think that's possible.
vtype_kind_t vtype;
emit_pre_pop_reg(emit, &vtype, REG_RET);
if (emit->do_viper_types) {
- assert(vtype == VTYPE_PTR_NONE);
+ if (vtype == VTYPE_PTR_NONE) {
+ if (emit->return_vtype == VTYPE_PYOBJ) {
+ ASM_MOV_IMM_TO_REG((mp_uint_t)mp_const_none, REG_RET);
+ }
+ } else if (vtype != emit->return_vtype) {
+ printf("ViperTypeError: incompatible return type\n");
+ }
} else {
assert(vtype == VTYPE_PYOBJ);
}
@@ -1410,13 +1461,13 @@ STATIC void emit_native_return_value(emit_t *emit) {
STATIC void emit_native_raise_varargs(emit_t *emit, int n_args) {
assert(n_args == 1);
- vtype_kind_t vtype_err;
- emit_pre_pop_reg(emit, &vtype_err, REG_ARG_1); // arg1 = object to raise
- assert(vtype_err == VTYPE_PYOBJ);
- emit_call(emit, 0, mp_make_raise_obj); // TODO need to add function to runtime table
- emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
- emit_pre_pop_reg(emit, &vtype_err, REG_ARG_1);
- emit_call(emit, 0, nlr_jump); // TODO need to add function to runtime table
+ vtype_kind_t vtype_exc;
+ emit_pre_pop_reg(emit, &vtype_exc, REG_ARG_1); // arg1 = object to raise
+ if (vtype_exc != VTYPE_PYOBJ) {
+ printf("ViperTypeError: must raise an object\n");
+ }
+ // TODO probably make this 1 call to the runtime (which could even call convert, native_raise(obj, type))
+ emit_call(emit, MP_F_NATIVE_RAISE);
}
STATIC void emit_native_yield_value(emit_t *emit) {
@@ -1444,7 +1495,7 @@ STATIC void emit_native_end_except_handler(emit_t *emit) {
}
const emit_method_table_t EXPORT_FUN(method_table) = {
- emit_native_set_viper_types,
+ emit_native_set_native_type,
emit_native_start_pass,
emit_native_end_pass,
emit_native_last_emit_was_return_value,
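
Note: the calls above now pass only an MP_F_* index; the actual function pointer is looked up in mp_fun_table (runtime.c), whose entries must stay in the same order as the enum in runtime0.h. A minimal stand-alone model of that table dispatch, with hypothetical names, might look like:

    #include <stdio.h>

    /* hypothetical stand-ins for the runtime-table mechanism */
    typedef enum { F_ADD = 0, F_NEG, F_NUMBER_OF } fun_index_t;

    static int do_add(int a, int b) { return a + b; }
    static int do_neg(int a, int b) { (void)b; return -a; }

    /* one table entry per enum value, in the same order as the enum */
    static void *const fun_table[F_NUMBER_OF] = { (void *)do_add, (void *)do_neg };

    /* the emitter records only the small index; the generated code
       loads fun_table[index] at run time and branches to it */
    static int call_indexed(fun_index_t idx, int a, int b) {
        int (*f)(int, int) = (int (*)(int, int))fun_table[idx];
        return f(a, b);
    }

    int main(void) {
        printf("%d %d\n", call_indexed(F_ADD, 2, 3), call_indexed(F_NEG, 7, 0));
        return 0;
    }
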
diff --git a/py/modcmath.c b/py/modcmath.c
index 6fdd1f77b..a147a754b 100644
--- a/py/modcmath.c
+++ b/py/modcmath.c
@@ -68,7 +68,7 @@ mp_obj_t mp_cmath_polar(mp_obj_t z_obj) {
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_polar_obj, mp_cmath_polar);
/// \function rect(r, phi)
-/// Returns the complex number with modules `r` and phase `phi`.
+/// Returns the complex number with modulus `r` and phase `phi`.
mp_obj_t mp_cmath_rect(mp_obj_t r_obj, mp_obj_t phi_obj) {
mp_float_t r = mp_obj_get_float(r_obj);
mp_float_t phi = mp_obj_get_float(phi_obj);
@@ -77,6 +77,7 @@ mp_obj_t mp_cmath_rect(mp_obj_t r_obj, mp_obj_t phi_obj) {
STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_cmath_rect_obj, mp_cmath_rect);
/// \function exp(z)
+/// Return the exponential of `z`.
mp_obj_t mp_cmath_exp(mp_obj_t z_obj) {
mp_float_t real, imag;
mp_obj_get_complex(z_obj, &real, &imag);
@@ -86,6 +87,7 @@ mp_obj_t mp_cmath_exp(mp_obj_t z_obj) {
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_exp_obj, mp_cmath_exp);
/// \function log(z)
+/// Return the natural logarithm of `z`. The branch cut is along the negative real axis.
// TODO can take second argument, being the base
mp_obj_t mp_cmath_log(mp_obj_t z_obj) {
mp_float_t real, imag;
@@ -95,6 +97,7 @@ mp_obj_t mp_cmath_log(mp_obj_t z_obj) {
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log_obj, mp_cmath_log);
/// \function log10(z)
+/// Return the base-10 logarithm of `z`. The branch cut is along the negative real axis.
mp_obj_t mp_cmath_log10(mp_obj_t z_obj) {
mp_float_t real, imag;
mp_obj_get_complex(z_obj, &real, &imag);
@@ -103,6 +106,7 @@ mp_obj_t mp_cmath_log10(mp_obj_t z_obj) {
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log10_obj, mp_cmath_log10);
/// \function sqrt(z)
+/// Return the square root of `z`.
mp_obj_t mp_cmath_sqrt(mp_obj_t z_obj) {
mp_float_t real, imag;
mp_obj_get_complex(z_obj, &real, &imag);
@@ -113,6 +117,7 @@ mp_obj_t mp_cmath_sqrt(mp_obj_t z_obj) {
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_sqrt_obj, mp_cmath_sqrt);
/// \function cos(z)
+/// Return the cosine of `z`.
mp_obj_t mp_cmath_cos(mp_obj_t z_obj) {
mp_float_t real, imag;
mp_obj_get_complex(z_obj, &real, &imag);
@@ -121,6 +126,7 @@ mp_obj_t mp_cmath_cos(mp_obj_t z_obj) {
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_cos_obj, mp_cmath_cos);
/// \function sin(z)
+/// Return the sine of `z`.
mp_obj_t mp_cmath_sin(mp_obj_t z_obj) {
mp_float_t real, imag;
mp_obj_get_complex(z_obj, &real, &imag);
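
Note: rect(r, phi) is the standard polar-to-cartesian conversion, real = r*cos(phi) and imag = r*sin(phi). A quick stand-alone check of the arithmetic:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        const double pi = 3.14159265358979323846;
        double r = 2.0, phi = pi / 6.0;
        double real = r * cos(phi), imag = r * sin(phi);
        printf("rect(2, pi/6) = %.3f + %.3fj\n", real, imag); /* ~1.732 + 1.000j */
        return 0;
    }
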
diff --git a/py/mpconfig.h b/py/mpconfig.h
index 351910b6d..850d05524 100644
--- a/py/mpconfig.h
+++ b/py/mpconfig.h
@@ -111,6 +111,9 @@
#define MICROPY_EMIT_INLINE_THUMB (0)
#endif
+// Convenience definition for whether any native emitter is enabled
+#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_THUMB)
+
/*****************************************************************************/
/* Compiler configuration */
@@ -368,10 +371,15 @@ typedef double mp_float_t;
// Extended modules
+
#ifndef MICROPY_PY_UCTYPES
#define MICROPY_PY_UCTYPES (0)
#endif
+#ifndef MICROPY_PY_ZLIBD
+#define MICROPY_PY_ZLIBD (0)
+#endif
+
/*****************************************************************************/
/* Hooks for a port to add builtins */
diff --git a/py/obj.c b/py/obj.c
index 04716454d..d8fccfb7b 100644
--- a/py/obj.c
+++ b/py/obj.c
@@ -360,6 +360,16 @@ uint mp_get_index(const mp_obj_type_t *type, mp_uint_t len, mp_obj_t index, bool
return i;
}
+// will raise a TypeError if object has no length
+mp_obj_t mp_obj_len(mp_obj_t o_in) {
+ mp_obj_t len = mp_obj_len_maybe(o_in);
+ if (len == MP_OBJ_NULL) {
+ nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, "object of type '%s' has no len()", mp_obj_get_type_str(o_in)));
+ } else {
+ return len;
+ }
+}
+
// may return MP_OBJ_NULL
mp_obj_t mp_obj_len_maybe(mp_obj_t o_in) {
if (
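
Note: this split gives callers a choice: mp_obj_len_maybe signals a missing length by returning MP_OBJ_NULL, while the new mp_obj_len raises the TypeError itself. A sketch of the non-raising style, assuming the usual header set from this tree:

    #include <stdio.h>

    #include "mpconfig.h"
    #include "misc.h"
    #include "qstr.h"
    #include "obj.h"

    // print a length if the object has one, without raising TypeError
    void print_len_if_any(mp_obj_t o) {
        mp_obj_t len = mp_obj_len_maybe(o);
        if (len == MP_OBJ_NULL) {
            printf("object has no len()\n");
        } else {
            printf("len = %d\n", (int)mp_obj_get_int(len));
        }
    }
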
diff --git a/py/obj.h b/py/obj.h
index 928402d44..6fe671972 100644
--- a/py/obj.h
+++ b/py/obj.h
@@ -312,6 +312,7 @@ extern const mp_obj_type_t mp_type_classmethod;
extern const mp_obj_type_t mp_type_property;
extern const mp_obj_type_t mp_type_stringio;
extern const mp_obj_type_t mp_type_bytesio;
+extern const mp_obj_type_t mp_type_reversed;
// Exceptions
extern const mp_obj_type_t mp_type_BaseException;
@@ -377,6 +378,7 @@ mp_obj_t mp_obj_new_exception_args(const mp_obj_type_t *exc_type, uint n_args, c
mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const char *msg);
mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char *fmt, ...); // counts args by number of % symbols in fmt, excluding %%; can only handle void* sizes (ie no float/double!)
mp_obj_t mp_obj_new_fun_bc(uint scope_flags, qstr *args, uint n_pos_args, uint n_kwonly_args, mp_obj_t def_args, mp_obj_t def_kw_args, const byte *code);
+mp_obj_t mp_obj_new_fun_viper(uint n_args, void *fun, mp_uint_t type_sig);
mp_obj_t mp_obj_new_fun_asm(uint n_args, void *fun);
mp_obj_t mp_obj_new_gen_wrap(mp_obj_t fun);
mp_obj_t mp_obj_new_closure(mp_obj_t fun, uint n_closed, const mp_obj_t *closed);
@@ -424,6 +426,7 @@ void mp_obj_get_complex(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag);
void mp_obj_get_array(mp_obj_t o, uint *len, mp_obj_t **items);
void mp_obj_get_array_fixed_n(mp_obj_t o, uint len, mp_obj_t **items);
uint mp_get_index(const mp_obj_type_t *type, mp_uint_t len, mp_obj_t index, bool is_slice);
+mp_obj_t mp_obj_len(mp_obj_t o_in);
mp_obj_t mp_obj_len_maybe(mp_obj_t o_in); /* may return MP_OBJ_NULL */
mp_obj_t mp_obj_subscr(mp_obj_t base, mp_obj_t index, mp_obj_t val);
diff --git a/py/objcomplex.c b/py/objcomplex.c
index 20e7c97d3..0c58dee65 100644
--- a/py/objcomplex.c
+++ b/py/objcomplex.c
@@ -132,6 +132,15 @@ STATIC mp_obj_t complex_binary_op(int op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
return mp_obj_complex_binary_op(op, lhs->real, lhs->imag, rhs_in);
}
+STATIC void complex_load_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ mp_obj_complex_t *self = self_in;
+ if (attr == MP_QSTR_real) {
+ dest[0] = mp_obj_new_float(self->real);
+ } else if (attr == MP_QSTR_imag) {
+ dest[0] = mp_obj_new_float(self->imag);
+ }
+}
+
const mp_obj_type_t mp_type_complex = {
{ &mp_type_type },
.name = MP_QSTR_complex,
@@ -139,6 +148,7 @@ const mp_obj_type_t mp_type_complex = {
.make_new = complex_make_new,
.unary_op = complex_unary_op,
.binary_op = complex_binary_op,
+ .load_attr = complex_load_attr,
};
mp_obj_t mp_obj_new_complex(mp_float_t real, mp_float_t imag) {
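
Note: the load_attr hook follows the runtime's dest-array convention: store the value into dest[0] on success and leave it untouched on a miss, so the caller can raise AttributeError. A stand-alone model of that convention (names hypothetical, plain C in place of real MicroPython objects):

    #include <stdio.h>
    #include <string.h>

    typedef void *obj_t;
    #define OBJ_NULL ((obj_t)0)

    typedef struct { double real, imag; } complex_t;

    /* modelled on complex_load_attr above: fill dest[0] on a hit, leave
       it untouched on a miss so the caller raises AttributeError */
    static void load_attr(complex_t *self, const char *attr, obj_t *dest) {
        static double slot; /* stand-in for a freshly allocated float object */
        if (strcmp(attr, "real") == 0) { slot = self->real; dest[0] = &slot; }
        else if (strcmp(attr, "imag") == 0) { slot = self->imag; dest[0] = &slot; }
    }

    int main(void) {
        complex_t z = { 1.5, -2.0 };
        obj_t dest[2] = { OBJ_NULL, OBJ_NULL }; /* dest[1] is for method binding in the real protocol */
        load_attr(&z, "imag", dest);
        if (dest[0] == OBJ_NULL) printf("AttributeError\n");
        else printf("imag = %g\n", *(double *)dest[0]);
        return 0;
    }
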
diff --git a/py/objfun.c b/py/objfun.c
index 409525c67..63dbd4f15 100644
--- a/py/objfun.c
+++ b/py/objfun.c
@@ -501,8 +501,67 @@ mp_obj_t mp_obj_new_fun_bc(uint scope_flags, qstr *args, uint n_pos_args, uint n
}
/******************************************************************************/
+/* viper functions */
+
+#if MICROPY_EMIT_NATIVE
+
+typedef struct _mp_obj_fun_viper_t {
+ mp_obj_base_t base;
+ int n_args;
+ void *fun;
+ mp_uint_t type_sig;
+} mp_obj_fun_viper_t;
+
+typedef mp_uint_t (*viper_fun_0_t)();
+typedef mp_uint_t (*viper_fun_1_t)(mp_uint_t);
+typedef mp_uint_t (*viper_fun_2_t)(mp_uint_t, mp_uint_t);
+typedef mp_uint_t (*viper_fun_3_t)(mp_uint_t, mp_uint_t, mp_uint_t);
+
+STATIC mp_obj_t fun_viper_call(mp_obj_t self_in, uint n_args, uint n_kw, const mp_obj_t *args) {
+ mp_obj_fun_viper_t *self = self_in;
+
+ mp_arg_check_num(n_args, n_kw, self->n_args, self->n_args, false);
+
+ mp_uint_t ret;
+ if (n_args == 0) {
+ ret = ((viper_fun_0_t)self->fun)();
+ } else if (n_args == 1) {
+ ret = ((viper_fun_1_t)self->fun)(mp_convert_obj_to_native(args[0], self->type_sig >> 2));
+ } else if (n_args == 2) {
+ ret = ((viper_fun_2_t)self->fun)(mp_convert_obj_to_native(args[0], self->type_sig >> 2), mp_convert_obj_to_native(args[1], self->type_sig >> 4));
+ } else if (n_args == 3) {
+ ret = ((viper_fun_3_t)self->fun)(mp_convert_obj_to_native(args[0], self->type_sig >> 2), mp_convert_obj_to_native(args[1], self->type_sig >> 4), mp_convert_obj_to_native(args[2], self->type_sig >> 6));
+ } else {
+ assert(0);
+ ret = 0;
+ }
+
+ return mp_convert_native_to_obj(ret, self->type_sig);
+}
+
+STATIC const mp_obj_type_t mp_type_fun_viper = {
+ { &mp_type_type },
+ .name = MP_QSTR_function,
+ .call = fun_viper_call,
+ .binary_op = fun_binary_op,
+};
+
+mp_obj_t mp_obj_new_fun_viper(uint n_args, void *fun, mp_uint_t type_sig) {
+ mp_obj_fun_viper_t *o = m_new_obj(mp_obj_fun_viper_t);
+ o->base.type = &mp_type_fun_viper;
+ o->n_args = n_args;
+ o->fun = fun;
+ o->type_sig = type_sig;
+ return o;
+}
+
+#endif // MICROPY_EMIT_NATIVE
+
+/******************************************************************************/
/* inline assembler functions */
+#if MICROPY_EMIT_INLINE_THUMB
+
typedef struct _mp_obj_fun_asm_t {
mp_obj_base_t base;
int n_args;
@@ -603,3 +662,5 @@ mp_obj_t mp_obj_new_fun_asm(uint n_args, void *fun) {
o->fun = fun;
return o;
}
+
+#endif // MICROPY_EMIT_INLINE_THUMB
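
Note: judging by the shifts and the `& 3` masks, type_sig packs one 2-bit type code per slot: bits 0-1 hold the return type and bits 2-3, 4-5, 6-7 the first three arguments, with codes matching MP_NATIVE_TYPE_* in runtime0.h. A small sketch of packing and unpacking under that assumption:

    #include <stdio.h>

    /* 2-bit type codes, as in runtime0.h */
    enum { T_OBJ = 0, T_BOOL = 1, T_INT = 2, T_UINT = 3 };

    /* pack: return type in bits 0-1, argument i in bits 2*(i+1) and up */
    static unsigned pack_sig(unsigned ret, unsigned a0, unsigned a1) {
        return ret | (a0 << 2) | (a1 << 4);
    }

    int main(void) {
        unsigned sig = pack_sig(T_INT, T_UINT, T_OBJ);
        printf("ret=%u arg0=%u arg1=%u\n", sig & 3, (sig >> 2) & 3, (sig >> 4) & 3);
        return 0;
    }
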
diff --git a/py/objint.c b/py/objint.c
index c08bf7da6..d088ae1a8 100644
--- a/py/objint.c
+++ b/py/objint.c
@@ -289,7 +289,7 @@ mp_obj_t mp_obj_int_binary_op_extra_cases(int op, mp_obj_t lhs_in, mp_obj_t rhs_
// true acts as 0
return mp_binary_op(op, lhs_in, MP_OBJ_NEW_SMALL_INT(1));
} else if (op == MP_BINARY_OP_MULTIPLY) {
- if (MP_OBJ_IS_STR(rhs_in) || MP_OBJ_IS_TYPE(rhs_in, &mp_type_tuple) || MP_OBJ_IS_TYPE(rhs_in, &mp_type_list)) {
+ if (MP_OBJ_IS_STR(rhs_in) || MP_OBJ_IS_TYPE(rhs_in, &mp_type_bytes) || MP_OBJ_IS_TYPE(rhs_in, &mp_type_tuple) || MP_OBJ_IS_TYPE(rhs_in, &mp_type_list)) {
// multiply is commutative for these types, so delegate to them
return mp_binary_op(op, rhs_in, lhs_in);
}
diff --git a/py/objlist.c b/py/objlist.c
index 655a78908..578e39452 100644
--- a/py/objlist.c
+++ b/py/objlist.c
@@ -131,6 +131,9 @@ STATIC mp_obj_t list_binary_op(int op, mp_obj_t lhs, mp_obj_t rhs) {
if (!mp_obj_get_int_maybe(rhs, &n)) {
return MP_OBJ_NULL; // op not supported
}
+ if (n < 0) {
+ n = 0;
+ }
mp_obj_list_t *s = list_new(o->len * n);
mp_seq_multiply(o->items, sizeof(*o->items), o->len, n, s->items);
return s;
diff --git a/py/objrange.c b/py/objrange.c
index 49fa10acc..3bd974f74 100644
--- a/py/objrange.c
+++ b/py/objrange.c
@@ -31,6 +31,7 @@
#include "misc.h"
#include "qstr.h"
#include "obj.h"
+#include "runtime0.h"
#include "runtime.h"
/******************************************************************************/
@@ -82,6 +83,16 @@ typedef struct _mp_obj_range_t {
mp_int_t step;
} mp_obj_range_t;
+STATIC void range_print(void (*print)(void *env, const char *fmt, ...), void *env, mp_obj_t self_in, mp_print_kind_t kind) {
+ mp_obj_range_t *self = self_in;
+ print(env, "range(%d, %d", self->start, self->stop);
+ if (self->step == 1) {
+ print(env, ")");
+ } else {
+ print(env, ", %d)", self->step);
+ }
+}
+
STATIC mp_obj_t range_make_new(mp_obj_t type_in, uint n_args, uint n_kw, const mp_obj_t *args) {
mp_arg_check_num(n_args, n_kw, 1, 3, false);
@@ -96,6 +107,7 @@ STATIC mp_obj_t range_make_new(mp_obj_t type_in, uint n_args, uint n_kw, const m
o->start = mp_obj_get_int(args[0]);
o->stop = mp_obj_get_int(args[1]);
if (n_args == 3) {
+ // TODO check step is non-zero
o->step = mp_obj_get_int(args[2]);
}
}
@@ -103,6 +115,55 @@ STATIC mp_obj_t range_make_new(mp_obj_t type_in, uint n_args, uint n_kw, const m
return o;
}
+STATIC mp_int_t range_len(mp_obj_range_t *self) {
+ // When computing length, need to take into account step!=1 and step<0.
+ mp_int_t len = self->stop - self->start + self->step;
+ if (self->step > 0) {
+ len -= 1;
+ } else {
+ len += 1;
+ }
+ len = len / self->step;
+ if (len < 0) {
+ len = 0;
+ }
+ return len;
+}
+
+STATIC mp_obj_t range_unary_op(int op, mp_obj_t self_in) {
+ mp_obj_range_t *self = self_in;
+ mp_int_t len = range_len(self);
+ switch (op) {
+ case MP_UNARY_OP_BOOL: return MP_BOOL(len > 0);
+ case MP_UNARY_OP_LEN: return MP_OBJ_NEW_SMALL_INT(len);
+ default: return MP_OBJ_NULL; // op not supported
+ }
+}
+
+STATIC mp_obj_t range_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+ if (value == MP_OBJ_SENTINEL) {
+ // load
+ mp_obj_range_t *self = self_in;
+ mp_int_t len = range_len(self);
+#if MICROPY_PY_BUILTINS_SLICE
+ if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
+ mp_bound_slice_t slice;
+ mp_seq_get_fast_slice_indexes(len, index, &slice);
+ mp_obj_range_t *o = m_new_obj(mp_obj_range_t);
+ o->base.type = &mp_type_range;
+ o->start = slice.start;
+ o->stop = slice.stop;
+ o->step = slice.step;
+ return o;
+ }
+#endif
+ uint index_val = mp_get_index(self->base.type, len, index, false);
+ return MP_OBJ_NEW_SMALL_INT(self->start + index_val * self->step);
+ } else {
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
STATIC mp_obj_t range_getiter(mp_obj_t o_in) {
mp_obj_range_t *o = o_in;
return mp_obj_new_range_iterator(o->start, o->stop, o->step);
@@ -111,6 +172,9 @@ STATIC mp_obj_t range_getiter(mp_obj_t o_in) {
const mp_obj_type_t mp_type_range = {
{ &mp_type_type },
.name = MP_QSTR_range,
+ .print = range_print,
.make_new = range_make_new,
+ .unary_op = range_unary_op,
+ .subscr = range_subscr,
.getiter = range_getiter,
};
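
Note: the length arithmetic above is a clamped ceiling division: it adds one step, backs off by one toward zero, divides by step, and clamps negative results to zero. A stand-alone copy with a few worked cases:

    #include <stdio.h>

    /* same arithmetic as range_len above, stand-alone */
    static long range_len(long start, long stop, long step) {
        long len = stop - start + step;
        len += (step > 0) ? -1 : 1;
        len /= step;
        return len < 0 ? 0 : len;
    }

    int main(void) {
        printf("%ld\n", range_len(0, 10, 3));  /* 4: 0, 3, 6, 9 */
        printf("%ld\n", range_len(10, 0, -3)); /* 4: 10, 7, 4, 1 */
        printf("%ld\n", range_len(5, 5, 1));   /* 0: empty range */
        return 0;
    }
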
diff --git a/py/objreversed.c b/py/objreversed.c
new file mode 100644
index 000000000..3f758699d
--- /dev/null
+++ b/py/objreversed.c
@@ -0,0 +1,74 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "mpconfig.h"
+#include "nlr.h"
+#include "misc.h"
+#include "qstr.h"
+#include "obj.h"
+#include "runtime.h"
+
+typedef struct _mp_obj_reversed_t {
+ mp_obj_base_t base;
+ mp_obj_t seq; // sequence object that we are reversing
+ mp_uint_t cur_index; // current index, plus 1; 0=no more, 1=last one (index 0)
+} mp_obj_reversed_t;
+
+STATIC mp_obj_t reversed_make_new(mp_obj_t type_in, uint n_args, uint n_kw, const mp_obj_t *args) {
+ mp_arg_check_num(n_args, n_kw, 1, 1, false);
+
+ mp_obj_reversed_t *o = m_new_obj(mp_obj_reversed_t);
+ o->base.type = &mp_type_reversed;
+ o->seq = args[0];
+ o->cur_index = mp_obj_get_int(mp_obj_len(args[0])); // start at the end of the sequence
+
+ return o;
+}
+
+STATIC mp_obj_t reversed_iternext(mp_obj_t self_in) {
+ assert(MP_OBJ_IS_TYPE(self_in, &mp_type_reversed));
+ mp_obj_reversed_t *self = self_in;
+
+ // "raise" stop iteration if we are at the end (the start) of the sequence
+ if (self->cur_index == 0) {
+ return MP_OBJ_STOP_ITERATION;
+ }
+
+ // pre-decrement and index sequence
+ self->cur_index -= 1;
+ return mp_obj_subscr(self->seq, MP_OBJ_NEW_SMALL_INT(self->cur_index), MP_OBJ_SENTINEL);
+}
+
+const mp_obj_type_t mp_type_reversed = {
+ { &mp_type_type },
+ .name = MP_QSTR_reversed,
+ .make_new = reversed_make_new,
+ .getiter = mp_identity,
+ .iternext = reversed_iternext,
+};
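
Note: the iterator state is deliberately off by one: cur_index holds the index of the next item plus 1, so 0 doubles as the exhausted marker and no signed arithmetic is needed. A stand-alone model of the same countdown:

    #include <stdio.h>

    /* models the reversed iterator above: cur_index is "index of the
       next item, plus 1", so 0 means exhausted */
    typedef struct { const int *seq; unsigned cur_index; } reversed_t;

    static const int *next_item(reversed_t *it) {
        if (it->cur_index == 0) {
            return NULL; /* plays the role of MP_OBJ_STOP_ITERATION */
        }
        it->cur_index -= 1;
        return &it->seq[it->cur_index];
    }

    int main(void) {
        int data[] = { 1, 2, 3 };
        reversed_t it = { data, 3 }; /* start one past the last index */
        for (const int *p; (p = next_item(&it)) != NULL; ) {
            printf("%d\n", *p); /* prints 3 2 1 */
        }
        return 0;
    }
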
diff --git a/py/objstr.c b/py/objstr.c
index 9d3460988..e88479459 100644
--- a/py/objstr.c
+++ b/py/objstr.c
@@ -290,10 +290,16 @@ mp_obj_t mp_obj_str_binary_op(int op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
break;
case MP_BINARY_OP_MULTIPLY: {
- if (!MP_OBJ_IS_SMALL_INT(rhs_in)) {
+ mp_int_t n;
+ if (!mp_obj_get_int_maybe(rhs_in, &n)) {
return MP_OBJ_NULL; // op not supported
}
- int n = MP_OBJ_SMALL_INT_VALUE(rhs_in);
+ if (n <= 0) {
+ if (lhs_type == &mp_type_str) {
+ return MP_OBJ_NEW_QSTR(MP_QSTR_); // empty str
+ }
+ n = 0;
+ }
byte *data;
mp_obj_t s = mp_obj_str_builder_start(lhs_type, lhs_len * n, &data);
mp_seq_multiply(lhs_data, sizeof(*lhs_data), lhs_len, n, data);
@@ -353,7 +359,8 @@ const byte *str_index_to_ptr(const mp_obj_type_t *type, const byte *self_data, u
}
#endif
-STATIC mp_obj_t str_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+// This is used for both bytes and 8-bit strings. This is not used for unicode strings.
+STATIC mp_obj_t bytes_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
mp_obj_type_t *type = mp_obj_get_type(self_in);
GET_STR_DATA_LEN(self_in, self_data, self_len);
if (value == MP_OBJ_SENTINEL) {
@@ -368,11 +375,12 @@ STATIC mp_obj_t str_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
return mp_obj_new_str_of_type(type, self_data + slice.start, slice.stop - slice.start);
}
#endif
- const byte *p = str_index_to_ptr(type, self_data, self_len, index, false);
- if (type == &mp_type_bytes) {
- return MP_OBJ_NEW_SMALL_INT(*p);
+ mp_uint_t index_val = mp_get_index(type, self_len, index, false);
+ // If we have unicode enabled the type will always be bytes, so take the short cut.
+ if (MICROPY_PY_BUILTINS_STR_UNICODE || type == &mp_type_bytes) {
+ return MP_OBJ_NEW_SMALL_INT(self_data[index_val]);
} else {
- return mp_obj_new_str((char*)p, 1, true);
+ return mp_obj_new_str((char*)&self_data[index_val], 1, true);
}
} else {
return MP_OBJ_NULL; // op not supported
@@ -1704,7 +1712,7 @@ const mp_obj_type_t mp_type_str = {
.print = str_print,
.make_new = str_make_new,
.binary_op = mp_obj_str_binary_op,
- .subscr = str_subscr,
+ .subscr = bytes_subscr,
.getiter = mp_obj_new_str_iterator,
.buffer_p = { .get_buffer = mp_obj_str_get_buffer },
.locals_dict = (mp_obj_t)&str_locals_dict,
@@ -1718,7 +1726,7 @@ const mp_obj_type_t mp_type_bytes = {
.print = str_print,
.make_new = bytes_make_new,
.binary_op = mp_obj_str_binary_op,
- .subscr = str_subscr,
+ .subscr = bytes_subscr,
.getiter = mp_obj_new_bytes_iterator,
.buffer_p = { .get_buffer = mp_obj_str_get_buffer },
.locals_dict = (mp_obj_t)&str_locals_dict,
diff --git a/py/objtuple.c b/py/objtuple.c
index 3dade2f74..377fbf543 100644
--- a/py/objtuple.c
+++ b/py/objtuple.c
@@ -137,10 +137,13 @@ mp_obj_t mp_obj_tuple_binary_op(int op, mp_obj_t lhs, mp_obj_t rhs) {
return s;
}
case MP_BINARY_OP_MULTIPLY: {
- if (!MP_OBJ_IS_SMALL_INT(rhs)) {
+ mp_int_t n;
+ if (!mp_obj_get_int_maybe(rhs, &n)) {
return MP_OBJ_NULL; // op not supported
}
- int n = MP_OBJ_SMALL_INT_VALUE(rhs);
+ if (n <= 0) {
+ return mp_const_empty_tuple;
+ }
mp_obj_tuple_t *s = mp_obj_new_tuple(o->len * n, NULL);
mp_seq_multiply(o->items, sizeof(*o->items), o->len, n, s->items);
return s;
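
Note: the str, tuple, and list changes share one behaviour: any object convertible to an int is now accepted as a multiplier, and a non-positive count yields an empty sequence instead of an error. A stand-alone model of the mp_seq_multiply-style repeat loop:

    #include <stdio.h>
    #include <string.h>

    /* repeat len items of size item_sz from src n times into dest,
       mirroring mp_seq_multiply; the caller clamps n to >= 0 first */
    static void seq_multiply(const void *src, size_t item_sz, size_t len,
                             size_t n, void *dest) {
        for (size_t i = 0; i < n; i++) {
            memcpy((char *)dest + i * len * item_sz, src, len * item_sz);
        }
    }

    int main(void) {
        const char s[] = "ab";
        char out[7] = { 0 };
        long n = -2;
        if (n < 0) n = 0;              /* the new clamp: "ab" * -2 == "" */
        seq_multiply(s, 1, 2, (size_t)n, out);
        printf("[%s]\n", out);
        seq_multiply(s, 1, 2, 3, out); /* "ab" * 3 == "ababab" */
        printf("[%s]\n", out);
        return 0;
    }
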
diff --git a/py/parse.c b/py/parse.c
index 2a74c59a8..26713dfe7 100644
--- a/py/parse.c
+++ b/py/parse.c
@@ -578,7 +578,7 @@ mp_parse_node_t mp_parse(mp_lexer_t *lex, mp_parse_input_kind_t input_kind, mp_p
// never emit these rules if they have only 1 argument
// NOTE: can't put atom_paren here because we need it to distinguish, for example, [a,b] from [(a,b)]
// TODO possibly put varargslist_name, varargslist_equal here as well
- if (rule->rule_id == RULE_else_stmt || rule->rule_id == RULE_testlist_comp_3b || rule->rule_id == RULE_import_as_names_paren || rule->rule_id == RULE_typedargslist_name || rule->rule_id == RULE_typedargslist_colon || rule->rule_id == RULE_typedargslist_equal || rule->rule_id == RULE_dictorsetmaker_colon || rule->rule_id == RULE_classdef_2 || rule->rule_id == RULE_with_item_as || rule->rule_id == RULE_assert_stmt_extra || rule->rule_id == RULE_as_name || rule->rule_id == RULE_raise_stmt_from || rule->rule_id == RULE_vfpdef) {
+ if (rule->rule_id == RULE_else_stmt || rule->rule_id == RULE_testlist_comp_3b || rule->rule_id == RULE_import_as_names_paren || rule->rule_id == RULE_typedargslist_name || rule->rule_id == RULE_typedargslist_colon || rule->rule_id == RULE_typedargslist_equal || rule->rule_id == RULE_dictorsetmaker_colon || rule->rule_id == RULE_classdef_2 || rule->rule_id == RULE_with_item_as || rule->rule_id == RULE_assert_stmt_extra || rule->rule_id == RULE_as_name || rule->rule_id == RULE_raise_stmt_from || rule->rule_id == RULE_vfpdef || rule->rule_id == RULE_funcdefrettype) {
emit_rule = false;
}
diff --git a/py/py.mk b/py/py.mk
index f1ae3c3f1..08eeddc29 100644
--- a/py/py.mk
+++ b/py/py.mk
@@ -72,6 +72,7 @@ PY_O_BASENAME = \
objnone.o \
objnamedtuple.o \
objrange.o \
+ objreversed.o \
objset.o \
objslice.o \
objstr.o \
@@ -103,7 +104,8 @@ PY_O_BASENAME = \
smallint.o \
pfenv.o \
pfenv_printf.o \
- ../extmod/moductypes.o
+ ../extmod/moductypes.o \
+ ../extmod/modzlibd.o \
# prepend the build destination prefix to the py object files
PY_O = $(addprefix $(PY_BUILD)/, $(PY_O_BASENAME))
diff --git a/py/qstrdefs.h b/py/qstrdefs.h
index 6470cb950..c83b54c24 100644
--- a/py/qstrdefs.h
+++ b/py/qstrdefs.h
@@ -67,9 +67,13 @@ Q(__lt__)
Q(micropython)
Q(bytecode)
+Q(const)
+
+#if MICROPY_EMIT_X64 || MICROPY_EMIT_THUMB
Q(native)
Q(viper)
-Q(const)
+Q(uint)
+#endif
#if MICROPY_EMIT_INLINE_THUMB
Q(asm_thumb)
@@ -136,7 +140,11 @@ Q(calcsize)
Q(chr)
Q(classmethod)
Q(_collections)
+#if MICROPY_PY_BUILTINS_COMPLEX
Q(complex)
+Q(real)
+Q(imag)
+#endif
Q(dict)
Q(dir)
Q(divmod)
@@ -144,7 +152,9 @@ Q(enumerate)
Q(eval)
Q(exec)
Q(filter)
+#if MICROPY_PY_BUILTINS_FLOAT
Q(float)
+#endif
Q(from_bytes)
Q(getattr)
Q(globals)
@@ -175,7 +185,7 @@ Q(print)
Q(range)
Q(read)
Q(repr)
-Q(set)
+Q(reversed)
Q(sorted)
Q(staticmethod)
Q(sum)
@@ -224,20 +234,8 @@ Q(reverse)
Q(add)
Q(clear)
Q(copy)
-Q(discard)
-Q(difference)
-Q(difference_update)
-Q(intersection)
-Q(intersection_update)
-Q(isdisjoint)
-Q(issubset)
-Q(issuperset)
Q(pop)
Q(remove)
-Q(symmetric_difference)
-Q(symmetric_difference_update)
-Q(union)
-Q(update)
Q(find)
Q(rfind)
Q(rindex)
@@ -267,6 +265,22 @@ Q(iterator)
Q(module)
Q(slice)
+#if MICROPY_PY_BUILTINS_SET
+Q(discard)
+Q(difference)
+Q(difference_update)
+Q(intersection)
+Q(intersection_update)
+Q(isdisjoint)
+Q(issubset)
+Q(issuperset)
+Q(set)
+Q(symmetric_difference)
+Q(symmetric_difference_update)
+Q(union)
+Q(update)
+#endif
+
#if MICROPY_PY_BUILTINS_FROZENSET
Q(frozenset)
#endif
@@ -322,9 +336,11 @@ Q(polar)
Q(rect)
#endif
+#if MICROPY_MEM_STATS
Q(mem_total)
Q(mem_current)
Q(mem_peak)
+#endif
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && (MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0)
Q(alloc_emergency_exception_buf)
@@ -438,3 +454,8 @@ Q(getter)
Q(setter)
Q(deleter)
#endif
+
+#if MICROPY_PY_ZLIBD
+Q(zlibd)
+Q(decompress)
+#endif
diff --git a/py/runtime.c b/py/runtime.c
index 59e47c7ff..8dbdac9d9 100644
--- a/py/runtime.c
+++ b/py/runtime.c
@@ -517,12 +517,6 @@ mp_obj_t mp_call_function_2(mp_obj_t fun, mp_obj_t arg1, mp_obj_t arg2) {
return mp_call_function_n_kw(fun, 2, 0, args);
}
-// wrapper that accepts n_args and n_kw in one argument
-// native emitter can only pass at most 3 arguments to a function
-mp_obj_t mp_call_function_n_kw_for_native(mp_obj_t fun_in, uint n_args_kw, const mp_obj_t *args) {
- return mp_call_function_n_kw(fun_in, n_args_kw & 0xff, (n_args_kw >> 8) & 0xff, args);
-}
-
// args contains, eg: arg0 arg1 key0 value0 key1 value1
mp_obj_t mp_call_function_n_kw(mp_obj_t fun_in, uint n_args, uint n_kw, const mp_obj_t *args) {
// TODO improve this: fun object can specify its type and we parse here the arguments,
@@ -1163,17 +1157,56 @@ NORETURN void mp_not_implemented(const char *msg) {
nlr_raise(mp_obj_new_exception_msg(&mp_type_NotImplementedError, msg));
}
-// these must correspond to the respective enum
+// convert a Micro Python object to a valid native value based on type
+mp_uint_t mp_convert_obj_to_native(mp_obj_t obj, mp_uint_t type) {
+ DEBUG_printf("mp_convert_obj_to_native(%p, " UINT_FMT ")\n", obj, type);
+ switch (type & 3) {
+ case MP_NATIVE_TYPE_OBJ: return (mp_uint_t)obj;
+ case MP_NATIVE_TYPE_BOOL:
+ case MP_NATIVE_TYPE_INT:
+ case MP_NATIVE_TYPE_UINT: return mp_obj_get_int(obj);
+ default: assert(0); return 0;
+ }
+}
+
+// convert a native value to a Micro Python object based on type
+mp_obj_t mp_convert_native_to_obj(mp_uint_t val, mp_uint_t type) {
+ DEBUG_printf("mp_convert_native_to_obj(" UINT_FMT ", " UINT_FMT ")\n", val, type);
+ switch (type & 3) {
+ case MP_NATIVE_TYPE_OBJ: return (mp_obj_t)val;
+ case MP_NATIVE_TYPE_BOOL: return MP_BOOL(val);
+ case MP_NATIVE_TYPE_INT: return mp_obj_new_int(val);
+ case MP_NATIVE_TYPE_UINT: return mp_obj_new_int_from_uint(val);
+ default: assert(0); return mp_const_none;
+ }
+}
+
+// wrapper that accepts n_args and n_kw in one argument
+// (the native emitter can pass at most 3 arguments to a function)
+mp_obj_t mp_native_call_function_n_kw(mp_obj_t fun_in, uint n_args_kw, const mp_obj_t *args) {
+ return mp_call_function_n_kw(fun_in, n_args_kw & 0xff, (n_args_kw >> 8) & 0xff, args);
+}
+
+// wrapper that makes the raise object and raises it
+NORETURN void mp_native_raise(mp_obj_t o) {
+ nlr_raise(mp_make_raise_obj(o));
+}
+
+// these must correspond to the respective enum in runtime0.h
void *const mp_fun_table[MP_F_NUMBER_OF] = {
+ mp_convert_obj_to_native,
+ mp_convert_native_to_obj,
mp_load_const_int,
mp_load_const_dec,
mp_load_const_str,
+ mp_load_const_bytes,
mp_load_name,
mp_load_global,
mp_load_build_class,
mp_load_attr,
mp_load_method,
mp_store_name,
+ mp_store_global,
mp_store_attr,
mp_obj_subscr,
mp_obj_is_true,
@@ -1189,10 +1222,13 @@ void *const mp_fun_table[MP_F_NUMBER_OF] = {
mp_obj_set_store,
#endif
mp_make_function_from_raw_code,
- mp_call_function_n_kw_for_native,
+ mp_native_call_function_n_kw,
mp_call_method_n_kw,
mp_getiter,
mp_iternext,
+ nlr_push,
+ nlr_pop,
+ mp_native_raise,
mp_import_name,
mp_import_from,
mp_import_all,
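
Note: mp_native_call_function_n_kw exists because the native emitters pass at most three register arguments, so both counts travel in one word: n_args in the low byte, n_kw in the next. A quick stand-alone check of the packing:

    #include <stdio.h>

    int main(void) {
        unsigned n_args = 2, n_kw = 1;
        unsigned packed = n_args | (n_kw << 8); /* as built by the emitter */
        /* as unpacked by mp_native_call_function_n_kw */
        printf("n_args=%u n_kw=%u\n", packed & 0xff, (packed >> 8) & 0xff);
        return 0;
    }
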
diff --git a/py/runtime.h b/py/runtime.h
index 2feb24de9..c46087d14 100644
--- a/py/runtime.h
+++ b/py/runtime.h
@@ -87,7 +87,6 @@ mp_obj_t mp_make_function_var_between(int n_args_min, int n_args_max, mp_fun_var
mp_obj_t mp_call_function_0(mp_obj_t fun);
mp_obj_t mp_call_function_1(mp_obj_t fun, mp_obj_t arg);
mp_obj_t mp_call_function_2(mp_obj_t fun, mp_obj_t arg1, mp_obj_t arg2);
-mp_obj_t mp_call_function_n_kw_for_native(mp_obj_t fun_in, uint n_args_kw, const mp_obj_t *args);
mp_obj_t mp_call_function_n_kw(mp_obj_t fun, uint n_args, uint n_kw, const mp_obj_t *args);
mp_obj_t mp_call_method_n_kw(uint n_args, uint n_kw, const mp_obj_t *args);
mp_obj_t mp_call_method_n_kw_var(bool have_self, uint n_args_n_kw, const mp_obj_t *args);
@@ -115,6 +114,12 @@ void mp_import_all(mp_obj_t module);
// Raise NotImplementedError with given message
NORETURN void mp_not_implemented(const char *msg);
+// helper functions for native/viper code
+mp_uint_t mp_convert_obj_to_native(mp_obj_t obj, mp_uint_t type);
+mp_obj_t mp_convert_native_to_obj(mp_uint_t val, mp_uint_t type);
+mp_obj_t mp_native_call_function_n_kw(mp_obj_t fun_in, uint n_args_kw, const mp_obj_t *args);
+NORETURN void mp_native_raise(mp_obj_t o);
+
extern struct _mp_obj_list_t mp_sys_path_obj;
extern struct _mp_obj_list_t mp_sys_argv_obj;
#define mp_sys_path ((mp_obj_t)&mp_sys_path_obj)
diff --git a/py/runtime0.h b/py/runtime0.h
index eea578237..7bb16545f 100644
--- a/py/runtime0.h
+++ b/py/runtime0.h
@@ -39,6 +39,12 @@
*/
#define MP_SCOPE_FLAG_NOFREE 0x40
+// types for native (viper) function signature
+#define MP_NATIVE_TYPE_OBJ (0x00)
+#define MP_NATIVE_TYPE_BOOL (0x01)
+#define MP_NATIVE_TYPE_INT (0x02)
+#define MP_NATIVE_TYPE_UINT (0x03)
+
typedef enum {
MP_UNARY_OP_BOOL, // __bool__
MP_UNARY_OP_LEN, // __len__
@@ -96,15 +102,19 @@ typedef enum {
} mp_binary_op_t;
typedef enum {
- MP_F_LOAD_CONST_INT = 0,
+ MP_F_CONVERT_OBJ_TO_NATIVE = 0,
+ MP_F_CONVERT_NATIVE_TO_OBJ,
+ MP_F_LOAD_CONST_INT,
MP_F_LOAD_CONST_DEC,
MP_F_LOAD_CONST_STR,
+ MP_F_LOAD_CONST_BYTES,
MP_F_LOAD_NAME,
MP_F_LOAD_GLOBAL,
MP_F_LOAD_BUILD_CLASS,
MP_F_LOAD_ATTR,
MP_F_LOAD_METHOD,
MP_F_STORE_NAME,
+ MP_F_STORE_GLOBAL,
MP_F_STORE_ATTR,
MP_F_OBJ_SUBSCR,
MP_F_OBJ_IS_TRUE,
@@ -120,10 +130,13 @@ typedef enum {
MP_F_STORE_SET,
#endif
MP_F_MAKE_FUNCTION_FROM_RAW_CODE,
- MP_F_CALL_FUNCTION_N_KW_FOR_NATIVE,
+ MP_F_NATIVE_CALL_FUNCTION_N_KW,
MP_F_CALL_METHOD_N_KW,
MP_F_GETITER,
MP_F_ITERNEXT,
+ MP_F_NLR_PUSH,
+ MP_F_NLR_POP,
+ MP_F_NATIVE_RAISE,
MP_F_IMPORT_NAME,
MP_F_IMPORT_FROM,
MP_F_IMPORT_ALL,
diff --git a/py/scope.c b/py/scope.c
index 83c2b6e07..2f184d164 100644
--- a/py/scope.c
+++ b/py/scope.c
@@ -83,11 +83,10 @@ void scope_free(scope_t *scope) {
}
id_info_t *scope_find_or_add_id(scope_t *scope, qstr qstr, bool *added) {
- for (int i = 0; i < scope->id_info_len; i++) {
- if (scope->id_info[i].qstr == qstr) {
- *added = false;
- return &scope->id_info[i];
- }
+ id_info_t *id_info = scope_find(scope, qstr);
+ if (id_info != NULL) {
+ *added = false;
+ return id_info;
}
// make sure we have enough memory
@@ -99,7 +98,7 @@ id_info_t *scope_find_or_add_id(scope_t *scope, qstr qstr, bool *added) {
// add new id to end of array of all ids; this seems to match CPython
// important thing is that function arguments are first, but that is
// handled by the compiler because it adds arguments before compiling the body
- id_info_t *id_info = &scope->id_info[scope->id_info_len++];
+ id_info = &scope->id_info[scope->id_info_len++];
id_info->kind = 0;
id_info->flags = 0;
@@ -110,7 +109,7 @@ id_info_t *scope_find_or_add_id(scope_t *scope, qstr qstr, bool *added) {
}
id_info_t *scope_find(scope_t *scope, qstr qstr) {
- for (int i = 0; i < scope->id_info_len; i++) {
+ for (mp_uint_t i = 0; i < scope->id_info_len; i++) {
if (scope->id_info[i].qstr == qstr) {
return &scope->id_info[i];
}
@@ -122,12 +121,7 @@ id_info_t *scope_find_global(scope_t *scope, qstr qstr) {
while (scope->parent != NULL) {
scope = scope->parent;
}
- for (int i = 0; i < scope->id_info_len; i++) {
- if (scope->id_info[i].qstr == qstr) {
- return &scope->id_info[i];
- }
- }
- return NULL;
+ return scope_find(scope, qstr);
}
id_info_t *scope_find_local_in_parent(scope_t *scope, qstr qstr) {
@@ -135,10 +129,9 @@ id_info_t *scope_find_local_in_parent(scope_t *scope, qstr qstr) {
return NULL;
}
for (scope_t *s = scope->parent; s->parent != NULL; s = s->parent) {
- for (int i = 0; i < s->id_info_len; i++) {
- if (s->id_info[i].qstr == qstr) {
- return &s->id_info[i];
- }
+ id_info_t *id = scope_find(s, qstr);
+ if (id != NULL) {
+ return id;
}
}
return NULL;
@@ -147,18 +140,10 @@ id_info_t *scope_find_local_in_parent(scope_t *scope, qstr qstr) {
void scope_close_over_in_parents(scope_t *scope, qstr qstr) {
assert(scope->parent != NULL); // we should have at least 1 parent
for (scope_t *s = scope->parent; s->parent != NULL; s = s->parent) {
- id_info_t *id = NULL;
- for (int i = 0; i < s->id_info_len; i++) {
- if (s->id_info[i].qstr == qstr) {
- id = &s->id_info[i];
- break;
- }
- }
- if (id == NULL) {
- // variable not declared in this scope, so declare it as free and keep searching parents
- bool added;
- id = scope_find_or_add_id(s, qstr, &added);
- assert(added);
+ bool added;
+ id_info_t *id = scope_find_or_add_id(s, qstr, &added);
+ if (added) {
+ // variable not previously declared in this scope, so declare it as free and keep searching parents
id->kind = ID_INFO_KIND_FREE;
} else {
// variable is declared in this scope, so finish