Improve configurability of the native x64/thumb emitters.

With MICROPY_EMIT_X64 and MICROPY_EMIT_THUMB disabled, the respective
emitters and assemblers are not compiled in.  This can significantly
reduce the binary size of the unix version.
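
For example, a port's mpconfigport.h can compile the emitters out
entirely; a minimal sketch, using the option names from this patch and
assuming the usual (0)/(1) convention for option macros:

    #define MICROPY_EMIT_X64          (0) // omit x64 emitter and assembler
    #define MICROPY_EMIT_THUMB        (0) // omit thumb emitter and assembler
    #define MICROPY_EMIT_INLINE_THUMB (0) // omit inline thumb assembler
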
diff --git a/py/asmthumb.c b/py/asmthumb.c
index ee8041a..ba95d80 100644
--- a/py/asmthumb.c
+++ b/py/asmthumb.c
@@ -7,6 +7,9 @@
 #include "mpconfig.h"
 #include "asmthumb.h"
 
+// wrapper around everything in this file
+#if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
+
 #define UNSIGNED_FIT8(x) (((x) & 0xffffff00) == 0)
 #define UNSIGNED_FIT16(x) (((x) & 0xffff0000) == 0)
 #define SIGNED_FIT8(x) (((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80)
@@ -447,3 +450,5 @@
         asm_thumb_write_op16(as, OP_SVC(fun_id));
     }
 }
+
+#endif // MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
diff --git a/py/asmx64.c b/py/asmx64.c
index c425034..ed9ca80 100644
--- a/py/asmx64.c
+++ b/py/asmx64.c
@@ -6,6 +6,10 @@
 
 #include "misc.h"
 #include "asmx64.h"
+#include "mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_X64
 
 #if defined(__OpenBSD__) || defined(__MACH__)
 #define MAP_ANONYMOUS MAP_ANON
@@ -620,3 +624,5 @@
     asm_x64_write_word32(as, ptr - (void*)(as->code_base + as->code_offset + 4));
     */
 }
+
+#endif // MICROPY_EMIT_X64
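
Note the mpconfig.h include added to asmx64.c above: the guard only
works if the option macro is visible at the point of the #if test.  A
minimal sketch of the failure mode the include avoids:

    // without mpconfig.h, MICROPY_EMIT_X64 would be undefined here; the
    // preprocessor evaluates an undefined identifier in #if as 0, so the
    // assembler would be compiled out regardless of the port's configuration
    #include "mpconfig.h" // must precede the option test
    #if MICROPY_EMIT_X64
    // ... assembler implementation ...
    #endif // MICROPY_EMIT_X64
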
diff --git a/py/compile.c b/py/compile.c
index 68ac208..8db7c6d 100644
--- a/py/compile.c
+++ b/py/compile.c
@@ -3083,11 +3083,13 @@
     // compile pass 2 and 3
 #if !MICROPY_EMIT_CPYTHON
     emit_t *emit_bc = NULL;
+#if MICROPY_EMIT_NATIVE
     emit_t *emit_native = NULL;
 #endif
 #if MICROPY_EMIT_INLINE_THUMB
     emit_inline_asm_t *emit_inline_thumb = NULL;
 #endif
+#endif // !MICROPY_EMIT_CPYTHON
     for (scope_t *s = comp->scope_head; s != NULL && !comp->had_error; s = s->next) {
         if (false) {
             // dummy
@@ -3115,6 +3117,8 @@
             comp->emit_method_table = &emit_cpython_method_table;
 #else
             switch (s->emit_options) {
+
+#if MICROPY_EMIT_NATIVE
                 case EMIT_OPT_NATIVE_PYTHON:
                 case EMIT_OPT_VIPER:
 #if MICROPY_EMIT_X64
@@ -3131,6 +3135,7 @@
                     comp->emit = emit_native;
                     comp->emit_method_table->set_native_types(comp->emit, s->emit_options == EMIT_OPT_VIPER);
                     break;
+#endif // MICROPY_EMIT_NATIVE
 
                 default:
                     if (emit_bc == NULL) {
@@ -3140,7 +3145,7 @@
                     comp->emit_method_table = &emit_bc_method_table;
                     break;
             }
-#endif
+#endif // !MICROPY_EMIT_CPYTHON
 
             // compile pass 2 and pass 3
             compile_scope(comp, s, PASS_2);
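
The switch in compile.c above is gated on MICROPY_EMIT_NATIVE.  A
plausible sketch of how that umbrella option can be derived from the
per-architecture options (the derivation shown is an assumption, not
part of this diff):

    // e.g. in mpconfig.h: "native" means any native emitter is enabled
    #ifndef MICROPY_EMIT_NATIVE
    #define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_THUMB)
    #endif
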
diff --git a/py/emitcpy.c b/py/emitcpy.c
index 652617c..7b2d50f 100644
--- a/py/emitcpy.c
+++ b/py/emitcpy.c
@@ -13,6 +13,7 @@
 #include "runtime0.h"
 #include "emit.h"
 
+// wrapper around everything in this file
 #if MICROPY_EMIT_CPYTHON
 
 struct _emit_t {
diff --git a/py/emitnative.c b/py/emitnative.c
index a29922d..cc00c57 100644
--- a/py/emitnative.c
+++ b/py/emitnative.c
@@ -34,7 +34,7 @@
 #include "runtime.h"
 
 // wrapper around everything in this file
-#if N_X64 || N_THUMB
+#if (MICROPY_EMIT_X64 && N_X64) || (MICROPY_EMIT_THUMB && N_THUMB)
 
 #if N_X64
 
@@ -1319,4 +1319,4 @@
     emit_native_yield_from,
 };
 
-#endif // N_X64 || N_THUMB
+#endif // (MICROPY_EMIT_X64 && N_X64) || (MICROPY_EMIT_THUMB && N_THUMB)
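
The combined guard matters because emitnative.c is compiled twice, once
per target, as the Makefile rules below show; each object keeps its code
only when both its -DN_* selector and the matching port option are set:

    // emitnx64.o   is built with -DN_X64:   code only if MICROPY_EMIT_X64
    // emitnthumb.o is built with -DN_THUMB: code only if MICROPY_EMIT_THUMB
    // with both options disabled, both objects are empty translation units
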
diff --git a/unix/Makefile b/unix/Makefile
index 38d6ba8..a5c6ddc 100644
--- a/unix/Makefile
+++ b/unix/Makefile
@@ -83,10 +83,10 @@
 $(BUILD)/%.o: $(PYSRC)/%.c mpconfigport.h
 	$(CC) $(CFLAGS) -c -o $@ $<
 
-$(BUILD)/emitnx64.o: $(PYSRC)/emitnative.c $(PYSRC)/emit.h
+$(BUILD)/emitnx64.o: $(PYSRC)/emitnative.c $(PYSRC)/emit.h mpconfigport.h
 	$(CC) $(CFLAGS) -DN_X64 -c -o $@ $<
 
-$(BUILD)/emitnthumb.o: $(PYSRC)/emitnative.c $(PYSRC)/emit.h
+$(BUILD)/emitnthumb.o: $(PYSRC)/emitnative.c $(PYSRC)/emit.h mpconfigport.h
 	$(CC) $(CFLAGS) -DN_THUMB -c -o $@ $<
 
 # optimising vm for speed, adds only a small amount to code size but makes a huge difference to speed (20% faster)
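
The mpconfigport.h prerequisites added above mean that toggling
MICROPY_EMIT_X64 or MICROPY_EMIT_THUMB now triggers a rebuild of the
two emitnative objects, just as the generic %.o pattern rule already
does for the rest of py/; without them, a stale object could keep an
emitter in (or out of) the binary after a configuration change.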