author    pommicket <pommicket@gmail.com>    2022-02-17 13:22:13 -0500
committer pommicket <pommicket@gmail.com>    2022-02-17 13:22:13 -0500
commit    e900dd8d6f2ff7cef66fbd31898d375b71ef53d6 (patch)
tree      ae732e8f5d6163acb78355ae4e5ca69d6e0c995e /05/tcc-0.9.25/lib
parent    6e1158f49aa014b801b171b358c47389e7f9964e (diff)
producing a (non-working) executable for tcc
Diffstat (limited to '05/tcc-0.9.25/lib')
-rw-r--r--  05/tcc-0.9.25/lib/alloca86-bt.S    45
-rw-r--r--  05/tcc-0.9.25/lib/alloca86.S       33
-rw-r--r--  05/tcc-0.9.25/lib/bcheck.c        868
-rw-r--r--  05/tcc-0.9.25/lib/libtcc1.c       607
4 files changed, 1553 insertions, 0 deletions
diff --git a/05/tcc-0.9.25/lib/alloca86-bt.S b/05/tcc-0.9.25/lib/alloca86-bt.S
new file mode 100644
index 0000000..994da20
--- /dev/null
+++ b/05/tcc-0.9.25/lib/alloca86-bt.S
@@ -0,0 +1,45 @@
+/* ---------------------------------------------- */
+/* alloca86-bt.S */
+
+#include "../config.h"
+
+.globl __bound_alloca
+
+__bound_alloca:
+ pop %edx
+ pop %eax
+ mov %eax, %ecx
+ add $3,%eax
+ and $-4,%eax
+ jz p6
+
+#ifdef TCC_TARGET_PE
+p4:
+ cmp $4096,%eax
+ jle p5
+ sub $4096,%esp
+ sub $4096,%eax
+ test %eax,(%esp)
+ jmp p4
+
+p5:
+#endif
+
+ sub %eax,%esp
+ mov %esp,%eax
+
+ push %edx
+ push %eax
+ push %ecx
+ push %eax
+ call __bound_new_region
+ add $8, %esp
+ pop %eax
+ pop %edx
+
+p6:
+ push %edx
+ push %edx
+ ret
+
+/* ---------------------------------------------- */
diff --git a/05/tcc-0.9.25/lib/alloca86.S b/05/tcc-0.9.25/lib/alloca86.S
new file mode 100644
index 0000000..fb208a0
--- /dev/null
+++ b/05/tcc-0.9.25/lib/alloca86.S
@@ -0,0 +1,33 @@
+/* ---------------------------------------------- */
+/* alloca86.S */
+
+#include "../config.h"
+
+.globl alloca
+
+alloca:
+ pop %edx
+ pop %eax
+ add $3,%eax
+ and $-4,%eax
+ jz p3
+
+#ifdef TCC_TARGET_PE
+p1:
+ cmp $4096,%eax
+ jle p2
+ sub $4096,%esp
+ sub $4096,%eax
+ test %eax,(%esp)
+ jmp p1
+p2:
+#endif
+
+ sub %eax,%esp
+ mov %esp,%eax
+p3:
+ push %edx
+ push %edx
+ ret
+
+/* ---------------------------------------------- */
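
Both alloca variants use the same return trick: the return address is popped into %edx up front and pushed twice at the end, so ret consumes one copy and the caller's usual add $4,%esp (which would normally discard the size argument) discards the duplicate, leaving the allocated block just below the adjusted stack pointer. The -bt variant additionally reports the pointer/size pair to __bound_new_region from bcheck.c. The TCC_TARGET_PE loop exists because Windows grows the stack through a single guard page, so a large allocation has to touch each 4096-byte page in order rather than jump past the guard. A rough C rendering of that probe loop, for illustration only (the helper name is made up):

    #include <stddef.h>

    /* mirrors the cmp/sub/test loop between p1 and p2: step the stack
       pointer down one page at a time and read each page, the way
       "test %eax,(%esp)" does, so the guard page is faulted in order */
    void probe_stack_pages(volatile char *sp, size_t size)
    {
        while (size > 4096) {
            sp -= 4096;
            (void)*sp;        /* touch the new page */
            size -= 4096;
        }
    }
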
diff --git a/05/tcc-0.9.25/lib/bcheck.c b/05/tcc-0.9.25/lib/bcheck.c
new file mode 100644
index 0000000..0ec2a4b
--- /dev/null
+++ b/05/tcc-0.9.25/lib/bcheck.c
@@ -0,0 +1,868 @@
+/*
+ * Tiny C Memory and bounds checker
+ *
+ * Copyright (c) 2002 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#if !defined(__FreeBSD__) && !defined(__DragonFly__) && !defined(__OpenBSD__)
+#include <malloc.h>
+#endif
+
+//#define BOUND_DEBUG
+
+/* define so that the bound array is static (faster, but uses memory
+ even if bound checking is not used) */
+//#define BOUND_STATIC
+
+/* use malloc hooks. Currently the code is not reliable without them */
+#define CONFIG_TCC_MALLOC_HOOKS
+
+#define HAVE_MEMALIGN
+
+#if defined(__FreeBSD__) || defined(__DragonFly__) || defined(__dietlibc__) \
+ || defined(__UCLIBC__) || defined(__OpenBSD__)
+#warning Bound checking not fully supported in this environment.
+#undef CONFIG_TCC_MALLOC_HOOKS
+#undef HAVE_MEMALIGN
+#endif
+
+#define BOUND_T1_BITS 13
+#define BOUND_T2_BITS 11
+#define BOUND_T3_BITS (32 - BOUND_T1_BITS - BOUND_T2_BITS)
+
+#define BOUND_T1_SIZE (1 << BOUND_T1_BITS)
+#define BOUND_T2_SIZE (1 << BOUND_T2_BITS)
+#define BOUND_T3_SIZE (1 << BOUND_T3_BITS)
+#define BOUND_E_BITS 4
+
+#define BOUND_T23_BITS (BOUND_T2_BITS + BOUND_T3_BITS)
+#define BOUND_T23_SIZE (1 << BOUND_T23_BITS)
+
+
+/* this pointer is returned when a bound check fails */
+#define INVALID_POINTER ((void *)(-2))
+/* size of an empty region */
+#define EMPTY_SIZE 0xffffffff
+/* size of an invalid region */
+#define INVALID_SIZE 0
+
+typedef struct BoundEntry {
+ unsigned long start;
+ unsigned long size;
+ struct BoundEntry *next;
+ unsigned long is_invalid; /* true if pointers outside region are invalid */
+} BoundEntry;
+
+/* external interface */
+void __bound_init(void);
+void __bound_new_region(void *p, unsigned long size);
+int __bound_delete_region(void *p);
+
+#define FASTCALL __attribute__((regparm(3)))
+
+void *__bound_malloc(size_t size, const void *caller);
+void *__bound_memalign(size_t size, size_t align, const void *caller);
+void __bound_free(void *ptr, const void *caller);
+void *__bound_realloc(void *ptr, size_t size, const void *caller);
+static void *libc_malloc(size_t size);
+static void libc_free(void *ptr);
+static void install_malloc_hooks(void);
+static void restore_malloc_hooks(void);
+
+#ifdef CONFIG_TCC_MALLOC_HOOKS
+static void *saved_malloc_hook;
+static void *saved_free_hook;
+static void *saved_realloc_hook;
+static void *saved_memalign_hook;
+#endif
+
+/* linker definitions */
+extern char _end;
+
+/* TCC definitions */
+extern char __bounds_start; /* start of static bounds table */
+/* error message, just for TCC */
+const char *__bound_error_msg;
+
+/* runtime error output */
+extern void rt_error(unsigned long pc, const char *fmt, ...);
+
+#ifdef BOUND_STATIC
+static BoundEntry *__bound_t1[BOUND_T1_SIZE]; /* page table */
+#else
+static BoundEntry **__bound_t1; /* page table */
+#endif
+static BoundEntry *__bound_empty_t2; /* empty page, for unused pages */
+static BoundEntry *__bound_invalid_t2; /* invalid page, for invalid pointers */
+
+static BoundEntry *__bound_find_region(BoundEntry *e1, void *p)
+{
+ unsigned long addr, tmp;
+ BoundEntry *e;
+
+ e = e1;
+ while (e != NULL) {
+ addr = (unsigned long)p;
+ addr -= e->start;
+ if (addr <= e->size) {
+ /* put region at the head */
+ tmp = e1->start;
+ e1->start = e->start;
+ e->start = tmp;
+ tmp = e1->size;
+ e1->size = e->size;
+ e->size = tmp;
+ return e1;
+ }
+ e = e->next;
+ }
+ /* no entry found: return empty entry or invalid entry */
+ if (e1->is_invalid)
+ return __bound_invalid_t2;
+ else
+ return __bound_empty_t2;
+}
+
+/* print a bound error message */
+static void bound_error(const char *fmt, ...)
+{
+ __bound_error_msg = fmt;
+ *(int *)0 = 0; /* force a runtime error */
+}
+
+static void bound_alloc_error(void)
+{
+ bound_error("not enough memory for bound checking code");
+}
+
+/* currently, tcc cannot compile this because we use GNU C extensions */
+#if !defined(__TINYC__)
+
+/* return '(p + offset)' for pointer arithmetic (a pointer can reach
+ the end of a region in this case) */
+void * FASTCALL __bound_ptr_add(void *p, int offset)
+{
+ unsigned long addr = (unsigned long)p;
+ BoundEntry *e;
+#if defined(BOUND_DEBUG)
+ printf("add: 0x%x %d\n", (int)p, offset);
+#endif
+
+ e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
+ e = (BoundEntry *)((char *)e +
+ ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
+ ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
+ addr -= e->start;
+ if (addr > e->size) {
+ e = __bound_find_region(e, p);
+ addr = (unsigned long)p - e->start;
+ }
+ addr += offset;
+ if (addr > e->size)
+ return INVALID_POINTER; /* return an invalid pointer */
+ return p + offset;
+}
+
+/* return '(p + offset)' for pointer indirection (the resulting pointer
+ must be strictly inside the region) */
+#define BOUND_PTR_INDIR(dsize) \
+void * FASTCALL __bound_ptr_indir ## dsize (void *p, int offset) \
+{ \
+ unsigned long addr = (unsigned long)p; \
+ BoundEntry *e; \
+ \
+ e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)]; \
+ e = (BoundEntry *)((char *)e + \
+ ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) & \
+ ((BOUND_T2_SIZE - 1) << BOUND_E_BITS))); \
+ addr -= e->start; \
+ if (addr > e->size) { \
+ e = __bound_find_region(e, p); \
+ addr = (unsigned long)p - e->start; \
+ } \
+ addr += offset + dsize; \
+ if (addr > e->size) \
+ return INVALID_POINTER; /* return an invalid pointer */ \
+ return p + offset; \
+}
+
+#ifdef __i386__
+/* return the frame pointer of the caller */
+#define GET_CALLER_FP(fp)\
+{\
+ unsigned long *fp1;\
+ __asm__ __volatile__ ("movl %%ebp,%0" :"=g" (fp1));\
+ fp = fp1[0];\
+}
+#else
+#error put code to extract the calling frame pointer
+#endif
+
+/* called when entering a function to add all the local regions */
+void FASTCALL __bound_local_new(void *p1)
+{
+ unsigned long addr, size, fp, *p = p1;
+ GET_CALLER_FP(fp);
+ for(;;) {
+ addr = p[0];
+ if (addr == 0)
+ break;
+ addr += fp;
+ size = p[1];
+ p += 2;
+ __bound_new_region((void *)addr, size);
+ }
+}
+
+/* called when leaving a function to delete all the local regions */
+void FASTCALL __bound_local_delete(void *p1)
+{
+ unsigned long addr, fp, *p = p1;
+ GET_CALLER_FP(fp);
+ for(;;) {
+ addr = p[0];
+ if (addr == 0)
+ break;
+ addr += fp;
+ p += 2;
+ __bound_delete_region((void *)addr);
+ }
+}
+
+#else
+
+void __bound_local_new(void *p)
+{
+}
+void __bound_local_delete(void *p)
+{
+}
+
+void *__bound_ptr_add(void *p, int offset)
+{
+ return p + offset;
+}
+
+#define BOUND_PTR_INDIR(dsize) \
+void *__bound_ptr_indir ## dsize (void *p, int offset) \
+{ \
+ return p + offset; \
+}
+#endif
+
+BOUND_PTR_INDIR(1)
+BOUND_PTR_INDIR(2)
+BOUND_PTR_INDIR(4)
+BOUND_PTR_INDIR(8)
+BOUND_PTR_INDIR(12)
+BOUND_PTR_INDIR(16)
+
+static BoundEntry *__bound_new_page(void)
+{
+ BoundEntry *page;
+ int i;
+
+ page = libc_malloc(sizeof(BoundEntry) * BOUND_T2_SIZE);
+ if (!page)
+ bound_alloc_error();
+ for(i=0;i<BOUND_T2_SIZE;i++) {
+ /* put empty entries */
+ page[i].start = 0;
+ page[i].size = EMPTY_SIZE;
+ page[i].next = NULL;
+ page[i].is_invalid = 0;
+ }
+ return page;
+}
+
+/* currently we use malloc(). Should use bound_new_page() */
+static BoundEntry *bound_new_entry(void)
+{
+ BoundEntry *e;
+ e = libc_malloc(sizeof(BoundEntry));
+ return e;
+}
+
+static void bound_free_entry(BoundEntry *e)
+{
+ libc_free(e);
+}
+
+static inline BoundEntry *get_page(int index)
+{
+ BoundEntry *page;
+ page = __bound_t1[index];
+ if (page == __bound_empty_t2 || page == __bound_invalid_t2) {
+ /* create a new page if necessary */
+ page = __bound_new_page();
+ __bound_t1[index] = page;
+ }
+ return page;
+}
+
+/* mark a region as being invalid (can only be used during init) */
+static void mark_invalid(unsigned long addr, unsigned long size)
+{
+ unsigned long start, end;
+ BoundEntry *page;
+ int t1_start, t1_end, i, j, t2_start, t2_end;
+
+ start = addr;
+ end = addr + size;
+
+ t2_start = (start + BOUND_T3_SIZE - 1) >> BOUND_T3_BITS;
+ if (end != 0)
+ t2_end = end >> BOUND_T3_BITS;
+ else
+ t2_end = 1 << (BOUND_T1_BITS + BOUND_T2_BITS);
+
+#if 0
+ printf("mark_invalid: start = %x %x\n", t2_start, t2_end);
+#endif
+
+ /* first we handle full pages */
+ t1_start = (t2_start + BOUND_T2_SIZE - 1) >> BOUND_T2_BITS;
+ t1_end = t2_end >> BOUND_T2_BITS;
+
+ i = t2_start & (BOUND_T2_SIZE - 1);
+ j = t2_end & (BOUND_T2_SIZE - 1);
+
+ if (t1_start == t1_end) {
+ page = get_page(t2_start >> BOUND_T2_BITS);
+ for(; i < j; i++) {
+ page[i].size = INVALID_SIZE;
+ page[i].is_invalid = 1;
+ }
+ } else {
+ if (i > 0) {
+ page = get_page(t2_start >> BOUND_T2_BITS);
+ for(; i < BOUND_T2_SIZE; i++) {
+ page[i].size = INVALID_SIZE;
+ page[i].is_invalid = 1;
+ }
+ }
+ for(i = t1_start; i < t1_end; i++) {
+ __bound_t1[i] = __bound_invalid_t2;
+ }
+ if (j != 0) {
+ page = get_page(t1_end);
+ for(i = 0; i < j; i++) {
+ page[i].size = INVALID_SIZE;
+ page[i].is_invalid = 1;
+ }
+ }
+ }
+}
+
+void __bound_init(void)
+{
+ int i;
+ BoundEntry *page;
+ unsigned long start, size;
+ int *p;
+
+ /* save malloc hooks and install bound check hooks */
+ install_malloc_hooks();
+
+#ifndef BOUND_STATIC
+ __bound_t1 = libc_malloc(BOUND_T1_SIZE * sizeof(BoundEntry *));
+ if (!__bound_t1)
+ bound_alloc_error();
+#endif
+ __bound_empty_t2 = __bound_new_page();
+ for(i=0;i<BOUND_T1_SIZE;i++) {
+ __bound_t1[i] = __bound_empty_t2;
+ }
+
+ page = __bound_new_page();
+ for(i=0;i<BOUND_T2_SIZE;i++) {
+ /* put invalid entries */
+ page[i].start = 0;
+ page[i].size = INVALID_SIZE;
+ page[i].next = NULL;
+ page[i].is_invalid = 1;
+ }
+ __bound_invalid_t2 = page;
+
+ /* invalid pointer zone */
+ start = (unsigned long)INVALID_POINTER & ~(BOUND_T23_SIZE - 1);
+ size = BOUND_T23_SIZE;
+ mark_invalid(start, size);
+
+#if !defined(__TINYC__) && defined(CONFIG_TCC_MALLOC_HOOKS)
+ /* the malloc zone is also marked invalid. This is only possible
+ with hooks, because all libraries must use the same malloc. The
+ real solution would be to build a dedicated malloc for tcc. */
+ start = (unsigned long)&_end;
+ size = 128 * 0x100000;
+ mark_invalid(start, size);
+#endif
+
+ /* add all static bound check values */
+ p = (int *)&__bounds_start;
+ while (p[0] != 0) {
+ __bound_new_region((void *)p[0], p[1]);
+ p += 2;
+ }
+}
+
+static inline void add_region(BoundEntry *e,
+ unsigned long start, unsigned long size)
+{
+ BoundEntry *e1;
+ if (e->start == 0) {
+ /* no region : add it */
+ e->start = start;
+ e->size = size;
+ } else {
+ /* regions already in the list: add the new one at the head */
+ e1 = bound_new_entry();
+ e1->start = e->start;
+ e1->size = e->size;
+ e1->next = e->next;
+ e->start = start;
+ e->size = size;
+ e->next = e1;
+ }
+}
+
+/* create a new region. It should not already exist in the region list */
+void __bound_new_region(void *p, unsigned long size)
+{
+ unsigned long start, end;
+ BoundEntry *page, *e, *e2;
+ int t1_start, t1_end, i, t2_start, t2_end;
+
+ start = (unsigned long)p;
+ end = start + size;
+ t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
+ t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);
+
+ /* start */
+ page = get_page(t1_start);
+ t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
+ ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
+ t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
+ ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
+#ifdef BOUND_DEBUG
+ printf("new %lx %lx %x %x %x %x\n",
+ start, end, t1_start, t1_end, t2_start, t2_end);
+#endif
+
+ e = (BoundEntry *)((char *)page + t2_start);
+ add_region(e, start, size);
+
+ if (t1_end == t1_start) {
+ /* same ending page */
+ e2 = (BoundEntry *)((char *)page + t2_end);
+ if (e2 > e) {
+ e++;
+ for(;e<e2;e++) {
+ e->start = start;
+ e->size = size;
+ }
+ add_region(e, start, size);
+ }
+ } else {
+ /* mark until end of page */
+ e2 = page + BOUND_T2_SIZE;
+ e++;
+ for(;e<e2;e++) {
+ e->start = start;
+ e->size = size;
+ }
+ /* mark intermediate pages, if any */
+ for(i=t1_start+1;i<t1_end;i++) {
+ page = get_page(i);
+ e2 = page + BOUND_T2_SIZE;
+ for(e=page;e<e2;e++) {
+ e->start = start;
+ e->size = size;
+ }
+ }
+ /* last page */
+ page = get_page(t1_end);
+ e2 = (BoundEntry *)((char *)page + t2_end);
+ for(e=page;e<e2;e++) {
+ e->start = start;
+ e->size = size;
+ }
+ add_region(e, start, size);
+ }
+}
+
+/* delete a region */
+static inline void delete_region(BoundEntry *e,
+ void *p, unsigned long empty_size)
+{
+ unsigned long addr;
+ BoundEntry *e1;
+
+ addr = (unsigned long)p;
+ addr -= e->start;
+ if (addr <= e->size) {
+ /* region found is first one */
+ e1 = e->next;
+ if (e1 == NULL) {
+ /* no more region: mark it empty */
+ e->start = 0;
+ e->size = empty_size;
+ } else {
+ /* copy next region in head */
+ e->start = e1->start;
+ e->size = e1->size;
+ e->next = e1->next;
+ bound_free_entry(e1);
+ }
+ } else {
+ /* find the matching region */
+ for(;;) {
+ e1 = e;
+ e = e->next;
+ /* region not found: do nothing */
+ if (e == NULL)
+ break;
+ addr = (unsigned long)p - e->start;
+ if (addr <= e->size) {
+ /* found: remove entry */
+ e1->next = e->next;
+ bound_free_entry(e);
+ break;
+ }
+ }
+ }
+}
+
+/* WARNING: 'p' must be the starting point of the region. */
+/* return non zero if error */
+int __bound_delete_region(void *p)
+{
+ unsigned long start, end, addr, size, empty_size;
+ BoundEntry *page, *e, *e2;
+ int t1_start, t1_end, t2_start, t2_end, i;
+
+ start = (unsigned long)p;
+ t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
+ t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
+ ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
+
+ /* find region size */
+ page = __bound_t1[t1_start];
+ e = (BoundEntry *)((char *)page + t2_start);
+ addr = start - e->start;
+ if (addr > e->size)
+ e = __bound_find_region(e, p);
+ /* test if invalid region */
+ if (e->size == EMPTY_SIZE || (unsigned long)p != e->start)
+ return -1;
+ /* compute the size we put in invalid regions */
+ if (e->is_invalid)
+ empty_size = INVALID_SIZE;
+ else
+ empty_size = EMPTY_SIZE;
+ size = e->size;
+ end = start + size;
+
+ /* now we can free each entry */
+ t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);
+ t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
+ ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
+
+ delete_region(e, p, empty_size);
+ if (t1_end == t1_start) {
+ /* same ending page */
+ e2 = (BoundEntry *)((char *)page + t2_end);
+ if (e2 > e) {
+ e++;
+ for(;e<e2;e++) {
+ e->start = 0;
+ e->size = empty_size;
+ }
+ delete_region(e, p, empty_size);
+ }
+ } else {
+ /* mark until end of page */
+ e2 = page + BOUND_T2_SIZE;
+ e++;
+ for(;e<e2;e++) {
+ e->start = 0;
+ e->size = empty_size;
+ }
+ /* mark intermediate pages, if any */
+ /* XXX: should free them */
+ for(i=t1_start+1;i<t1_end;i++) {
+ page = get_page(i);
+ e2 = page + BOUND_T2_SIZE;
+ for(e=page;e<e2;e++) {
+ e->start = 0;
+ e->size = empty_size;
+ }
+ }
+ /* last page */
+ page = get_page(t1_end);
+ e2 = (BoundEntry *)((char *)page + t2_end);
+ for(e=page;e<e2;e++) {
+ e->start = 0;
+ e->size = empty_size;
+ }
+ delete_region(e, p, empty_size);
+ }
+ return 0;
+}
+
+/* return the size of the region starting at p, or EMPTY_SIZE if no
+ such region exists. */
+static unsigned long get_region_size(void *p)
+{
+ unsigned long addr = (unsigned long)p;
+ BoundEntry *e;
+
+ e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
+ e = (BoundEntry *)((char *)e +
+ ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
+ ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
+ addr -= e->start;
+ if (addr > e->size)
+ e = __bound_find_region(e, p);
+ if (e->start != (unsigned long)p)
+ return EMPTY_SIZE;
+ return e->size;
+}
+
+/* patched memory functions */
+
+static void install_malloc_hooks(void)
+{
+#ifdef CONFIG_TCC_MALLOC_HOOKS
+ saved_malloc_hook = __malloc_hook;
+ saved_free_hook = __free_hook;
+ saved_realloc_hook = __realloc_hook;
+ saved_memalign_hook = __memalign_hook;
+ __malloc_hook = __bound_malloc;
+ __free_hook = __bound_free;
+ __realloc_hook = __bound_realloc;
+ __memalign_hook = __bound_memalign;
+#endif
+}
+
+static void restore_malloc_hooks(void)
+{
+#ifdef CONFIG_TCC_MALLOC_HOOKS
+ __malloc_hook = saved_malloc_hook;
+ __free_hook = saved_free_hook;
+ __realloc_hook = saved_realloc_hook;
+ __memalign_hook = saved_memalign_hook;
+#endif
+}
+
+static void *libc_malloc(size_t size)
+{
+ void *ptr;
+ restore_malloc_hooks();
+ ptr = malloc(size);
+ install_malloc_hooks();
+ return ptr;
+}
+
+static void libc_free(void *ptr)
+{
+ restore_malloc_hooks();
+ free(ptr);
+ install_malloc_hooks();
+}
+
+/* XXX: we should use a malloc which ensures that it is unlikely that
+ two malloc'ed blocks get the same address if frees happen in
+ between. */
+void *__bound_malloc(size_t size, const void *caller)
+{
+ void *ptr;
+
+ /* we allocate one more byte to ensure the regions will be
+ separated by at least one byte. With the glibc malloc, this may
+ in fact not be necessary */
+ ptr = libc_malloc(size + 1);
+
+ if (!ptr)
+ return NULL;
+ __bound_new_region(ptr, size);
+ return ptr;
+}
+
+void *__bound_memalign(size_t size, size_t align, const void *caller)
+{
+ void *ptr;
+
+ restore_malloc_hooks();
+
+#ifndef HAVE_MEMALIGN
+ if (align > 4) {
+ /* XXX: handle it ? */
+ ptr = NULL;
+ } else {
+ /* we suppose that malloc aligns to at least four bytes */
+ ptr = malloc(size + 1);
+ }
+#else
+ /* we allocate one more byte to ensure the regions will be
+ separated by at least one byte. With the glibc malloc, this may
+ in fact not be necessary */
+ ptr = memalign(align, size + 1); /* memalign takes (alignment, size) */
+#endif
+
+ install_malloc_hooks();
+
+ if (!ptr)
+ return NULL;
+ __bound_new_region(ptr, size);
+ return ptr;
+}
+
+void __bound_free(void *ptr, const void *caller)
+{
+ if (ptr == NULL)
+ return;
+ if (__bound_delete_region(ptr) != 0)
+ bound_error("freeing invalid region");
+
+ libc_free(ptr);
+}
+
+void *__bound_realloc(void *ptr, size_t size, const void *caller)
+{
+ void *ptr1;
+ int old_size;
+
+ if (size == 0) {
+ __bound_free(ptr, caller);
+ return NULL;
+ } else {
+ ptr1 = __bound_malloc(size, caller);
+ if (ptr == NULL || ptr1 == NULL)
+ return ptr1;
+ old_size = get_region_size(ptr);
+ if (old_size == EMPTY_SIZE)
+ bound_error("realloc'ing invalid pointer");
+ memcpy(ptr1, ptr, old_size);
+ __bound_free(ptr, caller);
+ return ptr1;
+ }
+}
+
+#ifndef CONFIG_TCC_MALLOC_HOOKS
+void *__bound_calloc(size_t nmemb, size_t size)
+{
+ void *ptr;
+ size = size * nmemb;
+ ptr = __bound_malloc(size, NULL);
+ if (!ptr)
+ return NULL;
+ memset(ptr, 0, size);
+ return ptr;
+}
+#endif
+
+#if 0
+static void bound_dump(void)
+{
+ BoundEntry *page, *e;
+ int i, j;
+
+ printf("region dump:\n");
+ for(i=0;i<BOUND_T1_SIZE;i++) {
+ page = __bound_t1[i];
+ for(j=0;j<BOUND_T2_SIZE;j++) {
+ e = page + j;
+ /* do not print invalid or empty entries */
+ if (e->size != EMPTY_SIZE && e->start != 0) {
+ printf("%08x:",
+ (i << (BOUND_T2_BITS + BOUND_T3_BITS)) +
+ (j << BOUND_T3_BITS));
+ do {
+ printf(" %08lx:%08lx", e->start, e->start + e->size);
+ e = e->next;
+ } while (e != NULL);
+ printf("\n");
+ }
+ }
+ }
+}
+#endif
+
+/* some useful checked functions */
+
+/* check that (p ... p + size - 1) lies inside 'p' region, if any */
+static void __bound_check(const void *p, size_t size)
+{
+ if (size == 0)
+ return;
+ p = __bound_ptr_add((void *)p, size);
+ if (p == INVALID_POINTER)
+ bound_error("invalid pointer");
+}
+
+void *__bound_memcpy(void *dst, const void *src, size_t size)
+{
+ __bound_check(dst, size);
+ __bound_check(src, size);
+ /* check also region overlap */
+ if (src >= dst && src < dst + size)
+ bound_error("overlapping regions in memcpy()");
+ return memcpy(dst, src, size);
+}
+
+void *__bound_memmove(void *dst, const void *src, size_t size)
+{
+ __bound_check(dst, size);
+ __bound_check(src, size);
+ return memmove(dst, src, size);
+}
+
+void *__bound_memset(void *dst, int c, size_t size)
+{
+ __bound_check(dst, size);
+ return memset(dst, c, size);
+}
+
+/* XXX: could be optimized */
+int __bound_strlen(const char *s)
+{
+ const char *p;
+ int len;
+
+ len = 0;
+ for(;;) {
+ p = __bound_ptr_indir1((char *)s, len);
+ if (p == INVALID_POINTER)
+ bound_error("bad pointer in strlen()");
+ if (*p == '\0')
+ break;
+ len++;
+ }
+ return len;
+}
+
+char *__bound_strcpy(char *dst, const char *src)
+{
+ int len;
+ len = __bound_strlen(src);
+ return __bound_memcpy(dst, src, len + 1);
+}
+
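
bcheck.c keeps its metadata in a two-level table indexed by the pointer value: the top BOUND_T1_BITS (13) bits pick a page of BOUND_T2_SIZE (2048) entries, the next BOUND_T2_BITS (11) bits pick one 16-byte (on i386) BoundEntry inside that page (BOUND_E_BITS is log2 of the entry size, which is why the offset arrives pre-scaled), and the remaining BOUND_T3_BITS (8) bits mean each entry covers a 256-byte slice of address space, with extra regions chained through the next field. A small stand-alone sketch of the index arithmetic used by __bound_ptr_add, on a made-up address:

    #include <stdio.h>

    /* same constants as bcheck.c above */
    #define BOUND_T1_BITS 13
    #define BOUND_T2_BITS 11
    #define BOUND_T3_BITS (32 - BOUND_T1_BITS - BOUND_T2_BITS)
    #define BOUND_E_BITS  4
    #define BOUND_T2_SIZE (1 << BOUND_T2_BITS)

    int main(void)
    {
        unsigned long addr = 0x0804a123;    /* made-up pointer value */

        /* top 13 bits: index into __bound_t1 */
        unsigned long t1_index = addr >> (BOUND_T2_BITS + BOUND_T3_BITS);

        /* middle 11 bits, already multiplied by sizeof(BoundEntry) == 16,
           ready to be added to the page pointer as a byte offset */
        unsigned long t2_offset = (addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
                                  ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);

        /* prints: t1 index = 256, entry 1185 (byte offset 18960) */
        printf("t1 index = %lu, entry %lu (byte offset %lu)\n",
               t1_index, t2_offset >> BOUND_E_BITS, t2_offset);
        return 0;
    }
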
diff --git a/05/tcc-0.9.25/lib/libtcc1.c b/05/tcc-0.9.25/lib/libtcc1.c
new file mode 100644
index 0000000..b079477
--- /dev/null
+++ b/05/tcc-0.9.25/lib/libtcc1.c
@@ -0,0 +1,607 @@
+/* TCC runtime library.
+ Parts of this code are (c) 2002 Fabrice Bellard
+
+ Copyright (C) 1987, 1988, 1992, 1994, 1995 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+*/
+
+#define W_TYPE_SIZE 32
+#define BITS_PER_UNIT 8
+
+typedef int Wtype;
+typedef unsigned int UWtype;
+typedef unsigned int USItype;
+typedef long long DWtype;
+typedef unsigned long long UDWtype;
+
+struct DWstruct {
+ Wtype low, high;
+};
+
+typedef union
+{
+ struct DWstruct s;
+ DWtype ll;
+} DWunion;
+
+typedef long double XFtype;
+#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
+#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
+
+/* the following deal with IEEE single-precision numbers */
+#define EXCESS 126
+#define SIGNBIT 0x80000000
+#define HIDDEN (1 << 23)
+#define SIGN(fp) ((fp) & SIGNBIT)
+#define EXP(fp) (((fp) >> 23) & 0xFF)
+#define MANT(fp) (((fp) & 0x7FFFFF) | HIDDEN)
+#define PACK(s,e,m) ((s) | ((e) << 23) | (m))
+
+/* the following deal with IEEE double-precision numbers */
+#define EXCESSD 1022
+#define HIDDEND (1 << 20)
+#define EXPD(fp) (((fp.l.upper) >> 20) & 0x7FF)
+#define SIGND(fp) ((fp.l.upper) & SIGNBIT)
+#define MANTD(fp) (((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
+ (fp.l.lower >> 22))
+#define HIDDEND_LL ((long long)1 << 52)
+#define MANTD_LL(fp) ((fp.ll & (HIDDEND_LL-1)) | HIDDEND_LL)
+#define PACKD_LL(s,e,m) (((long long)((s)+((e)<<20))<<32)|(m))
+
+/* the following deal with x86 long double-precision numbers */
+#define EXCESSLD 16382
+#define EXPLD(fp) (fp.l.upper & 0x7fff)
+#define SIGNLD(fp) ((fp.l.upper) & 0x8000)
+
+/* only for x86 */
+union ldouble_long {
+ long double ld;
+ struct {
+ unsigned long long lower;
+ unsigned short upper;
+ } l;
+};
+
+union double_long {
+ double d;
+#if 1
+ struct {
+ unsigned int lower;
+ int upper;
+ } l;
+#else
+ struct {
+ int upper;
+ unsigned int lower;
+ } l;
+#endif
+ long long ll;
+};
+
+union float_long {
+ float f;
+ long l;
+};
+
+/* XXX: we don't provide several of these builtins for now */
+#ifndef __x86_64__
+
+/* XXX: use gcc/tcc intrinsic ? */
+#if defined(__i386__)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subl %5,%1\n\tsbbl %3,%0" \
+ : "=r" ((USItype) (sh)), \
+ "=&r" ((USItype) (sl)) \
+ : "0" ((USItype) (ah)), \
+ "g" ((USItype) (bh)), \
+ "1" ((USItype) (al)), \
+ "g" ((USItype) (bl)))
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mull %3" \
+ : "=a" ((USItype) (w0)), \
+ "=d" ((USItype) (w1)) \
+ : "%0" ((USItype) (u)), \
+ "rm" ((USItype) (v)))
+#define udiv_qrnnd(q, r, n1, n0, dv) \
+ __asm__ ("divl %4" \
+ : "=a" ((USItype) (q)), \
+ "=d" ((USItype) (r)) \
+ : "0" ((USItype) (n0)), \
+ "1" ((USItype) (n1)), \
+ "rm" ((USItype) (dv)))
+#define count_leading_zeros(count, x) \
+ do { \
+ USItype __cbtmp; \
+ __asm__ ("bsrl %1,%0" \
+ : "=r" (__cbtmp) : "rm" ((USItype) (x))); \
+ (count) = __cbtmp ^ 31; \
+ } while (0)
+#else
+#error unsupported CPU type
+#endif
+
+/* most of this code is taken from libgcc2.c from gcc */
+
+static UDWtype __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
+{
+ DWunion ww;
+ DWunion nn, dd;
+ DWunion rr;
+ UWtype d0, d1, n0, n1, n2;
+ UWtype q0, q1;
+ UWtype b, bm;
+
+ nn.ll = n;
+ dd.ll = d;
+
+ d0 = dd.s.low;
+ d1 = dd.s.high;
+ n0 = nn.s.low;
+ n1 = nn.s.high;
+
+#if !UDIV_NEEDS_NORMALIZATION
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ udiv_qrnnd (q1, n1, 0, n1, d0);
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+
+#else /* UDIV_NEEDS_NORMALIZATION */
+
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm != 0)
+ {
+ /* Normalize, i.e. make the most significant bit of the
+ denominator set. */
+
+ d0 = d0 << bm;
+ n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
+ n0 = n0 << bm;
+ }
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0 >> bm. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm == 0)
+ {
+ /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ leading quotient digit q1 = 1).
+
+ This special case is necessary, not an optimization.
+ (Shifts counts of W_TYPE_SIZE are undefined.) */
+
+ n1 -= d0;
+ q1 = 1;
+ }
+ else
+ {
+ /* Normalize. */
+
+ b = W_TYPE_SIZE - bm;
+
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q1, n1, n2, n1, d0);
+ }
+
+ /* n1 != d0... */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0 >> bm. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0 >> bm;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+#endif /* UDIV_NEEDS_NORMALIZATION */
+
+ else
+ {
+ if (d1 > n1)
+ {
+ /* 00 = nn / DD */
+
+ q0 = 0;
+ q1 = 0;
+
+ /* Remainder in n1n0. */
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ /* 0q = NN / dd */
+
+ count_leading_zeros (bm, d1);
+ if (bm == 0)
+ {
+ /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ quotient digit q0 = 0 or 1).
+
+ This special case is necessary, not an optimization. */
+
+ /* The condition on the next line takes advantage of that
+ n1 >= d1 (true due to program flow). */
+ if (n1 > d1 || n0 >= d0)
+ {
+ q0 = 1;
+ sub_ddmmss (n1, n0, n1, n0, d1, d0);
+ }
+ else
+ q0 = 0;
+
+ q1 = 0;
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ UWtype m1, m0;
+ /* Normalize. */
+
+ b = W_TYPE_SIZE - bm;
+
+ d1 = (d1 << bm) | (d0 >> b);
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q0, n1, n2, n1, d1);
+ umul_ppmm (m1, m0, q0, d0);
+
+ if (m1 > n1 || (m1 == n1 && m0 > n0))
+ {
+ q0--;
+ sub_ddmmss (m1, m0, m1, m0, d1, d0);
+ }
+
+ q1 = 0;
+
+ /* Remainder in (n1n0 - m1m0) >> bm. */
+ if (rp != 0)
+ {
+ sub_ddmmss (n1, n0, n1, n0, m1, m0);
+ rr.s.low = (n1 << b) | (n0 >> bm);
+ rr.s.high = n1 >> bm;
+ *rp = rr.ll;
+ }
+ }
+ }
+ }
+
+ ww.s.low = q0;
+ ww.s.high = q1;
+ return ww.ll;
+}
+
+#define __negdi2(a) (-(a))
+
+long long __divdi3(long long u, long long v)
+{
+ int c = 0;
+ DWunion uu, vv;
+ DWtype w;
+
+ uu.ll = u;
+ vv.ll = v;
+
+ if (uu.s.high < 0) {
+ c = ~c;
+ uu.ll = __negdi2 (uu.ll);
+ }
+ if (vv.s.high < 0) {
+ c = ~c;
+ vv.ll = __negdi2 (vv.ll);
+ }
+ w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
+ if (c)
+ w = __negdi2 (w);
+ return w;
+}
+
+long long __moddi3(long long u, long long v)
+{
+ int c = 0;
+ DWunion uu, vv;
+ DWtype w;
+
+ uu.ll = u;
+ vv.ll = v;
+
+ if (uu.s.high < 0) {
+ c = ~c;
+ uu.ll = __negdi2 (uu.ll);
+ }
+ if (vv.s.high < 0)
+ vv.ll = __negdi2 (vv.ll);
+
+ __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) &w);
+ if (c)
+ w = __negdi2 (w);
+ return w;
+}
+
+unsigned long long __udivdi3(unsigned long long u, unsigned long long v)
+{
+ return __udivmoddi4 (u, v, (UDWtype *) 0);
+}
+
+unsigned long long __umoddi3(unsigned long long u, unsigned long long v)
+{
+ UDWtype w;
+
+ __udivmoddi4 (u, v, &w);
+ return w;
+}
+
+/* XXX: fix tcc's code generator to do this instead */
+long long __ashrdi3(long long a, int b)
+{
+#ifdef __TINYC__
+ DWunion u;
+ u.ll = a;
+ if (b >= 32) {
+ u.s.low = u.s.high >> (b - 32);
+ u.s.high = u.s.high >> 31;
+ } else if (b != 0) {
+ u.s.low = ((unsigned)u.s.low >> b) | (u.s.high << (32 - b));
+ u.s.high = u.s.high >> b;
+ }
+ return u.ll;
+#else
+ return a >> b;
+#endif
+}
+
+/* XXX: fix tcc's code generator to do this instead */
+unsigned long long __lshrdi3(unsigned long long a, int b)
+{
+#ifdef __TINYC__
+ DWunion u;
+ u.ll = a;
+ if (b >= 32) {
+ u.s.low = (unsigned)u.s.high >> (b - 32);
+ u.s.high = 0;
+ } else if (b != 0) {
+ u.s.low = ((unsigned)u.s.low >> b) | (u.s.high << (32 - b));
+ u.s.high = (unsigned)u.s.high >> b;
+ }
+ return u.ll;
+#else
+ return a >> b;
+#endif
+}
+
+/* XXX: fix tcc's code generator to do this instead */
+long long __ashldi3(long long a, int b)
+{
+#ifdef __TINYC__
+ DWunion u;
+ u.ll = a;
+ if (b >= 32) {
+ u.s.high = (unsigned)u.s.low << (b - 32);
+ u.s.low = 0;
+ } else if (b != 0) {
+ u.s.high = ((unsigned)u.s.high << b) | ((unsigned)u.s.low >> (32 - b));
+ u.s.low = (unsigned)u.s.low << b;
+ }
+ return u.ll;
+#else
+ return a << b;
+#endif
+}
+
+#if defined(__i386__)
+/* FPU control word for rounding to nearest mode */
+unsigned short __tcc_fpu_control = 0x137f;
+/* FPU control word for round to zero mode for int conversion */
+unsigned short __tcc_int_fpu_control = 0x137f | 0x0c00;
+#endif
+
+#endif /* !__x86_64__ */
+
+/* XXX: fix tcc's code generator to do this instead */
+float __floatundisf(unsigned long long a)
+{
+ DWunion uu;
+ XFtype r;
+
+ uu.ll = a;
+ if (uu.s.high >= 0) {
+ return (float)uu.ll;
+ } else {
+ r = (XFtype)uu.ll;
+ r += 18446744073709551616.0;
+ return (float)r;
+ }
+}
+
+double __floatundidf(unsigned long long a)
+{
+ DWunion uu;
+ XFtype r;
+
+ uu.ll = a;
+ if (uu.s.high >= 0) {
+ return (double)uu.ll;
+ } else {
+ r = (XFtype)uu.ll;
+ r += 18446744073709551616.0;
+ return (double)r;
+ }
+}
+
+long double __floatundixf(unsigned long long a)
+{
+ DWunion uu;
+ XFtype r;
+
+ uu.ll = a;
+ if (uu.s.high >= 0) {
+ return (long double)uu.ll;
+ } else {
+ r = (XFtype)uu.ll;
+ r += 18446744073709551616.0;
+ return (long double)r;
+ }
+}
+
+unsigned long long __fixunssfdi (float a1)
+{
+ register union float_long fl1;
+ register int exp;
+ register unsigned long l;
+
+ fl1.f = a1;
+
+ if (fl1.l == 0)
+ return (0);
+
+ exp = EXP (fl1.l) - EXCESS - 24;
+
+ l = MANT(fl1.l);
+ if (exp >= 41)
+ return (unsigned long long)-1;
+ else if (exp >= 0)
+ return (unsigned long long)l << exp;
+ else if (exp >= -23)
+ return l >> -exp;
+ else
+ return 0;
+}
+
+unsigned long long __fixunsdfdi (double a1)
+{
+ register union double_long dl1;
+ register int exp;
+ register unsigned long long l;
+
+ dl1.d = a1;
+
+ if (dl1.ll == 0)
+ return (0);
+
+ exp = EXPD (dl1) - EXCESSD - 53;
+
+ l = MANTD_LL(dl1);
+
+ if (exp >= 12)
+ return (unsigned long long)-1;
+ else if (exp >= 0)
+ return l << exp;
+ else if (exp >= -52)
+ return l >> -exp;
+ else
+ return 0;
+}
+
+unsigned long long __fixunsxfdi (long double a1)
+{
+ register union ldouble_long dl1;
+ register int exp;
+ register unsigned long long l;
+
+ dl1.ld = a1;
+
+ if (dl1.l.lower == 0 && dl1.l.upper == 0)
+ return (0);
+
+ exp = EXPLD (dl1) - EXCESSLD - 64;
+
+ l = dl1.l.lower;
+
+ if (exp > 0)
+ return (unsigned long long)-1;
+ else if (exp >= -63)
+ return l >> -exp;
+ else
+ return 0;
+}
+