// soroban.c — minimal bump allocator for Soroban wasm contracts.
// SPDX-License-Identifier: Apache-2.0
// Minimal WASM bump allocator in C (no free).
//
// Exports:
//   soroban_alloc(size) -> void*
//   soroban_alloc_align(size, align) -> void*
//   soroban_alloc_init(size, init_ptr) -> struct vector*
//     Returns a pointer to a `struct vector` (see stdlib.h), with `len` and
//     `size` set to `size` and `data` initialized from `init_ptr` if provided.
//   soroban_malloc(size) -> void*
//   soroban_realloc(ptr, new_size) -> void* (copies using the size header)
//   soroban_realloc_with_old(ptr, old_size, new_size) -> void* (explicit copy)
//   soroban_free(ptr, size, align) -> void (no-op)
#include <stdint.h>
#include <stddef.h>
#include "stdlib.h" // project-local: declares struct vector
#ifndef SOROBAN_PAGE_LOG2
#define SOROBAN_PAGE_LOG2 16u // wasm page size exponent: 2^16 = 64 KiB
#endif
#define SOROBAN_PAGE_SIZE (1u << SOROBAN_PAGE_LOG2)
#define SOROBAN_MEM_INDEX 0 // wasm memory #0 (the default linear memory)
  22. // clang/LLVM wasm32 intrinsics
  23. static inline uint32_t wasm_memory_size_pages(void)
  24. {
  25. return (uint32_t)__builtin_wasm_memory_size(SOROBAN_MEM_INDEX);
  26. }
  27. static inline int32_t wasm_memory_grow_pages(uint32_t delta_pages)
  28. {
  29. return (int32_t)__builtin_wasm_memory_grow(SOROBAN_MEM_INDEX, (int)delta_pages);
  30. }
static uint32_t g_cursor = 0; // current bump position (byte address of the next free byte)
static uint32_t g_limit = 0;  // end of grown memory (byte address); 0 means not yet initialized
  33. // We prepend a small header before each returned allocation in order to
  34. // remember the allocation size. This enables `realloc`-style copying even
  35. // though this is a bump allocator without frees.
  36. typedef struct
  37. {
  38. uint32_t size; // payload size in bytes (not including this header)
  39. } soroban_hdr_t;
  40. static inline void *hdr_to_ptr(soroban_hdr_t *h)
  41. {
  42. return (void *)((uintptr_t)h + sizeof(soroban_hdr_t));
  43. }
  44. static inline soroban_hdr_t *ptr_to_hdr(void *p)
  45. {
  46. return (soroban_hdr_t *)((uintptr_t)p - sizeof(soroban_hdr_t));
  47. }
  48. static inline void *mem_copy(void *dst, const void *src, uint32_t n)
  49. {
  50. // Simple, portable copy to avoid pulling in libc in freestanding mode
  51. unsigned char *d = (unsigned char *)dst;
  52. const unsigned char *s = (const unsigned char *)src;
  53. for (uint32_t i = 0; i < n; i++)
  54. d[i] = s[i];
  55. return dst;
  56. }
  57. static inline uint32_t align_up(uint32_t addr, uint32_t align)
  58. {
  59. if (align == 0)
  60. align = 1;
  61. uint32_t mask = align - 1;
  62. return (addr + mask) & ~mask;
  63. }
  64. static inline void maybe_init(void)
  65. {
  66. if (g_limit == 0)
  67. {
  68. uint32_t end = wasm_memory_size_pages() << SOROBAN_PAGE_LOG2; // bytes
  69. g_cursor = end;
  70. g_limit = end;
  71. }
  72. }
  73. // grow so that `need_bytes` fits (<== need_bytes is a byte address)
  74. static inline int ensure_capacity(uint32_t need_bytes)
  75. {
  76. if (need_bytes <= g_limit)
  77. return 1;
  78. uint32_t deficit = need_bytes - g_limit;
  79. uint32_t pages = (deficit + (SOROBAN_PAGE_SIZE - 1)) >> SOROBAN_PAGE_LOG2;
  80. if (wasm_memory_grow_pages(pages) < 0)
  81. return 0; // OOM
  82. g_limit += pages << SOROBAN_PAGE_LOG2;
  83. return 1;
  84. }
  85. static void *alloc_impl(uint32_t bytes, uint32_t align)
  86. {
  87. maybe_init();
  88. // Ensure there is space for the header while keeping the returned pointer
  89. // aligned as requested.
  90. uint32_t start = align_up(g_cursor + (uint32_t)sizeof(soroban_hdr_t), align ? align : 1);
  91. uint32_t end = start + bytes;
  92. if (end > g_limit)
  93. {
  94. if (!ensure_capacity(end))
  95. return (void *)0; // OOM
  96. // retry after growth
  97. start = align_up(g_cursor + (uint32_t)sizeof(soroban_hdr_t), align ? align : 1);
  98. end = start + bytes;
  99. }
  100. g_cursor = end;
  101. // Write header just before the returned pointer
  102. soroban_hdr_t *hdr = (soroban_hdr_t *)(uintptr_t)(start - (uint32_t)sizeof(soroban_hdr_t));
  103. hdr->size = bytes;
  104. return (void *)(uintptr_t)start;
  105. }
  106. // -------------------- exported API --------------------
  107. __attribute__((export_name("soroban_alloc"))) void *soroban_alloc(uint32_t size)
  108. {
  109. // default alignment 8
  110. return alloc_impl(size, 8);
  111. }
  112. __attribute__((export_name("soroban_alloc_init"))) struct vector *soroban_alloc_init(uint32_t members,
  113. const void *init_ptr)
  114. {
  115. // Emulate stdlib.c:vector_new() but allocate via alloc_impl.
  116. // Note: here `members` is the number of bytes in the vector payload
  117. // (element size assumed to be 1 for Soroban at present).
  118. uint32_t size_array = members;
  119. struct vector *v = (struct vector *)alloc_impl((uint32_t)sizeof(struct vector) + size_array, 8);
  120. if (v == (struct vector *)0)
  121. {
  122. return (struct vector *)0;
  123. }
  124. v->len = members;
  125. v->size = members;
  126. uint8_t *data = v->data;
  127. if (size_array)
  128. {
  129. if (init_ptr != (const void *)0)
  130. {
  131. mem_copy(data, init_ptr, size_array);
  132. }
  133. else
  134. {
  135. // zero-initialize when no initializer provided
  136. for (uint32_t i = 0; i < size_array; i++)
  137. data[i] = 0;
  138. }
  139. }
  140. return v;
  141. }
  142. __attribute__((export_name("soroban_alloc_align"))) void *soroban_alloc_align(uint32_t size, uint32_t align)
  143. {
  144. return alloc_impl(size, align);
  145. }
  146. __attribute__((export_name("soroban_malloc"))) void *soroban_malloc(uint32_t size)
  147. {
  148. return alloc_impl(size, 8);
  149. }
  150. // Reallocate and copy previous contents. Since we store a small header in
  151. // front of each allocation, we can determine the old size here and copy the
  152. // minimum of old and new sizes.
  153. __attribute__((export_name("soroban_realloc"))) void *soroban_realloc(void *old_ptr, uint32_t new_size)
  154. {
  155. if (old_ptr == (void *)0)
  156. {
  157. return alloc_impl(new_size, 8);
  158. }
  159. // Determine old size from the header placed before the allocation
  160. soroban_hdr_t *old_hdr = ptr_to_hdr(old_ptr);
  161. uint32_t old_size = old_hdr->size;
  162. void *new_ptr = alloc_impl(new_size, 8);
  163. if (new_ptr == (void *)0)
  164. return (void *)0; // OOM
  165. uint32_t copy = old_size < new_size ? old_size : new_size;
  166. if (copy)
  167. mem_copy(new_ptr, old_ptr, copy);
  168. return new_ptr;
  169. }
  170. // Variant that accepts the old size explicitly. Useful when the caller
  171. // already knows the previous allocation size and wants to avoid relying on
  172. // the header (or for interop with older allocations).
  173. __attribute__((export_name("soroban_realloc_with_old"))) void *soroban_realloc_with_old(void *old_ptr,
  174. uint32_t old_size,
  175. uint32_t new_size)
  176. {
  177. if (old_ptr == (void *)0)
  178. {
  179. return alloc_impl(new_size, 8);
  180. }
  181. void *new_ptr = alloc_impl(new_size, 8);
  182. if (new_ptr == (void *)0)
  183. return (void *)0; // OOM
  184. uint32_t copy = old_size < new_size ? old_size : new_size;
  185. if (copy)
  186. mem_copy(new_ptr, old_ptr, copy);
  187. return new_ptr;
  188. }
  189. __attribute__((export_name("soroban_free"))) void soroban_free(void *_ptr, uint32_t _size, uint32_t _align)
  190. {
  191. (void)_ptr;
  192. (void)_size;
  193. (void)_align; // bump allocator: no-op
  194. }