#define __MOJOSHADER_INTERNAL__ 1
#include "mojoshader_internal.h"

// Convenience functions for allocators...
#if !MOJOSHADER_FORCE_ALLOCATOR
void * MOJOSHADERCALL MOJOSHADER_internal_malloc(int bytes, void *d) { return malloc(bytes); }
void MOJOSHADERCALL MOJOSHADER_internal_free(void *ptr, void *d) { free(ptr); }
#endif

MOJOSHADER_error MOJOSHADER_out_of_mem_error = {
    "Out of memory", NULL, MOJOSHADER_POSITION_NONE
};

MOJOSHADER_parseData MOJOSHADER_out_of_mem_data = {
    1, &MOJOSHADER_out_of_mem_error, 0, 0, 0, 0,
    MOJOSHADER_TYPE_UNKNOWN, 0, 0, 0, 0
};

typedef struct HashItem
{
    const void *key;
    const void *value;
    struct HashItem *next;
} HashItem;

struct HashTable
{
    HashItem **table;
    uint32 table_len;
    int stackable;
    void *data;
    HashTable_HashFn hash;
    HashTable_KeyMatchFn keymatch;
    HashTable_NukeFn nuke;
    MOJOSHADER_malloc m;
    MOJOSHADER_free f;
    void *d;
};

static inline uint32 calc_hash(const HashTable *table, const void *key)
{
    return table->hash(key, table->data) & (table->table_len-1);
} // calc_hash
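
// calc_hash() masks with (table_len - 1), so table_len must be a power of
//  two for every bucket to be reachable; hash_create() below always uses
//  256 buckets, which satisfies this.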

int hash_find(const HashTable *table, const void *key, const void **_value)
{
    HashItem *i;
    void *data = table->data;
    const uint32 hash = calc_hash(table, key);
    HashItem *prev = NULL;
    for (i = table->table[hash]; i != NULL; i = i->next)
    {
        if (table->keymatch(key, i->key, data))
        {
            if (_value != NULL)
                *_value = i->value;

            // Matched! Move to the front of list for faster lookup next time.
            //  (stackable tables have to remain in the same order, though!)
            if ((!table->stackable) && (prev != NULL))
            {
                assert(prev->next == i);
                prev->next = i->next;
                i->next = table->table[hash];
                table->table[hash] = i;
            } // if

            return 1;
        } // if

        prev = i;
    } // for

    return 0;
} // hash_find

int hash_iter(const HashTable *table, const void *key,
              const void **_value, void **iter)
{
    HashItem *item = (HashItem *) *iter;
    if (item == NULL)
        item = table->table[calc_hash(table, key)];
    else
        item = item->next;

    while (item != NULL)
    {
        if (table->keymatch(key, item->key, table->data))
        {
            *_value = item->value;
            *iter = item;
            return 1;
        } // if
        item = item->next;
    } // while

    // no more matches.
    *_value = NULL;
    *iter = NULL;
    return 0;
} // hash_iter

int hash_iter_keys(const HashTable *table, const void **_key, void **iter)
{
    HashItem *item = (HashItem *) *iter;
    int idx = 0;

    if (item != NULL)
    {
        const HashItem *orig = item;
        item = item->next;
        if (item == NULL)
            idx = calc_hash(table, orig->key) + 1;
    } // if

    while (!item && (idx < table->table_len))
        item = table->table[idx++];  // skip empty buckets...

    if (item == NULL)  // no more matches?
    {
        *_key = NULL;
        *iter = NULL;
        return 0;
    } // if

    *_key = item->key;
    *iter = item;
    return 1;
} // hash_iter_keys

int hash_insert(HashTable *table, const void *key, const void *value)
{
    HashItem *item = NULL;
    const uint32 hash = calc_hash(table, key);
    if ( (!table->stackable) && (hash_find(table, key, NULL)) )
        return 0;

    // !!! FIXME: grow and rehash table if it gets too saturated.
    item = (HashItem *) table->m(sizeof (HashItem), table->d);
    if (item == NULL)
        return -1;

    item->key = key;
    item->value = value;
    item->next = table->table[hash];
    table->table[hash] = item;

    return 1;
} // hash_insert

HashTable *hash_create(void *data, const HashTable_HashFn hashfn,
                       const HashTable_KeyMatchFn keymatchfn,
                       const HashTable_NukeFn nukefn,
                       const int stackable,
                       MOJOSHADER_malloc m, MOJOSHADER_free f, void *d)
{
    const uint32 initial_table_size = 256;
    const uint32 alloc_len = sizeof (HashItem *) * initial_table_size;
    HashTable *table = (HashTable *) m(sizeof (HashTable), d);
    if (table == NULL)
        return NULL;
    memset(table, '\0', sizeof (HashTable));

    table->table = (HashItem **) m(alloc_len, d);
    if (table->table == NULL)
    {
        f(table, d);
        return NULL;
    } // if

    memset(table->table, '\0', alloc_len);
    table->table_len = initial_table_size;
    table->stackable = stackable;
    table->data = data;
    table->hash = hashfn;
    table->keymatch = keymatchfn;
    table->nuke = nukefn;
    table->m = m;
    table->f = f;
    table->d = d;
    return table;
} // hash_create

void hash_destroy(HashTable *table)
{
    uint32 i;
    void *data = table->data;
    MOJOSHADER_free f = table->f;
    void *d = table->d;
    for (i = 0; i < table->table_len; i++)
    {
        HashItem *item = table->table[i];
        while (item != NULL)
        {
            HashItem *next = item->next;
            table->nuke(item->key, item->value, data);
            f(item, d);
            item = next;
        } // while
    } // for

    f(table->table, d);
    f(table, d);
} // hash_destroy

int hash_remove(HashTable *table, const void *key)
{
    HashItem *item = NULL;
    HashItem *prev = NULL;
    void *data = table->data;
    const uint32 hash = calc_hash(table, key);
    for (item = table->table[hash]; item != NULL; item = item->next)
    {
        if (table->keymatch(key, item->key, data))
        {
            if (prev != NULL)
                prev->next = item->next;
            else
                table->table[hash] = item->next;

            table->nuke(item->key, item->value, data);
            table->f(item, table->d);
            return 1;
        } // if

        prev = item;
    } // for

    return 0;
} // hash_remove

// this is djb's xor hashing function.
static inline uint32 hash_string_djbxor(const char *str, size_t len)
{
    register uint32 hash = 5381;
    while (len--)
        hash = ((hash << 5) + hash) ^ *(str++);
    return hash;
} // hash_string_djbxor

static inline uint32 hash_string(const char *str, size_t len)
{
    return hash_string_djbxor(str, len);
} // hash_string
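
// In other words: starting from the seed 5381, each byte c updates the hash
//  as hash = (hash * 33) ^ c, since (hash << 5) + hash == hash * 33.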

uint32 hash_hash_string(const void *sym, void *data)
{
    (void) data;
    return hash_string((const char*) sym, strlen((const char *) sym));
} // hash_hash_string

int hash_keymatch_string(const void *a, const void *b, void *data)
{
    (void) data;
    return (strcmp((const char *) a, (const char *) b) == 0);
} // hash_keymatch_string
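
// A minimal usage sketch of the HashTable API above (illustrative only, not
//  compiled): string keys hashed with hash_hash_string/hash_keymatch_string,
//  a no-op nuke function, and the internal allocators. The key and value
//  strings here are made up for the example.
#if 0
static void example_nuke(const void *key, const void *value, void *data) {}

static void example_hashtable_usage(void)
{
    HashTable *ht = hash_create(NULL, hash_hash_string, hash_keymatch_string,
                                example_nuke, 0,
                                MOJOSHADER_internal_malloc,
                                MOJOSHADER_internal_free, NULL);
    if (ht == NULL)
        return;  // out of memory.

    hash_insert(ht, "vs_main", "vertex shader entry point");

    const void *value = NULL;
    if (hash_find(ht, "vs_main", &value))
    {
        // value now points to "vertex shader entry point".
    } // if

    hash_destroy(ht);  // calls example_nuke on each remaining item.
} // example_hashtable_usage
#endif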


// string -> string map...

static void stringmap_nuke_noop(const void *key, const void *val, void *d) {}

static void stringmap_nuke(const void *key, const void *val, void *d)
{
    StringMap *smap = (StringMap *) d;
    smap->f((void *) key, smap->d);
    smap->f((void *) val, smap->d);
} // stringmap_nuke

StringMap *stringmap_create(const int copy, MOJOSHADER_malloc m,
                            MOJOSHADER_free f, void *d)
{
    HashTable_NukeFn nuke = copy ? stringmap_nuke : stringmap_nuke_noop;
    StringMap *smap;
    smap = hash_create(0,hash_hash_string,hash_keymatch_string,nuke,0,m,f,d);
    if (smap != NULL)
        smap->data = smap;
    return smap;
} // stringmap_create

void stringmap_destroy(StringMap *smap)
{
    hash_destroy(smap);
} // stringmap_destroy

int stringmap_insert(StringMap *smap, const char *key, const char *value)
{
    assert(key != NULL);

    if (smap->nuke == stringmap_nuke_noop)  // no copy?
        return hash_insert(smap, key, value);

    int rc = -1;
    char *k = (char *) smap->m(strlen(key) + 1, smap->d);
    char *v = (char *) (value ? smap->m(strlen(value) + 1, smap->d) : NULL);
    int failed = ( (!k) || ((!v) && (value)) );

    if (!failed)
    {
        strcpy(k, key);
        if (value != NULL)
            strcpy(v, value);
        failed = ((rc = hash_insert(smap, k, v)) <= 0);
    } // if

    if (failed)
    {
        smap->f(k, smap->d);
        smap->f(v, smap->d);
    } // if

    return rc;
} // stringmap_insert

int stringmap_remove(StringMap *smap, const char *key)
{
    return hash_remove(smap, key);
} // stringmap_remove

int stringmap_find(const StringMap *smap, const char *key, const char **_value)
{
    const void *value = NULL;
    const int retval = hash_find(smap, key, &value);
    *_value = (const char *) value;
    return retval;
} // stringmap_find
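
// A minimal usage sketch of the StringMap wrapper above (illustrative only,
//  not compiled). With copy != 0 the map duplicates both strings, so the
//  caller may pass temporaries; the example key and value are made up.
#if 0
static void example_stringmap_usage(void)
{
    StringMap *map = stringmap_create(1, MOJOSHADER_internal_malloc,
                                      MOJOSHADER_internal_free, NULL);
    if (map == NULL)
        return;  // out of memory.

    stringmap_insert(map, "POSITION0", "attribute_position");

    const char *value = NULL;
    if (stringmap_find(map, "POSITION0", &value))
    {
        // value is the copied "attribute_position" string owned by the map.
    } // if

    stringmap_destroy(map);  // frees the copied keys and values too.
} // example_stringmap_usage
#endif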


// The string cache...   !!! FIXME: use StringMap internally for this.

typedef struct StringBucket
{
    char *string;
    struct StringBucket *next;
} StringBucket;

struct StringCache
{
    StringBucket **hashtable;
    uint32 table_size;
    MOJOSHADER_malloc m;
    MOJOSHADER_free f;
    void *d;
};

const char *stringcache(StringCache *cache, const char *str)
{
    return stringcache_len(cache, str, strlen(str));
} // stringcache

static const char *stringcache_len_internal(StringCache *cache,
                                            const char *str,
                                            const unsigned int len,
                                            const int addmissing)
{
    const uint8 hash = hash_string(str, len) & (cache->table_size-1);
    StringBucket *bucket = cache->hashtable[hash];
    StringBucket *prev = NULL;
    while (bucket)
    {
        const char *bstr = bucket->string;
        if ((strncmp(bstr, str, len) == 0) && (bstr[len] == 0))
        {
            // Matched! Move this to the front of the list.
            if (prev != NULL)
            {
                assert(prev->next == bucket);
                prev->next = bucket->next;
                bucket->next = cache->hashtable[hash];
                cache->hashtable[hash] = bucket;
            } // if
            return bstr;  // already cached
        } // if
        prev = bucket;
        bucket = bucket->next;
    } // while

    // no match!
    if (!addmissing)
        return NULL;

    // add to the table.
    bucket = (StringBucket *) cache->m(sizeof (StringBucket) + len + 1, cache->d);
    if (bucket == NULL)
        return NULL;
    bucket->string = (char *)(bucket + 1);
    memcpy(bucket->string, str, len);
    bucket->string[len] = '\0';
    bucket->next = cache->hashtable[hash];
    cache->hashtable[hash] = bucket;
    return bucket->string;
} // stringcache_len_internal

const char *stringcache_len(StringCache *cache, const char *str,
                            const unsigned int len)
{
    return stringcache_len_internal(cache, str, len, 1);
} // stringcache_len

int stringcache_iscached(StringCache *cache, const char *str)
{
    return (stringcache_len_internal(cache, str, strlen(str), 0) != NULL);
} // stringcache_iscached

const char *stringcache_fmt(StringCache *cache, const char *fmt, ...)
{
    char buf[128];  // use the stack if reasonable.
    char *ptr = NULL;
    int len = 0;  // number of chars, NOT counting null-terminator!
    va_list ap;

    va_start(ap, fmt);
    len = vsnprintf(buf, sizeof (buf), fmt, ap);
    va_end(ap);

    if (len >= sizeof (buf))  // didn't fit? vsnprintf needs len+1 bytes.
    {
        ptr = (char *) cache->m(len + 1, cache->d);
        if (ptr == NULL)
            return NULL;

        va_start(ap, fmt);
        vsnprintf(ptr, len + 1, fmt, ap);
        va_end(ap);
    } // if

    const char *retval = stringcache_len(cache, ptr ? ptr : buf, len);
    if (ptr != NULL)
        cache->f(ptr, cache->d);

    return retval;
} // stringcache_fmt

StringCache *stringcache_create(MOJOSHADER_malloc m, MOJOSHADER_free f, void *d)
{
    const uint32 initial_table_size = 256;
    const size_t tablelen = sizeof (StringBucket *) * initial_table_size;
    StringCache *cache = (StringCache *) m(sizeof (StringCache), d);
    if (!cache)
        return NULL;
    memset(cache, '\0', sizeof (StringCache));

    cache->hashtable = (StringBucket **) m(tablelen, d);
    if (!cache->hashtable)
    {
        f(cache, d);
        return NULL;
    } // if
    memset(cache->hashtable, '\0', tablelen);

    cache->table_size = initial_table_size;
    cache->m = m;
    cache->f = f;
    cache->d = d;
    return cache;
} // stringcache_create

void stringcache_destroy(StringCache *cache)
{
    if (cache == NULL)
        return;

    MOJOSHADER_free f = cache->f;
    void *d = cache->d;
    size_t i;

    for (i = 0; i < cache->table_size; i++)
    {
        StringBucket *bucket = cache->hashtable[i];
        cache->hashtable[i] = NULL;
        while (bucket)
        {
            StringBucket *next = bucket->next;
            f(bucket, d);
            bucket = next;
        } // while
    } // for

    f(cache->hashtable, d);
    f(cache, d);
} // stringcache_destroy
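
// A minimal usage sketch of the StringCache above (illustrative only, not
//  compiled). The cache interns strings: equal strings map to one shared,
//  cache-owned copy, so callers can compare pointers instead of contents.
//  The example string is made up.
#if 0
static void example_stringcache_usage(void)
{
    StringCache *cache = stringcache_create(MOJOSHADER_internal_malloc,
                                            MOJOSHADER_internal_free, NULL);
    if (cache == NULL)
        return;  // out of memory.

    const char *a = stringcache(cache, "texcoord0");
    const char *b = stringcache(cache, "texcoord0");
    assert(a == b);  // equal strings intern to the same pointer.

    stringcache_destroy(cache);  // frees every interned string.
} // example_stringcache_usage
#endif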


// We chain errors as a linked list with a head/tail for easy appending.
//  These get flattened before passing to the application.

typedef struct ErrorItem
{
    MOJOSHADER_error error;
    struct ErrorItem *next;
} ErrorItem;

struct ErrorList
{
    ErrorItem head;
    ErrorItem *tail;
    int count;
    MOJOSHADER_malloc m;
    MOJOSHADER_free f;
    void *d;
};

ErrorList *errorlist_create(MOJOSHADER_malloc m, MOJOSHADER_free f, void *d)
{
    ErrorList *retval = (ErrorList *) m(sizeof (ErrorList), d);
    if (retval != NULL)
    {
        memset(retval, '\0', sizeof (ErrorList));
        retval->tail = &retval->head;
        retval->m = m;
        retval->f = f;
        retval->d = d;
    } // if

    return retval;
} // errorlist_create

int errorlist_add(ErrorList *list, const char *fname,
                  const int errpos, const char *str)
{
    return errorlist_add_fmt(list, fname, errpos, "%s", str);
} // errorlist_add

int errorlist_add_fmt(ErrorList *list, const char *fname,
                      const int errpos, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    const int retval = errorlist_add_va(list, fname, errpos, fmt, ap);
    va_end(ap);
    return retval;
} // errorlist_add_fmt

int errorlist_add_va(ErrorList *list, const char *_fname,
                     const int errpos, const char *fmt, va_list va)
{
    ErrorItem *error = (ErrorItem *) list->m(sizeof (ErrorItem), list->d);
    if (error == NULL)
        return 0;

    char *fname = NULL;
    if (_fname != NULL)
    {
        fname = (char *) list->m(strlen(_fname) + 1, list->d);
        if (fname == NULL)
        {
            list->f(error, list->d);
            return 0;
        } // if
        strcpy(fname, _fname);
    } // if

    char scratch[128];
    va_list ap;
    va_copy(ap, va);
    int len = vsnprintf(scratch, sizeof (scratch), fmt, ap);
    va_end(ap);

    // On some versions of the Windows C runtime, vsnprintf() returns -1 when
    //  the buffer overflows, instead of the length the string would have
    //  been. In that case, make another copy of va and fetch the length with
    //  a separate call to _vscprintf().
#ifdef _MSC_VER
    if (len == -1)
    {
        va_copy(ap, va);
        len = _vscprintf(fmt, ap);
        va_end(ap);
    } // if
#endif

    char *failstr = (char *) list->m(len + 1, list->d);
    if (failstr == NULL)
    {
        list->f(error, list->d);
        list->f(fname, list->d);
        return 0;
    } // if

    // If we overflowed our scratch buffer, that's okay. We were going to
    //  allocate anyhow...the scratch buffer just lets us avoid a second
    //  run of vsnprintf().
    if (len < sizeof (scratch))
        strcpy(failstr, scratch);  // copy it over.
    else
    {
        va_copy(ap, va);
        vsnprintf(failstr, len + 1, fmt, ap);  // rebuild it.
        va_end(ap);
    } // else

    error->error.error = failstr;
    error->error.filename = fname;
    error->error.error_position = errpos;
    error->next = NULL;

    list->tail->next = error;
    list->tail = error;
    list->count++;
    return 1;
} // errorlist_add_va

int errorlist_count(ErrorList *list)
{
    return list->count;
} // errorlist_count

MOJOSHADER_error *errorlist_flatten(ErrorList *list)
{
    if (list->count == 0)
        return NULL;

    int total = 0;
    MOJOSHADER_error *retval = (MOJOSHADER_error *)
            list->m(sizeof (MOJOSHADER_error) * list->count, list->d);
    if (retval == NULL)
        return NULL;

    ErrorItem *item = list->head.next;
    while (item != NULL)
    {
        ErrorItem *next = item->next;
        // reuse the string allocations
        memcpy(&retval[total], &item->error, sizeof (MOJOSHADER_error));
        list->f(item, list->d);
        item = next;
        total++;
    } // while

    assert(total == list->count);
    list->count = 0;
    list->head.next = NULL;
    list->tail = &list->head;
    return retval;
} // errorlist_flatten

void errorlist_destroy(ErrorList *list)
{
    if (list == NULL)
        return;

    MOJOSHADER_free f = list->f;
    void *d = list->d;
    ErrorItem *item = list->head.next;
    while (item != NULL)
    {
        ErrorItem *next = item->next;
        f((void *) item->error.error, d);
        f((void *) item->error.filename, d);
        f(item, d);
        item = next;
    } // while
    f(list, d);
} // errorlist_destroy
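
// A minimal usage sketch of the ErrorList API above (illustrative only, not
//  compiled). errorlist_flatten() hands the array and each error/filename
//  string over to the caller, which should release them with the same
//  allocator; the filename and message here are made up.
#if 0
static void example_errorlist_usage(void)
{
    ErrorList *list = errorlist_create(MOJOSHADER_internal_malloc,
                                       MOJOSHADER_internal_free, NULL);
    if (list == NULL)
        return;  // out of memory.

    errorlist_add_fmt(list, "shader.hlsl", 42, "unknown identifier '%s'", "foo");

    const int count = errorlist_count(list);
    MOJOSHADER_error *errors = errorlist_flatten(list);
    int i;
    for (i = 0; i < count; i++)
    {
        // inspect errors[i].error, errors[i].filename, errors[i].error_position ...
        MOJOSHADER_internal_free((void *) errors[i].error, NULL);
        MOJOSHADER_internal_free((void *) errors[i].filename, NULL);
    } // for
    MOJOSHADER_internal_free(errors, NULL);

    errorlist_destroy(list);
} // example_errorlist_usage
#endif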


typedef struct BufferBlock
{
    uint8 *data;
    size_t bytes;
    struct BufferBlock *next;
} BufferBlock;

struct Buffer
{
    size_t total_bytes;
    BufferBlock *head;
    BufferBlock *tail;
    size_t block_size;
    MOJOSHADER_malloc m;
    MOJOSHADER_free f;
    void *d;
};

Buffer *buffer_create(size_t blksz, MOJOSHADER_malloc m,
                      MOJOSHADER_free f, void *d)
{
    Buffer *buffer = (Buffer *) m(sizeof (Buffer), d);
    if (buffer != NULL)
    {
        memset(buffer, '\0', sizeof (Buffer));
        buffer->block_size = blksz;
        buffer->m = m;
        buffer->f = f;
        buffer->d = d;
    } // if
    return buffer;
} // buffer_create

char *buffer_reserve(Buffer *buffer, const size_t len)
{
    // note that we make the blocks bigger than blocksize when we have enough
    //  data to overfill a fresh block, to reduce allocations.
    const size_t blocksize = buffer->block_size;

    if (len == 0)
        return NULL;

    if (buffer->tail != NULL)
    {
        const size_t tailbytes = buffer->tail->bytes;
        const size_t avail = (tailbytes >= blocksize) ? 0 : blocksize - tailbytes;
        if (len <= avail)
        {
            buffer->tail->bytes += len;
            buffer->total_bytes += len;
            assert(buffer->tail->bytes <= blocksize);
            return (char *) buffer->tail->data + tailbytes;
        } // if
    } // if

    // need to allocate a new block (even if a previous block wasn't filled,
    //  so this buffer is contiguous).
    const size_t bytecount = len > blocksize ? len : blocksize;
    const size_t malloc_len = sizeof (BufferBlock) + bytecount;
    BufferBlock *item = (BufferBlock *) buffer->m(malloc_len, buffer->d);
    if (item == NULL)
        return NULL;

    item->data = ((uint8 *) item) + sizeof (BufferBlock);
    item->bytes = len;
    item->next = NULL;
    if (buffer->tail != NULL)
        buffer->tail->next = item;
    else
        buffer->head = item;
    buffer->tail = item;

    buffer->total_bytes += len;
    return (char *) item->data;
} // buffer_reserve

int buffer_append(Buffer *buffer, const void *_data, size_t len)
{
    const uint8 *data = (const uint8 *) _data;

    // note that we make the blocks bigger than blocksize when we have enough
    //  data to overfill a fresh block, to reduce allocations.
    const size_t blocksize = buffer->block_size;

    if (len == 0)
        return 1;

    if (buffer->tail != NULL)
    {
        const size_t tailbytes = buffer->tail->bytes;
        const size_t avail = (tailbytes >= blocksize) ? 0 : blocksize - tailbytes;
        const size_t cpy = (avail > len) ? len : avail;
        if (cpy > 0)
        {
            memcpy(buffer->tail->data + tailbytes, data, cpy);
            len -= cpy;
            data += cpy;
            buffer->tail->bytes += cpy;
            buffer->total_bytes += cpy;
            assert(buffer->tail->bytes <= blocksize);
        } // if
    } // if

    if (len > 0)
    {
        assert((!buffer->tail) || (buffer->tail->bytes >= blocksize));
        const size_t bytecount = len > blocksize ? len : blocksize;
        const size_t malloc_len = sizeof (BufferBlock) + bytecount;
        BufferBlock *item = (BufferBlock *) buffer->m(malloc_len, buffer->d);
        if (item == NULL)
            return 0;

        item->data = ((uint8 *) item) + sizeof (BufferBlock);
        item->bytes = len;
        item->next = NULL;
        if (buffer->tail != NULL)
            buffer->tail->next = item;
        else
            buffer->head = item;
        buffer->tail = item;

        memcpy(item->data, data, len);
        buffer->total_bytes += len;
    } // if

    return 1;
} // buffer_append

int buffer_append_fmt(Buffer *buffer, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    const int retval = buffer_append_va(buffer, fmt, ap);
    va_end(ap);
    return retval;
} // buffer_append_fmt

int buffer_append_va(Buffer *buffer, const char *fmt, va_list va)
{
    char scratch[256];

    va_list ap;
    va_copy(ap, va);
    const int len = vsnprintf(scratch, sizeof (scratch), fmt, ap);
    va_end(ap);

    if (len == 0)
        return 1;  // nothing to do.
    else if (len < sizeof (scratch))
        return buffer_append(buffer, scratch, len);

    // If we overflowed our scratch buffer, heap allocate and try again.
    char *buf = (char *) buffer->m(len + 1, buffer->d);
    if (buf == NULL)
        return 0;
    va_copy(ap, va);
    vsnprintf(buf, len + 1, fmt, ap);  // rebuild it.
    va_end(ap);
    const int retval = buffer_append(buffer, buf, len);
    buffer->f(buf, buffer->d);
    return retval;
} // buffer_append_va

size_t buffer_size(Buffer *buffer)
{
    return buffer->total_bytes;
} // buffer_size

void buffer_empty(Buffer *buffer)
{
    BufferBlock *item = buffer->head;
    while (item != NULL)
    {
        BufferBlock *next = item->next;
        buffer->f(item, buffer->d);
        item = next;
    } // while
    buffer->head = buffer->tail = NULL;
    buffer->total_bytes = 0;
} // buffer_empty

char *buffer_flatten(Buffer *buffer)
{
    char *retval = (char *) buffer->m(buffer->total_bytes + 1, buffer->d);
    if (retval == NULL)
        return NULL;
    BufferBlock *item = buffer->head;
    char *ptr = retval;
    while (item != NULL)
    {
        BufferBlock *next = item->next;
        memcpy(ptr, item->data, item->bytes);
        ptr += item->bytes;
        buffer->f(item, buffer->d);
        item = next;
    } // while
    *ptr = '\0';

    assert(ptr == (retval + buffer->total_bytes));

    buffer->head = buffer->tail = NULL;
    buffer->total_bytes = 0;

    return retval;
} // buffer_flatten

char *buffer_merge(Buffer **buffers, const size_t n, size_t *_len)
{
    Buffer *first = NULL;
    size_t len = 0;
    size_t i;
    for (i = 0; i < n; i++)
    {
        Buffer *buffer = buffers[i];
        if (buffer == NULL)
            continue;
        if (first == NULL)
            first = buffer;
        len += buffer->total_bytes;
    } // for

    char *retval = (char *) (first ? first->m(len + 1, first->d) : NULL);
    if (retval == NULL)
    {
        *_len = 0;
        return NULL;
    } // if

    *_len = len;
    char *ptr = retval;
    for (i = 0; i < n; i++)
    {
        Buffer *buffer = buffers[i];
        if (buffer == NULL)
            continue;
        BufferBlock *item = buffer->head;
        while (item != NULL)
        {
            BufferBlock *next = item->next;
            memcpy(ptr, item->data, item->bytes);
            ptr += item->bytes;
            buffer->f(item, buffer->d);
            item = next;
        } // while
        buffer->head = buffer->tail = NULL;
        buffer->total_bytes = 0;
    } // for
    *ptr = '\0';

    assert(ptr == (retval + len));

    return retval;
} // buffer_merge

void buffer_destroy(Buffer *buffer)
{
    if (buffer != NULL)
    {
        MOJOSHADER_free f = buffer->f;
        void *d = buffer->d;
        buffer_empty(buffer);
        f(buffer, d);
    } // if
} // buffer_destroy
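
// A minimal usage sketch of the Buffer API above (illustrative only, not
//  compiled): append a few pieces of text, flatten them into one
//  NUL-terminated allocation, then release everything. The 256-byte block
//  size and the appended strings are made up for the example.
#if 0
static void example_buffer_usage(void)
{
    Buffer *buffer = buffer_create(256, MOJOSHADER_internal_malloc,
                                   MOJOSHADER_internal_free, NULL);
    if (buffer == NULL)
        return;  // out of memory.

    buffer_append(buffer, "void main() {\n", 14);
    buffer_append_fmt(buffer, "    gl_Position = pos[%d];\n", 0);
    buffer_append(buffer, "}\n", 2);

    char *text = buffer_flatten(buffer);  // caller owns this allocation.
    if (text != NULL)
    {
        // text holds the concatenated, NUL-terminated output.
        MOJOSHADER_internal_free(text, NULL);
    } // if

    buffer_destroy(buffer);
} // example_buffer_usage
#endif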

static int blockscmp(BufferBlock *item, const uint8 *data, size_t len)
{
    if (len == 0)
        return 1;  // "match"

    while (item != NULL)
    {
        const size_t itemremain = item->bytes;
        const size_t avail = len < itemremain ? len : itemremain;
        if (memcmp(item->data, data, avail) != 0)
            return 0;  // not a match.

        if (len == avail)
            return 1;  // complete match!

        len -= avail;
        data += avail;
        item = item->next;
    } // while

    return 0;  // not a complete match.
} // blockscmp

ssize_t buffer_find(Buffer *buffer, const size_t start,
                    const void *_data, const size_t len)
{
    if (len == 0)
        return 0;  // I guess that's right.

    if (start >= buffer->total_bytes)
        return -1;  // definitely can't match.

    if (len > (buffer->total_bytes - start))
        return -1;  // definitely can't match.

    // Find the start point somewhere in the center of a buffer.
    BufferBlock *item = buffer->head;
    const uint8 *ptr = item->data;
    size_t pos = 0;
    if (start > 0)
    {
        while (1)
        {
            assert(item != NULL);
            if ((pos + item->bytes) > start)  // start is in this block.
            {
                ptr = item->data + (start - pos);
                break;
            } // if

            pos += item->bytes;
            item = item->next;
        } // while
    } // if

    // okay, we're at the origin of the search.
    assert(item != NULL);
    assert(ptr != NULL);

    const uint8 *data = (const uint8 *) _data;
    const uint8 first = *data;
    while (item != NULL)
    {
        const size_t itemremain = item->bytes - ((size_t)(ptr-item->data));
        ptr = (uint8 *) memchr(ptr, first, itemremain);
        while (ptr != NULL)