Annotation of dietlibc/ldso.c, revision 1.3
1.1 leitner 1: #define errno fnord
2: #include <unistd.h>
3: #include <fcntl.h>
4: #include <sys/mman.h>
5: #include <elf.h>
6: #include <stdlib.h>
7: #include <stdint.h>
8: #undef errno
9:
/* Select the ELF structure names and relocation-info accessor macros
 * matching the pointer width this loader was compiled for, so the rest
 * of the file can use one set of names for both 32- and 64-bit. */
#if (__WORDSIZE == 64)

#define phdr Elf64_Phdr
#define ehdr Elf64_Ehdr
#define shdr Elf64_Shdr
#define sym Elf64_Sym
#define dyn Elf64_Dyn
#define rela Elf64_Rela
#define R_SYM ELF64_R_SYM
#define R_TYPE ELF64_R_TYPE

#else

#define phdr Elf32_Phdr
#define ehdr Elf32_Ehdr
#define shdr Elf32_Shdr
#define sym Elf32_Sym
#define dyn Elf32_Dyn
#define rela Elf32_Rela
#define R_SYM ELF32_R_SYM
#define R_TYPE ELF32_R_TYPE

#endif
33:
/* ld.so keeps its own private errno (the `#define errno fnord` trick at
 * the top of the file keeps the libc headers from declaring one); the
 * hidden __errno_location hook hands it to the syscall stubs. */
static int errno;

__attribute__((visibility("hidden"))) int* __errno_location(void) {
  return &errno;
}
36:
/* Return the number of bytes in the NUL-terminated string s,
 * not counting the terminator (like strlen(3)). */
static size_t _strlen(const char* s) {
  const char* p=s;
  while (*p) ++p;
  return (size_t)(p-s);
}
/* Copy the NUL-terminated string src (terminator included) to dest.
 * Returns a pointer to the NUL written into dest, like stpcpy(3),
 * which makes appending cheap. */
static char* _stpcpy(char* dest,const char* src) {
  while ((*dest=*src)) {
    ++dest;
    ++src;
  }
  return dest;
}
/* Return a pointer to the first occurrence of c in s, or NULL if c
 * does not occur.  As with strchr(3), searching for '\0' yields a
 * pointer to the terminator. */
static char* _strchr(char* s,char c) {
  for (;;++s) {
    if (*s==c) return s;
    if (!*s) return NULL;
  }
}
/* Compare two NUL-terminated strings byte-wise as unsigned chars.
 * Returns <0, 0 or >0 (difference of the first mismatching bytes),
 * like strcmp(3).  void* parameters match the original's signature. */
static int _strcmp(const void* str1,const void* str2) {
  const unsigned char* a=str1;
  const unsigned char* b=str2;
  int diff;
  for (;;) {
    diff=*a-*b;
    if (diff || !*a) return diff;
    ++a; ++b;
  }
}
/* Compare count bytes of two buffers as unsigned chars.
 * Returns <0, 0 or >0 like memcmp(3); returns 0 when count is 0.
 * Fix: r was uninitialized, so a count of 0 returned garbage
 * (undefined behavior). */
static int _memcmp(const void* dst,const void* src,size_t count) {
  const unsigned char* a=dst;
  const unsigned char* b=src;
  size_t i;
  int r=0;	/* fix: was uninitialized when count==0 */
  for (i=0; i<count && (r=(a[i]-b[i]))==0; ++i) ;
  return r;
}
/* Copy len bytes from src to dst; the regions must not overlap
 * (memcpy(3) semantics).  Returns dst. */
static void* _memcpy(void* dst,const void* src,size_t len) {
  unsigned char* to=dst;
  const unsigned char* from=src;
  while (len--)
    *to++=*from++;
  return dst;
}
/* Fill len bytes starting at dst with the byte value c. */
static void _memset(void* dst,unsigned char c,size_t len) {
  unsigned char* p=dst;
  unsigned char* end=p+len;
  while (p<end)
    *p++=c;
}
/* Write the NUL-terminated string s to stdout; returns write(2)'s
 * result.  Fix: both functions were declared ssize_t but fell off the
 * end without returning — undefined behavior if the value is used. */
static ssize_t __write1(const char* s) {
  return write(1,s,_strlen(s));
}
/* Write the NUL-terminated string s to stderr (error messages). */
static ssize_t __write2(const char* s) {
  return write(2,s,_strlen(s));
}
89:
90: ssize_t write(int fd,const void* buf,size_t len) __attribute__((visibility("hidden")));
91: int open(const char* pathname,int flags, ...) __attribute__((visibility("hidden")));
92: ssize_t read(int fd,void* buf,size_t len) __attribute__((visibility("hidden")));
93: int close(int fd) __attribute__((visibility("hidden")));
94: ssize_t pread64(int fd, void *buf, size_t count, off64_t offset) __attribute__((visibility("hidden")));;
95: void *mmap(void *__addr, size_t __len, int __prot, int __flags, int __fd, off_t __offset) __attribute__((visibility("hidden")));
96: int munmap(void *__addr, size_t __len) __attribute__((visibility("hidden")));
97: int mprotect (void *__addr, size_t __len, int __prot) __attribute__((visibility("hidden")));
98: void exit(int res) __attribute__((visibility("hidden")));
99:
100: static struct page {
101: struct page* next;
102: size_t a;
103: char data[4096-sizeof(size_t)-sizeof(struct page*)];
104: }* heap;
105:
106: static void* _malloc(size_t l) {
107: struct page** p;
108: if (l>sizeof(heap->data)) return 0;
109: if (l%(sizeof(void*)*2)) {
110: l += sizeof(void*)*2;
111: l -= l%(sizeof(void*)*2);
112: }
113: for (p=&heap; *p && (*p)->a<l; p=&((*p)->next)) ;
114: if (!*p) {
115: void* tmp=mmap(0,4096,PROT_READ|PROT_WRITE,MAP_ANONYMOUS|MAP_PRIVATE,-1,0);
116: if (tmp==MAP_FAILED) return 0;
117: *p=tmp;
118: (*p)->a=sizeof(heap->data);
119: }
120: if (l <= (*p)->a) {
121: char* tmp=(*p)->data+sizeof((*p)->data)-(*p)->a;
122: (*p)->a-=l;
123: return tmp;
124: } else
125: return 0; // can't happen
126: }
127:
/* Default library search path; filled from /etc/diet.ld.conf in main(). */
static char path[100];
/* Value of $LD_LIBRARY_PATH if set (points into envp), else NULL. */
static char* ldlp;

/* One record per mapped ELF object: dllroot is the program itself,
 * further entries (the shared libraries) are appended to the list
 * headed by dlls. */
static struct dll {
  struct dll* next;
  ehdr* e;              /* mapped ELF header (start of the file mapping) */
  void* code,* data;    /* base addresses of the code and data segments */
  size_t codelen,datalen,codeplus;  /* codeplus: load bias added to the
                                     * object's vaddrs (0 for the
                                     * fixed-address executable) */
  char name[1]; // asciiz of library name
} *dlls, dllroot;
138:
/* Map all PT_LOAD segments of the ELF object open on fd.
 * e points at its ELF header, p at the program header table
 * (e->e_phnum entries).  On success the mapped ELF header, the
 * code/data bases and lengths and the load bias are stored in D.
 * Returns 0 on success, 1 on failure. */
static int map_sections(int fd,const ehdr* e,const phdr* p,struct dll* D) {
  size_t i;
  uintptr_t codeplus=0;	/* load bias: where a vaddr-0 object got mapped */

  for (i=0; i<e->e_phnum; ++i) {
    if (p[i].p_type==PT_LOAD) {
      size_t delta=p[i].p_offset%4096;
      size_t maplen=p[i].p_filesz+delta;	/* NOTE(review): unused */
      size_t bssdiff=(p[i].p_filesz+delta)%4096;	/* NOTE(review): unused */
      char* c;
      /* enforce W^X: refuse segments that are writable and executable */
      if ((p[i].p_flags&PF_W) && (p[i].p_flags&PF_X)) {
        __write2("section is both executable and writable, aborting!\n");
        return 1;
      }
      if (!(p[i].p_flags&PF_W)) {
        /* code segment */
        size_t ofs,len,rolen=0,nolen=0,rolen2=0,vaddr=p[i].p_vaddr,baseofs=0;
        /* the first segment will be the code segment, and it will have
         * either a fixed address or 0 if it's a shared library. We
         * insist that the mapping start at file offset 0, and we extend
         * the mapping so it includes the section table */
        ofs=p[i].p_offset;
        len=p[i].p_filesz;
        if (ofs) {
          __write2("can't happen error: ofs!=0\n");
          exit(1);
          /* NOTE(review): everything below in this block is dead code
           * because of the exit(1) above */
          if (vaddr)
            vaddr-=ofs;
          else
            baseofs=ofs;
          rolen=ofs;
          len+=ofs;
          ofs=0;
        }
        if (ofs+len < e->e_shoff+e->e_shnum*e->e_shentsize) {
          size_t needed=e->e_shoff+e->e_shnum*e->e_shentsize;
          /* if this mapping does not include the section table is not
           * included, extend the mapping to include it */
          rolen2=e->e_shnum*e->e_shentsize;
          if (rolen2>needed-len)
            /* we were almost there, part of the section table was
             * already mapped */
            rolen2=needed-len;
          else
            nolen=needed-len-rolen2;
          /*
           * +------------------------+
           * | rolen e-> | if the mapping did not start at beginning of file, this is the beginning of file, PROT_READ
           * +------------------------+
           * | len base-> | this is the actual mapping, base points here, PROT_READ|PROT_EXEC
           * +------------------------+
           * | nolen | stuff we don't really need and will mprotect PROT_NONE
           * +------------------------+
           * | rolen2 e+e->shoff | the section header table, PROT_READ
           * +------------------------+
           */
        }
        c=mmap((char*)vaddr,rolen+len+nolen+rolen2,
               ((p[i].p_flags&PF_R)?PROT_READ:0) |
               ((p[i].p_flags&PF_X)?PROT_EXEC:0),
               MAP_SHARED|(vaddr?MAP_FIXED:0),
               fd,0);
        /* NOTE(review): this mmap result is not checked against
         * MAP_FAILED, unlike the data-segment mmap below */
        /* in case the can't happen branch ever happens */
        D->e=(ehdr*)c;
        D->code=c+rolen; D->codelen=len;
        // D->s=(shdr*)(c+e->e_shoff);
        if (rolen>=4096) /* if we extended the mapping in the front, remove exec permissions */
          mprotect(c,rolen&~4095,PROT_READ);
        /* remember the load bias the first time we place a vaddr-0
         * (shared object) code segment */
        if (!vaddr && !codeplus) codeplus=(uintptr_t)(c+rolen);
        if (nolen) {
          /* We mapped junk in the middle.
           * If there are full pages in there, map them PROT_NONE */
          char* start=c+rolen+len;
          size_t len=nolen;	/* NOTE(review): shadows the outer len */
          size_t diff=(-(uintptr_t)start & 4095);
          if (diff < nolen) {
            /* diff is the part at the beginning we need to skip because
             * it's on a page we actually need to be executable.
             * Now find out if we overshoot onto a page we want */
            size_t removeatend=((uintptr_t)c+rolen+len)&4095;
            mprotect(start+diff,len-diff-removeatend,PROT_NONE);
          }
        }
        if (rolen2) {
          /* Now we want to mprotect PROT_READ the section table.
           * What makes this complex is that mprotect granularity is one
           * page. First figure out the region we are interested in. */
          char* start=c+rolen+len+nolen;
          size_t len=rolen2;	/* NOTE(review): shadows the outer len */
          /* we want to mprotect from start to start+len */
          int diff=((uintptr_t)start)&4095;
          /* This is the offset of start from the page start. We need
           * to mprotect the whole page -- unless it also has code on
           * it, in which case we need to leave it alone */
          if (diff > nolen) {
            size_t roundup=(-(uintptr_t)start)&4095;
            /* we need to skip the first page. Let's see if there is
             * anything left in that case */
            start+=roundup;
            if (rolen2>roundup)
              len-=roundup;
            else
              len=0;
          } else {
            start-=diff;
            len+=diff;
          }
          if (len) mprotect(start,len,PROT_READ);
        }
      } else if (p[i].p_flags&PF_W) { /* data segment */
        /* We have already mapped the code segment to base.
         * For programs, the base mapping of the code and data segment
         * is absolute and we just map there with MAP_FIXED. For shared
         * libraries however, the base mapping of the code segment is 0,
         * and the vaddr of the data segment is not absolute but
         * relative to the code segment */

        /* The data segment may not start on a page boundary. Round
         * start address if necessary. */
        size_t addr,ofs,len,memsetstart=0,memsetend=0,additional=0;
        addr=p[i].p_vaddr+codeplus;
        ofs=p[i].p_offset;
        len=p[i].p_filesz;
        if ((memsetstart=(ofs%4096))) {
          len+=memsetstart;
          ofs-=memsetstart;
          addr-=memsetstart;
        }

        /* The data segment consists of actual data, but a part of it is
         * data initialized to 0, the .bss section. This section is not
         * actually put in the file to save space, but the ELF loader
         * (that's us) is expected to allocate that data at program
         * start. */
        memsetend=p[i].p_memsz-p[i].p_filesz;
        if (memsetend) {
          /* We have a .bss. We need to handle two cases. First: if
           * the number of bytes is small enough to fit on the last page
           * we already mapped for the data, all we need to do is memset
           * it. Otherwise we need to memset the rest of that page and
           * map some additional pages after it. */
          size_t bytes_on_last_page=(-len)&4095;
          if (memsetend<bytes_on_last_page)
            len+=memsetend;
          else {
            len+=bytes_on_last_page;
            additional=memsetend-bytes_on_last_page;
            memsetend=bytes_on_last_page;
          }
        }

        c=mmap((char*)addr,len,PROT_READ|PROT_WRITE,MAP_PRIVATE|MAP_FIXED,fd,ofs);
        if (c==MAP_FAILED) {
          __write2("mmap failed!\n");
          return 1;
        }

        /* zero the rounded-down head of the first page and the .bss
         * tail on the last file-backed page */
        if (memsetstart) _memset(c,0,memsetstart);
        if (memsetend) _memset(c+len-memsetend,0,memsetend);
        if (additional) {
          /* .bss spills past the last file-backed page: map anonymous
           * (zero-filled) pages after it */
          char* tmp=mmap(c+len,additional,PROT_READ|PROT_WRITE,MAP_PRIVATE|MAP_ANONYMOUS,-1,0);
          if (tmp==MAP_FAILED) {
            __write2("mmap failed!\n");
            return 1;
          }
        }
        D->data=c+memsetstart; D->datalen=len-memsetstart;
        D->codeplus=codeplus;
      } else {
        __write2("can't happen error: LOAD segment that is neither code nor data.\n");
        return 1;
      }
    }
  }
  return 0;
}
315:
316:
317: static int __loadlibrary(const char* fn) {
318: struct dll* D;
319: int fd;
320: char buf[1000];
321: ehdr* e;
322: phdr* p;
323: shdr* s;
324: size_t i;
325: char* code=0,* data=0;
326: char* base=0;
327: size_t codelen,datalen;
328: #if 0
329: __write1("trying ");
330: __write1(fn);
331: __write1("\n");
332: #endif
333: fd=open(fn,O_RDONLY);
334: if (fd==-1) return -1;
335: if (read(fd,buf,1000)<1000) {
336: kaputt:
337: close(fd);
338: return -1;
339: }
340: if (_memcmp(buf,"\177ELF",4)) goto kaputt;
341: e=(ehdr*)buf;
342:
343: if (e->e_ident[4] != (sizeof(size_t)/4) || // wrong word size
344: e->e_type != ET_DYN || // not shared object
345: e->e_machine != // for different architecture
346: #if defined(__x86_64__)
347: EM_X86_64
348: #elif defined(__i386__)
349: EM_386
350: #else
351: #error architecture not recognized, please add
352: #endif
353: ||
354: e->e_phentsize != sizeof(phdr) || // wrong program header entry size
355: e->e_phnum > 20 || // too many program header entries
356: e->e_shentsize !=sizeof(shdr) || // wrong section header entry size
357: e->e_shnum > 100) // too many sections
358: goto kaputt;
359:
360:
361: if (e->e_phoff>=1000 || e->e_phentsize*e->e_phnum>1000-e->e_phoff) {
362: size_t wanted=e->e_phentsize*e->e_phnum;
363: p=alloca(wanted);
364: if (pread64(fd,p,wanted,e->e_phoff)!=wanted)
365: goto kaputt;
366: } else
367: p=(phdr*)(buf+e->e_phoff);
368:
369: struct dll dll;
370: if (map_sections(fd,e,p,&dll)) {
371: __write2("map_sections failed\n");
372: goto kaputt;
373: }
374:
375: close(fd);
376:
377: {
378: const char* tmp;
379: for (i=0; fn[i]; ++i)
380: if (fn[i]=='/') tmp=fn+i;
381: ++tmp;
382: D=_malloc(sizeof(struct dll)+_strlen(tmp));
383: _stpcpy(D->name,tmp);
384: }
385: D->next=0;
386: D->code=dll.code; D->codelen=dll.codelen;
387: D->data=dll.data; D->datalen=dll.datalen;
1.2 leitner 388: // D->s=dll.s;
1.1 leitner 389: D->e=dll.e;
390: D->codeplus=dll.codeplus;
391: {
392: struct dll** x;
393: for (x=&dlls; *x; x=&(*x)->next) ;
394: *x=D;
395: }
396: return 0;
397: }
398:
399: static int loadlibrary(const char* fn) {
400: char lp[200];
401: int r;
402: char* c;
403: const char* shortname=fn;
404: struct dll* d;
405:
406: {
407: size_t i;
408: for (i=0; fn[i]; ++i)
409: if (fn[i]=='/') shortname=fn+i+1;
410: }
411:
412: if (_strlen(fn)>50) return -1;
413: for (d=dlls; d; d=d->next)
414: if (!_strcmp(d->name,shortname))
415: return 0;
416:
417: __write1("loadlibrary(\"");
418: __write1(fn);
419: __write1("\")\n");
420: if (fn[0]=='/') {
421: return __loadlibrary(fn);
422: }
423:
424: c=_stpcpy(lp,path);
425: *c++='/';
426: _stpcpy(c,fn);
427: r=__loadlibrary(lp);
428: if (r==0) return r;
429: if (ldlp) {
430: size_t i;
431: char* d;
432: c=ldlp;
433: for (i=0; ; ++i) {
434: again:
435: if (c[i]==':' || c[i]==0) {
436: if (i<100) {
437: lp[i]='/';
438: _stpcpy(lp+i+1,fn);
439: r=__loadlibrary(lp);
440: if (r==0) return;
441: }
442: if (c[i]==0) break;
443: c+=i+1; i=0; goto again;
444: } else
445: if (i<100) lp[i]=c[i];
446: }
447: }
448: return r;
449: }
450:
1.2 leitner 451: static int loadlibs(struct dll* D) {
1.1 leitner 452: size_t i;
1.2 leitner 453: phdr* p=(phdr*)((char*)D->e+D->e->e_phoff);
1.1 leitner 454: dyn* d;
455: size_t dnum,dynstrlen;
456: char* dynstr;
457:
458: /* we know we have exactly one code and exactly one data segment,
459: * otherwise we wouldn't have gotten this far */
1.2 leitner 460: for (i=0; i<D->e->e_phnum; ++i)
461: if (p[i].p_type==PT_DYNAMIC) {
462: d=(dyn*)((char*)p[i].p_vaddr+D->codeplus);
463: dnum=p[i].p_memsz/sizeof(dyn);
1.3 ! leitner 464: break;
1.1 leitner 465: }
1.2 leitner 466: for (i=0; i<dnum; ++i)
1.3 ! leitner 467: if (d[i].d_tag==DT_STRTAB) {
1.2 leitner 468: dynstr=(char*)d[i].d_un.d_ptr+D->codeplus;
1.3 ! leitner 469: break;
! 470: } else if (d[i].d_tag==DT_NULL)
! 471: break;
1.1 leitner 472:
473: /* we now have a dynamic section we can traverse */
474: for (i=0; i<dnum; ++i) {
475: if (d[i].d_tag==DT_NEEDED) {
476: if (loadlibrary(dynstr+d[i].d_un.d_val)) {
477: __write2("library ");
478: __write2(dynstr+d[i].d_un.d_val);
479: __write2(" not found!\n");
480: exit(2);
481: }
1.3 ! leitner 482: } else if (d[i].d_tag==DT_NULL)
! 483: break;
1.1 leitner 484: }
485:
486: return 0;
487: }
488:
/* The classic SysV ELF symbol hash function (as specified for the
 * DT_HASH table in the System V gABI). */
static unsigned int elf_hash(const unsigned char *name) {
  unsigned int h=0;

  while (*name) {
    unsigned int g;
    h=(h<<4)+*name++;
    g=h&0xf0000000;
    if (g) h^=g>>24;
    h^=g;
  }
  return h;
}
499:
/* The GNU symbol hash function: djb2 (h = h*33 + c) seeded with 5381,
 * truncated to 32 bits. */
static uint_fast32_t gnu_hash(const unsigned char *s) {
  uint_fast32_t h=5381;
  while (*s) {
    h=h*33+*s++;
  }
  return h&0xffffffff;
}
509:
/* Look up symbol in the chain of loaded objects starting at x, using
 * each object's SysV DT_HASH table.  Returns the relocated address of
 * the first defining object's symbol, or 0 if none defines it. */
static char* dlsym_int(const char* symbol,struct dll* x) {
  for (; x; x=x->next) {
    size_t i;
    dyn* d;
    sym* sy;
    phdr* p=(phdr*)(x->e->e_phoff+(char*)x->e);
    const char* strtab;
    size_t dnum;
    int* hash=0;
    /* locate this object's dynamic section.
     * NOTE(review): d and dnum stay uninitialized if the object has no
     * PT_DYNAMIC segment -- assumes every loaded object has one */
    for (i=0; i<x->e->e_phnum; ++i)
      if (p[i].p_type==PT_DYNAMIC) {
        d=(dyn*)(x->codeplus + p[i].p_vaddr);
        dnum=p[i].p_memsz/sizeof(dyn);
        break;
      }

    /* pick up the hash, symbol and string tables.
     * NOTE(review): sy and strtab stay uninitialized, and hash stays 0,
     * if the corresponding DT_* entries are missing (e.g. an object
     * built with only DT_GNU_HASH would crash below) */
    for (i=0; i<dnum; ++i) {
      if (d[i].d_tag == DT_HASH)
        hash=(int*)((char*)x->codeplus + d[i].d_un.d_ptr);
      else if (d[i].d_tag == DT_SYMTAB)
        sy=(sym*)((char*)(x->codeplus+d[i].d_un.d_ptr));
      else if (d[i].d_tag == DT_STRTAB)
        strtab=(char*)(x->codeplus+d[i].d_un.d_ptr);
    }

    /* hash[0] is the number of buckets. */
    /* hash[1] is the hash chain length, not used here */
    size_t bhash=elf_hash(symbol)%hash[0];
    unsigned int* chain=hash+2+hash[0];
    unsigned int index;
    /* walk this bucket's chain; index 0 (STN_UNDEF) terminates it */
    for (index=(hash+2)[bhash]; index; index=chain[index]) {
#if 0
      __write1(strtab+sy[index].st_name); __write1("\n");
#endif
      /* only accept symbols actually defined in this object */
      if (sy[index].st_value && sy[index].st_shndx!=SHN_UNDEF && !_strcmp(strtab+sy[index].st_name,symbol)) {
        return (char*)x->codeplus+sy[index].st_value;
      }
    }
#if 0
    if (x->next) {
      __write1(" ... next: ");
      __write1(x->next->name);
      __write1("\n");
    }
#endif
  }
  return 0;
}
558:
1.2 leitner 559: static void* dlsym(const char* s) {
560: return dlsym_int(s,&dllroot);
561: }
562:
/* dlsym() wrapper that reports failed lookups on stderr.
 * Still returns the (possibly NULL) result; it does not abort. */
static void* _dlsym(const char* s) {
  void* r=dlsym(s);
  if (r) return r;
  __write2("ld.so: lookup of symbol \"");
  __write2(s);
  __write2("\" failed.\n");
  // exit(1);
  return r;
}
573:
/* Apply the RELA relocations of object D.  Locates DT_RELA, DT_RELASZ,
 * DT_SYMTAB and DT_STRTAB through the PT_DYNAMIC segment, then patches
 * every relocation target in place.  Only the x86_64 relocation types
 * listed below are handled; any other type aborts the process. */
static void resolve(struct dll* D) {
  size_t i;
  phdr* p=(phdr*)((char*)D->e+D->e->e_phoff);
  dyn* d=0;
  size_t dnum,dynstrlen,rnum=0;	/* NOTE(review): dnum is read below even
				 * when no PT_DYNAMIC segment was found
				 * (then it is uninitialized); dynstrlen
				 * is never used */
  char* dynstr=0, *pltgot=0, *pltrel=0;	/* NOTE(review): pltgot and
					 * pltrel are never used */
  rela* r=0;
  sym* symtab=0;

  /* we know we have exactly one code and exactly one data segment,
   * otherwise we wouldn't have gotten this far */
  for (i=0; i<D->e->e_phnum; ++i)
    if (p[i].p_type==PT_DYNAMIC) {
      d=(dyn*)((char*)p[i].p_vaddr+D->codeplus);
      dnum=p[i].p_memsz/sizeof(dyn);
      break;
    }
  /* collect the tables we need from the dynamic section */
  for (i=0; i<dnum; ++i)
    if (d[i].d_tag==DT_STRTAB)
      dynstr=(char*)d[i].d_un.d_ptr+D->codeplus;
    else if (d[i].d_tag==DT_RELA)
      r=(rela*)((char*)d[i].d_un.d_ptr+D->codeplus);
    else if (d[i].d_tag==DT_RELASZ)
      rnum=d[i].d_un.d_val/sizeof(rela);
    else if (d[i].d_tag==DT_SYMTAB)
      symtab=(sym*)((char*)d[i].d_un.d_ptr+D->codeplus);
    else if (d[i].d_tag==0)	/* DT_NULL: end of table */
      break;

  for (i=0; i<rnum; ++i) {
    size_t* x=(size_t*)((char*)(r[i].r_offset+D->codeplus));	/* relocation target */
    char* y;
    size_t sym=R_SYM(r[i].r_info);
    switch (R_TYPE(r[i].r_info)) {
#if defined(__x86_64__)
    case R_X86_64_64:
      /* direct 64-bit address.  NOTE(review): r_addend is not added
       * here, unlike in the psABI's definition of this relocation */
      *x=D->codeplus+symtab[sym].st_value;
      break;
    case R_X86_64_COPY:
      /* copy relocation: fetch the definition from some OTHER object
       * (the search deliberately starts at D->next) and copy its data
       * into this object */
      y=dlsym_int(symtab[sym].st_name+dynstr,D->next);
      if (!y && ELF32_ST_BIND(symtab[sym].st_info) != STB_WEAK) {
        __write2("symbol lookup failed: ");
        __write2(dynstr+symtab[sym].st_name);
        __write2("\n");
        exit(1);
      }
      /* NOTE(review): if y is 0 for a weak symbol this _memcpy reads
       * from a null pointer unless st_size is 0 */
      _memcpy(x,y,symtab[sym].st_size);
      break;
    case R_X86_64_GLOB_DAT:
    case R_X86_64_JUMP_SLOT:
      /* store the symbol's absolute address; PLT slots are resolved
       * eagerly here (no lazy binding) */
      y=dlsym(symtab[sym].st_name+dynstr);
      if (!y && ELF32_ST_BIND(symtab[sym].st_info) != STB_WEAK) {
        __write2("symbol lookup failed: ");
        __write2(dynstr+symtab[sym].st_name);
        __write2("\n");
        exit(1);
      }
      *x=(uintptr_t)y;
      break;
    case R_X86_64_RELATIVE:
      /* load bias plus addend */
      *x=r[i].r_addend+D->codeplus;
      break;
    case R_X86_64_32:
      /* 32-bit value read through the resolved symbol's address */
      *(uint32_t*)x=*(uint32_t*)_dlsym(symtab[sym].st_name+dynstr)+r[i].r_addend;
      break;
    default:
      __write2("unknown relocation type!\n");
      exit(1);
      break;
#else
#error fixme: add relocation types for your platform
#endif
    }
  }

  return;
}
651:
/* ld.so entry point: validate and map the program named in argv[1],
 * load all the shared libraries it (transitively) needs, apply the
 * program's relocations and jump to its entry point. */
int main(int argc,char* argv[],char* envp[]) {
  int fd;
  size_t l;
  char* m;	/* NOTE(review): m, code, data, s, d, dnum, dynstr and
		 * dynstrlen below are unused leftovers */
  char buf[1000];
  ehdr* e;
  phdr* p;
  phdr* code=0,* data=0;
  shdr* s;
  dyn* d;
  size_t dnum;
  char* dynstr;
  size_t dynstrlen;
  size_t i;

#if 0
  {
    fd=open("/proc/self/maps",O_RDONLY);
    if (fd!=-1) {
      size_t l;
      do {
        l=read(fd,buf,sizeof(buf));
        write(1,buf,l);
      } while (l==sizeof(buf));
      close(fd);
    }
  }
#endif
  if (argc<2) {
    __write2("usage: ld.so /path/to/binary\n");
    return 0;
  }
  /* read the default library search path from /etc/diet.ld.conf,
   * stripping trailing newlines */
  fd=open("/etc/diet.ld.conf",O_RDONLY);
  if (fd!=-1) {
    int r=read(fd,path,99);
    if (r>0) path[r]=0;
    while (r>0 && path[r-1]=='\n') path[--r]=0;
    close(fd);
  }
  /* honor LD_LIBRARY_PATH from the environment */
  for (i=0; envp[i]; ++i) {
    if (_memcmp(envp[i],"LD_LIBRARY_PATH=",16)==0)
      ldlp=envp[i]+16;
  }
  fd=open(argv[1],O_RDONLY);
  if (fd==-1) {
    __write2("could not open \"");
    __write2(argv[1]);
    __write2("\".\n");
    return 1;
  }
  /* read the ELF header (and, usually, the program header table) */
  l=read(fd,buf,1000);
  if (l<sizeof(ehdr) || _memcmp(buf,"\177ELF",4)) {
kaputt:
    /* NOTE(review): this message is truncated -- the filename and the
     * closing quote are never printed */
    __write2("invalid ELF file \"");
    close(fd);
    return 1;
  }
  e=(ehdr*)buf;

  /* sanity-check the header fields we rely on */
  if (e->e_ident[4] != (sizeof(size_t)/4)) {
    __write2("wrong word size!\n");
    return 1;
  }
#if 0
  if (e->e_ident[7] != ELFOSABI_LINUX) {
    __write2("ABI not Linux!\n");
    return 1;
  }
#endif
  if (e->e_type != ET_EXEC) {
    __write2("not an executable!\n");
    return 1;
  }
  if (e->e_machine !=
#if defined(__x86_64__)
      EM_X86_64
#elif defined(__i386__)
      EM_386
#else
#error architecture not recognized, please add
#endif
     ) {
    __write2("invalid architecture!\n");
    return 1;
  }

  if (e->e_phentsize != sizeof(phdr)) {
    __write2("invalid phentsize!\n");
    return 1;
  }
  if (e->e_phnum > 20) {
    __write2("phnum too large!\n");
    return 1;
  }
  if (e->e_shentsize != sizeof(shdr)) {
    __write2("invalid shentsize!\n");
    return 1;
  }
  if (e->e_shnum > 100) {
    __write2("shnum too large!\n");
    return 1;
  }

  /* if the program header table was not inside the prefix we read,
   * fetch it separately */
  if (e->e_phoff>=l || e->e_phentsize*e->e_phnum>l-e->e_phoff) {
    size_t wanted=e->e_phentsize*e->e_phnum;
    p=alloca(wanted);
    if (pread64(fd,p,wanted,e->e_phoff)!=wanted)
      goto kaputt;
  } else
    p=(phdr*)(buf+e->e_phoff);

  /* the program itself becomes the first entry in the object list */
  dlls=&dllroot;
  if (map_sections(fd,e,p,&dllroot)) {
    __write2("map_sections failed!\n");
    return 1;
  }
  close(fd);

  /* load the program's direct dependencies ... */
  loadlibs(&dllroot);

  /* now load the prerequisites of the libraries we loaded */
  {
    struct dll* x;
    for (x=dlls; x; x=x->next) {
      loadlibs(x);
    }
  }

  /* NOTE(review): only the program's relocations are processed here;
   * the libraries' own relocations are not resolved */
  resolve(&dllroot);

  __write2("jumping...\n");

  {
    /* hand control to the program's entry point (codeplus is 0 for the
     * fixed-address ET_EXEC binary, so this is e_entry itself) */
    int (*_init)(int argc,char* argv[],char* envp[])=(void*)(e->e_entry+dllroot.codeplus);
    return _init(argc,argv,envp);
  }
#if 0
  {
    char* x=dlsym("theint");
    __write1("done\n");
  }
#endif

#if 0
  printf("jump to %p\n",e->e_entry);
  for (i=0; i<16; ++i) {
    printf("%02x ",((unsigned char*)e->e_entry)[i]);
  }
#endif

  return 0;
}
LinuxTV legacy CVS <linuxtv.org/cvs>