Annotation of dietlibc/ldso.c, revision 1.2
1.1 leitner 1: #define errno fnord
2: #include <unistd.h>
3: #include <fcntl.h>
4: #include <sys/mman.h>
5: #include <elf.h>
6: #include <stdlib.h>
7: #include <stdint.h>
8: #undef errno
9:
10: #if (__WORDSIZE == 64)
11:
12: #define phdr Elf64_Phdr
13: #define ehdr Elf64_Ehdr
14: #define shdr Elf64_Shdr
15: #define sym Elf64_Sym
16: #define dyn Elf64_Dyn
1.2 ! leitner 17: #define rela Elf64_Rela
! 18: #define R_SYM ELF64_R_SYM
! 19: #define R_TYPE ELF64_R_TYPE
1.1 leitner 20:
21: #else
22:
23: #define phdr Elf32_Phdr
24: #define ehdr Elf32_Ehdr
25: #define shdr Elf32_Shdr
26: #define sym Elf32_Sym
27: #define dyn Elf32_Dyn
1.2 ! leitner 28: #define rela Elf32_Rela
! 29: #define R_SYM ELF32_R_SYM
! 30: #define R_TYPE ELF32_R_TYPE
1.1 leitner 31:
32: #endif
33:
34: static int errno;
35: __attribute__((visibility("hidden"))) int* __errno_location(void) { return &errno; }
36:
/* Minimal strlen replacement: number of bytes before the terminating NUL. */
static size_t _strlen(const char* s) {
  const char* p=s;
  while (*p) ++p;
  return (size_t)(p-s);
}
/* Copy src (including the NUL) to dest; return a pointer to the
 * terminating NUL in dest (stpcpy semantics). */
static char* _stpcpy(char* dest,const char* src) {
  while ((*dest=*src)) {
    ++dest;
    ++src;
  }
  return dest;
}
/* Find the first occurrence of c in s; NULL if absent.  As with the
 * standard strchr, searching for 0 yields the terminator's address. */
static char* _strchr(char* s,char c) {
  while (*s && *s!=c) ++s;
  return (*s==c) ? s : NULL;
}
/* Compare two NUL-terminated strings; <0, 0, >0 like strcmp.
 * Takes void* so it can be handed unsigned char buffers directly. */
static int _strcmp(const void* str1,const void* str2) {
  const unsigned char* a=str1;
  const unsigned char* b=str2;
  int d;
  while ((d=*a-*b)==0 && *a) {
    ++a;
    ++b;
  }
  return d;
}
/* Compare count bytes; <0, 0, >0 like memcmp.
 * Fix: r was uninitialized, so count==0 returned an indeterminate
 * value (undefined behavior).  Now 0 is returned for empty ranges. */
static int _memcmp(const void* dst,const void* src,size_t count) {
  const unsigned char* a=dst;
  const unsigned char* b=src;
  size_t i;
  int r=0;
  for (i=0; i<count && (r=(a[i]-b[i]))==0; ++i) ;
  return r;
}
/* Copy len bytes from src to dst (non-overlapping); returns dst. */
static void* _memcpy(void* dst,const void* src,size_t len) {
  unsigned char* d=dst;
  const unsigned char* s=src;
  while (len--) *d++=*s++;
  return dst;
}
/* Fill len bytes at dst with byte value c. */
static void _memset(void* dst,unsigned char c,size_t len) {
  unsigned char* p=dst;
  while (len--) *p++=c;
}
/* Write a NUL-terminated string to stdout.
 * Fix: the function is declared ssize_t but had no return statement --
 * using its value was undefined behavior.  Now forwards write()'s result. */
static ssize_t __write1(const char* s) {
  return write(1,s,_strlen(s));
}
/* Write a NUL-terminated string to stderr.
 * Fix: missing return statement (same defect as __write1). */
static ssize_t __write2(const char* s) {
  return write(2,s,_strlen(s));
}
89:
90: ssize_t write(int fd,const void* buf,size_t len) __attribute__((visibility("hidden")));
91: int open(const char* pathname,int flags, ...) __attribute__((visibility("hidden")));
92: ssize_t read(int fd,void* buf,size_t len) __attribute__((visibility("hidden")));
93: int close(int fd) __attribute__((visibility("hidden")));
94: ssize_t pread64(int fd, void *buf, size_t count, off64_t offset) __attribute__((visibility("hidden")));;
95: void *mmap(void *__addr, size_t __len, int __prot, int __flags, int __fd, off_t __offset) __attribute__((visibility("hidden")));
96: int munmap(void *__addr, size_t __len) __attribute__((visibility("hidden")));
97: int mprotect (void *__addr, size_t __len, int __prot) __attribute__((visibility("hidden")));
98: void exit(int res) __attribute__((visibility("hidden")));
99:
100: static struct page {
101: struct page* next;
102: size_t a;
103: char data[4096-sizeof(size_t)-sizeof(struct page*)];
104: }* heap;
105:
106: static void* _malloc(size_t l) {
107: struct page** p;
108: if (l>sizeof(heap->data)) return 0;
109: if (l%(sizeof(void*)*2)) {
110: l += sizeof(void*)*2;
111: l -= l%(sizeof(void*)*2);
112: }
113: for (p=&heap; *p && (*p)->a<l; p=&((*p)->next)) ;
114: if (!*p) {
115: void* tmp=mmap(0,4096,PROT_READ|PROT_WRITE,MAP_ANONYMOUS|MAP_PRIVATE,-1,0);
116: if (tmp==MAP_FAILED) return 0;
117: *p=tmp;
118: (*p)->a=sizeof(heap->data);
119: }
120: if (l <= (*p)->a) {
121: char* tmp=(*p)->data+sizeof((*p)->data)-(*p)->a;
122: (*p)->a-=l;
123: return tmp;
124: } else
125: return 0; // can't happen
126: }
127:
128: static char path[100];
129: static char* ldlp;
130:
131: static struct dll {
132: struct dll* next;
133: ehdr* e;
134: void* code,* data;
135: size_t codelen,datalen,codeplus;
136: char name[1]; // asciiz of library name
137: } *dlls, dllroot;
138:
139: static int map_sections(int fd,const ehdr* e,const phdr* p,struct dll* D) {
140: size_t i;
141: uintptr_t codeplus=0;
142:
143: for (i=0; i<e->e_phnum; ++i) {
144: if (p[i].p_type==PT_LOAD) {
145: size_t delta=p[i].p_offset%4096;
146: size_t maplen=p[i].p_filesz+delta;
147: size_t bssdiff=(p[i].p_filesz+delta)%4096;
148: char* c;
149: if ((p[i].p_flags&PF_W) && (p[i].p_flags&PF_X)) {
150: __write2("section is both executable and writable, aborting!\n");
151: return 1;
152: }
153: if (p[i].p_flags&PF_X) {
154: /* code segment */
155: size_t ofs,len,rolen=0,nolen=0,rolen2=0,vaddr=p[i].p_vaddr,baseofs=0;
156: /* the first segment will be the code segment, and it will have
157: * either a fixed address or 0 if it's a shared library. We
158: * insist that the mapping start at file offset 0, and we extend
159: * the mapping so it includes the section table */
160: ofs=p[i].p_offset;
161: len=p[i].p_filesz;
162: if (ofs) {
163: __write2("can't happen error: ofs!=0\n");
164: exit(1);
165: if (vaddr)
166: vaddr-=ofs;
167: else
168: baseofs=ofs;
169: rolen=ofs;
170: len+=ofs;
171: ofs=0;
172: }
173: if (ofs+len < e->e_shoff+e->e_shnum*e->e_shentsize) {
174: size_t needed=e->e_shoff+e->e_shnum*e->e_shentsize;
175: /* if this mapping does not include the section table is not
176: * included, extend the mapping to include it */
177: rolen2=e->e_shnum*e->e_shentsize;
178: if (rolen2>needed-len)
179: /* we were almost there, part of the section table was
180: * already mapped */
181: rolen2=needed-len;
182: else
183: nolen=needed-len-rolen2;
184: /*
185: * +------------------------+
186: * | rolen e-> | if the mapping did not start at beginning of file, this is the beginning of file, PROT_READ
187: * +------------------------+
188: * | len base-> | this is the actual mapping, base points here, PROT_READ|PROT_EXEC
189: * +------------------------+
190: * | nolen | stuff we don't really need and will mprotect PROT_NONE
191: * +------------------------+
192: * | rolen2 e+e->shoff | the section header table, PROT_READ
193: * +------------------------+
194: */
195: }
196: c=mmap((char*)vaddr,rolen+len+nolen+rolen2,
197: ((p[i].p_flags&PF_R)?PROT_READ:0) |
198: ((p[i].p_flags&PF_X)?PROT_EXEC:0),
199: MAP_SHARED|(vaddr?MAP_FIXED:0),
200: fd,0);
201: /* in case the can't happen branch ever happens */
202: D->e=(ehdr*)c;
203: D->code=c+rolen; D->codelen=len;
1.2 ! leitner 204: // D->s=(shdr*)(c+e->e_shoff);
1.1 leitner 205: if (rolen>=4096) /* if we extended the mapping in the front, remove exec permissions */
206: mprotect(c,rolen&~4095,PROT_READ);
207: if (!vaddr) codeplus=(uintptr_t)(c+rolen);
208: if (nolen) {
209: /* We mapped junk in the middle.
210: * If there are full pages in there, map them PROT_NONE */
211: char* start=c+rolen+len;
212: size_t len=nolen;
213: size_t diff=(-(uintptr_t)start & 4095);
214: if (diff < nolen) {
215: /* diff is the part at the beginning we need to skip because
216: * it's on a page we actually need to be executable.
217: * Now find out if we overshoot onto a page we want */
218: size_t removeatend=((uintptr_t)c+rolen+len)&4095;
219: mprotect(start+diff,len-diff-removeatend,PROT_NONE);
220: }
221: }
222: if (rolen2) {
223: /* Now we want to mprotect PROT_READ the section table.
224: * What makes this complex is that mprotect granularity is one
225: * page. First figure out the region we are interested in. */
226: char* start=c+rolen+len+nolen;
227: size_t len=rolen2;
228: /* we want to mprotect from start to start+len */
229: int diff=((uintptr_t)start)&4095;
230: /* This is the offset of start from the page start. We need
231: * to mprotect the whole page -- unless it also has code on
232: * it, in which case we need to leave it alone */
233: if (diff > nolen) {
234: size_t roundup=(-(uintptr_t)start)&4095;
235: /* we need to skip the first page. Let's see if there is
236: * anything left in that case */
237: start+=roundup;
238: if (rolen2>roundup)
239: len-=roundup;
240: else
241: len=0;
242: } else {
243: start-=diff;
244: len+=diff;
245: }
246: if (len) mprotect(start,len,PROT_READ);
247: }
248: } else if (p[i].p_flags&PF_W) { /* data segment */
249: /* We have already mapped the code segment to base.
250: * For programs, the base mapping of the code and data segment
251: * is absolute and we just map there with MAP_FIXED. For shared
252: * libraries however, the base mapping of the code segment is 0,
253: * and the vaddr of the data segment is not absolute but
254: * relative to the code segment */
255:
256: /* The data segment may not start on a page boundary. Round
257: * start address if necessary. */
258: size_t addr,ofs,len,memsetstart=0,memsetend=0,additional=0;
259: addr=p[i].p_vaddr+codeplus;
260: ofs=p[i].p_offset;
261: len=p[i].p_filesz;
262: if ((memsetstart=(ofs%4096))) {
263: len+=memsetstart;
264: ofs-=memsetstart;
265: addr-=memsetstart;
266: }
267:
268: /* The data segment consists of actual data, but a part of it is
269: * data initialized to 0, the .bss section. This section is not
270: * actually put in the file to save space, but the ELF loader
271: * (that's us) is expected to allocate that data at program
272: * start. */
273: memsetend=p[i].p_memsz-p[i].p_filesz;
274: if (memsetend) {
275: /* We have a .bss. We need to handle two cases. First: if
276: * the number of bytes is small enough to fit on the last page
277: * we already mapped for the data, all we need to do is memset
278: * it. Otherwise we needto memset the rest of that page and
279: * map some additional pages after it. */
280: size_t bytes_on_last_page=(-len)&4095;
281: if (memsetend<bytes_on_last_page)
282: len+=memsetend;
283: else {
284: len+=bytes_on_last_page;
285: additional=memsetend-bytes_on_last_page;
286: memsetend=bytes_on_last_page;
287: }
288: }
289:
290: c=mmap((char*)addr,len,PROT_READ|PROT_WRITE,MAP_PRIVATE|MAP_FIXED,fd,ofs);
291: if (c==MAP_FAILED) {
292: __write2("mmap failed!\n");
293: return 1;
294: }
295:
296: if (memsetstart) _memset(c,0,memsetstart);
297: if (memsetend) _memset(c+len-memsetend,0,memsetend);
298: if (additional) {
299: char* tmp=mmap(c+len,additional,PROT_READ|PROT_WRITE,MAP_PRIVATE|MAP_ANONYMOUS,-1,0);
300: if (tmp==MAP_FAILED) {
301: __write2("mmap failed!\n");
302: return 1;
303: }
304: }
305: D->data=c+memsetstart; D->datalen=len-memsetstart;
306: D->codeplus=codeplus;
307: }
308: }
309: }
310: return 0;
311: }
312:
313:
314: static int __loadlibrary(const char* fn) {
315: struct dll* D;
316: int fd;
317: char buf[1000];
318: ehdr* e;
319: phdr* p;
320: shdr* s;
321: size_t i;
322: char* code=0,* data=0;
323: char* base=0;
324: size_t codelen,datalen;
325: #if 0
326: __write1("trying ");
327: __write1(fn);
328: __write1("\n");
329: #endif
330: fd=open(fn,O_RDONLY);
331: if (fd==-1) return -1;
332: if (read(fd,buf,1000)<1000) {
333: kaputt:
334: close(fd);
335: return -1;
336: }
337: if (_memcmp(buf,"\177ELF",4)) goto kaputt;
338: e=(ehdr*)buf;
339:
340: if (e->e_ident[4] != (sizeof(size_t)/4) || // wrong word size
341: e->e_type != ET_DYN || // not shared object
342: e->e_machine != // for different architecture
343: #if defined(__x86_64__)
344: EM_X86_64
345: #elif defined(__i386__)
346: EM_386
347: #else
348: #error architecture not recognized, please add
349: #endif
350: ||
351: e->e_phentsize != sizeof(phdr) || // wrong program header entry size
352: e->e_phnum > 20 || // too many program header entries
353: e->e_shentsize !=sizeof(shdr) || // wrong section header entry size
354: e->e_shnum > 100) // too many sections
355: goto kaputt;
356:
357:
358: if (e->e_phoff>=1000 || e->e_phentsize*e->e_phnum>1000-e->e_phoff) {
359: size_t wanted=e->e_phentsize*e->e_phnum;
360: p=alloca(wanted);
361: if (pread64(fd,p,wanted,e->e_phoff)!=wanted)
362: goto kaputt;
363: } else
364: p=(phdr*)(buf+e->e_phoff);
365:
366: struct dll dll;
367: if (map_sections(fd,e,p,&dll)) {
368: __write2("map_sections failed\n");
369: goto kaputt;
370: }
371:
372: close(fd);
373:
374: {
375: const char* tmp;
376: for (i=0; fn[i]; ++i)
377: if (fn[i]=='/') tmp=fn+i;
378: ++tmp;
379: D=_malloc(sizeof(struct dll)+_strlen(tmp));
380: _stpcpy(D->name,tmp);
381: }
382: D->next=0;
383: D->code=dll.code; D->codelen=dll.codelen;
384: D->data=dll.data; D->datalen=dll.datalen;
1.2 ! leitner 385: // D->s=dll.s;
1.1 leitner 386: D->e=dll.e;
387: D->codeplus=dll.codeplus;
388: {
389: struct dll** x;
390: for (x=&dlls; *x; x=&(*x)->next) ;
391: *x=D;
392: }
393: return 0;
394: }
395:
396: static int loadlibrary(const char* fn) {
397: char lp[200];
398: int r;
399: char* c;
400: const char* shortname=fn;
401: struct dll* d;
402:
403: {
404: size_t i;
405: for (i=0; fn[i]; ++i)
406: if (fn[i]=='/') shortname=fn+i+1;
407: }
408:
409: if (_strlen(fn)>50) return -1;
410: for (d=dlls; d; d=d->next)
411: if (!_strcmp(d->name,shortname))
412: return 0;
413:
414: __write1("loadlibrary(\"");
415: __write1(fn);
416: __write1("\")\n");
417: if (fn[0]=='/') {
418: return __loadlibrary(fn);
419: }
420:
421: c=_stpcpy(lp,path);
422: *c++='/';
423: _stpcpy(c,fn);
424: r=__loadlibrary(lp);
425: if (r==0) return r;
426: if (ldlp) {
427: size_t i;
428: char* d;
429: c=ldlp;
430: for (i=0; ; ++i) {
431: again:
432: if (c[i]==':' || c[i]==0) {
433: if (i<100) {
434: lp[i]='/';
435: _stpcpy(lp+i+1,fn);
436: r=__loadlibrary(lp);
437: if (r==0) return;
438: }
439: if (c[i]==0) break;
440: c+=i+1; i=0; goto again;
441: } else
442: if (i<100) lp[i]=c[i];
443: }
444: }
445: return r;
446: }
447:
1.2 ! leitner 448: static int loadlibs(struct dll* D) {
1.1 leitner 449: size_t i;
1.2 ! leitner 450: phdr* p=(phdr*)((char*)D->e+D->e->e_phoff);
1.1 leitner 451: dyn* d;
452: size_t dnum,dynstrlen;
453: char* dynstr;
454:
455: /* we know we have exactly one code and exactly one data segment,
456: * otherwise we wouldn't have gotten this far */
1.2 ! leitner 457: for (i=0; i<D->e->e_phnum; ++i)
! 458: if (p[i].p_type==PT_DYNAMIC) {
! 459: d=(dyn*)((char*)p[i].p_vaddr+D->codeplus);
! 460: dnum=p[i].p_memsz/sizeof(dyn);
1.1 leitner 461: }
1.2 ! leitner 462: for (i=0; i<dnum; ++i)
! 463: if (d[i].d_tag==DT_STRTAB)
! 464: dynstr=(char*)d[i].d_un.d_ptr+D->codeplus;
1.1 leitner 465:
466: /* we now have a dynamic section we can traverse */
467: for (i=0; i<dnum; ++i) {
468: if (d[i].d_tag==DT_NEEDED) {
469: if (loadlibrary(dynstr+d[i].d_un.d_val)) {
470: __write2("library ");
471: __write2(dynstr+d[i].d_un.d_val);
472: __write2(" not found!\n");
473: exit(2);
474: }
475: }
476: }
477:
478: return 0;
479: }
480:
/* Classic System V ELF symbol-table hash function. */
static unsigned int elf_hash(const unsigned char *name) {
  unsigned int h=0;
  unsigned int g=0;
  for (; *name; ++name) {
    h=(h<<4)+*name;
    g=h&0xf0000000;
    if (g) h ^= g>>24;
    h ^= g; /* no-op when g==0, matching the published algorithm */
  }
  return h;
}
491:
/* GNU hash (djb2: h = h*33 + c), truncated to 32 bits. */
static uint_fast32_t gnu_hash(const unsigned char *s) {
  uint_fast32_t h=5381;
  while (*s)
    h=h*33+*s++; /* same as (h<<5)+h+c */
  return h&0xffffffff;
}
501:
1.2 ! leitner 502: static char* dlsym_int(const char* symbol,struct dll* x) {
! 503: for (; x; x=x->next) {
1.1 leitner 504: size_t i;
505: dyn* d;
506: sym* sy;
1.2 ! leitner 507: phdr* p=(phdr*)(x->e->e_phoff+(char*)x->e);
1.1 leitner 508: const char* strtab;
509: size_t dnum;
510: int* hash=0;
1.2 ! leitner 511: for (i=0; i<x->e->e_phnum; ++i)
! 512: if (p[i].p_type==PT_DYNAMIC) {
! 513: d=(dyn*)(x->codeplus + p[i].p_vaddr);
! 514: dnum=p[i].p_memsz/sizeof(dyn);
1.1 leitner 515: break;
516: }
517:
518: for (i=0; i<dnum; ++i) {
519: if (d[i].d_tag == DT_HASH)
520: hash=(int*)((char*)x->codeplus + d[i].d_un.d_ptr);
521: else if (d[i].d_tag == DT_SYMTAB)
522: sy=(sym*)((char*)(x->codeplus+d[i].d_un.d_ptr));
523: else if (d[i].d_tag == DT_STRTAB)
524: strtab=(char*)(x->codeplus+d[i].d_un.d_ptr);
525: }
526:
527: /* hash[0] is the number of buckets. */
528: /* hash[1] is the hash chain length, not used here */
529: size_t bhash=elf_hash(symbol)%hash[0];
530: unsigned int* chain=hash+2+hash[0];
531: unsigned int index;
532: for (index=(hash+2)[bhash]; index; index=chain[index]) {
533: #if 0
534: __write1(strtab+sy[index].st_name); __write1("\n");
535: #endif
536: if (sy[index].st_value && sy[index].st_shndx!=SHN_UNDEF && !_strcmp(strtab+sy[index].st_name,symbol)) {
537: return (char*)x->codeplus+sy[index].st_value;
538: }
539: }
540: #if 0
541: if (x->next) {
542: __write1(" ... next: ");
543: __write1(x->next->name);
544: __write1("\n");
545: }
546: #endif
547: }
548: return 0;
549: }
550:
1.2 ! leitner 551: static void* dlsym(const char* s) {
! 552: return dlsym_int(s,&dllroot);
! 553: }
! 554:
/* dlsym wrapper that prints a diagnostic on failed lookups; the
 * relocation then writes NULL (the exit is deliberately disabled).
 * Fix vs. original: the temporary was `const void*` but the function
 * returns `void*`, a C constraint violation; now plain void*. */
static void* _dlsym(const char* s) {
  void* x=dlsym(s);
  if (!x) {
    __write2("ld.so: lookup of symbol \"");
    __write2(s);
    __write2("\" failed.\n");
    // exit(1);
  }
  return x;
}
! 565:
! 566: static void resolve(struct dll* D) {
! 567: size_t i;
! 568: phdr* p=(phdr*)((char*)D->e+D->e->e_phoff);
! 569: dyn* d=0;
! 570: size_t dnum,dynstrlen,rnum=0;
! 571: char* dynstr=0, *pltgot=0, *pltrel=0;
! 572: rela* r=0;
! 573: sym* symtab=0;
! 574:
! 575: /* we know we have exactly one code and exactly one data segment,
! 576: * otherwise we wouldn't have gotten this far */
! 577: for (i=0; i<D->e->e_phnum; ++i)
! 578: if (p[i].p_type==PT_DYNAMIC) {
! 579: d=(dyn*)((char*)p[i].p_vaddr+D->codeplus);
! 580: dnum=p[i].p_memsz/sizeof(dyn);
! 581: break;
! 582: }
! 583: for (i=0; i<dnum; ++i)
! 584: if (d[i].d_tag==DT_STRTAB)
! 585: dynstr=(char*)d[i].d_un.d_ptr+D->codeplus;
! 586: else if (d[i].d_tag==DT_RELA)
! 587: r=(rela*)((char*)d[i].d_un.d_ptr+D->codeplus);
! 588: else if (d[i].d_tag==DT_RELASZ)
! 589: rnum=d[i].d_un.d_val/sizeof(rela);
! 590: else if (d[i].d_tag==DT_SYMTAB)
! 591: symtab=(sym*)((char*)d[i].d_un.d_ptr+D->codeplus);
! 592: else if (d[i].d_tag==0)
! 593: break;
! 594:
! 595: for (i=0; i<rnum; ++i) {
! 596: size_t* x=(size_t*)((char*)(r[i].r_offset+D->codeplus));
! 597: size_t sym=R_SYM(r[i].r_info);
! 598: switch (R_TYPE(r[i].r_info)) {
! 599: #if defined(__x86_64__)
! 600: case R_X86_64_64:
! 601: *x=D->codeplus+symtab[sym].st_value;
! 602: break;
! 603: case R_X86_64_COPY:
! 604: _memcpy(x,dlsym_int(symtab[sym].st_name+dynstr,D->next),symtab[sym].st_size);
! 605: break;
! 606: case R_X86_64_GLOB_DAT:
! 607: case R_X86_64_JUMP_SLOT:
! 608: *x=(uintptr_t)_dlsym(symtab[sym].st_name+dynstr);
! 609: break;
! 610: case R_X86_64_RELATIVE:
! 611: *x=r[i].r_addend+D->codeplus;
! 612: break;
! 613: case R_X86_64_32:
! 614: *(uint32_t*)x=*(uint32_t*)_dlsym(symtab[sym].st_name+dynstr)+r[i].r_addend;
! 615: break;
! 616: default:
! 617: __write2("unknown relocation type!\n");
! 618: exit(1);
! 619: break;
! 620: #else
! 621: #error fixme: add relocation types for your platform
! 622: #endif
! 623: }
! 624: }
! 625:
! 626: return;
! 627: }
! 628:
1.1 leitner 629: int main(int argc,char* argv[],char* envp[]) {
630: int fd;
631: size_t l;
632: char* m;
633: char buf[1000];
634: ehdr* e;
635: phdr* p;
636: phdr* code=0,* data=0;
637: shdr* s;
638: dyn* d;
639: size_t dnum;
640: char* dynstr;
641: size_t dynstrlen;
642: size_t i;
643:
644: #if 0
645: {
646: fd=open("/proc/self/maps",O_RDONLY);
647: if (fd!=-1) {
648: size_t l;
649: do {
650: l=read(fd,buf,sizeof(buf));
651: write(1,buf,l);
652: } while (l==sizeof(buf));
653: close(fd);
654: }
655: }
656: #endif
657: if (argc<2) {
658: __write2("usage: ld.so /path/to/binary\n");
659: return 0;
660: }
661: fd=open("/etc/diet.ld.conf",O_RDONLY);
662: if (fd!=-1) {
663: int r=read(fd,path,99);
664: if (r>0) path[r]=0;
665: while (r>0 && path[r-1]=='\n') path[--r]=0;
666: close(fd);
667: }
668: for (i=0; envp[i]; ++i) {
669: if (_memcmp(envp[i],"LD_LIBRARY_PATH=",16)==0)
670: ldlp=envp[i]+16;
671: }
672: fd=open(argv[1],O_RDONLY);
673: if (fd==-1) {
674: __write2("could not open \"");
675: __write2(argv[1]);
676: __write2("\".\n");
677: return 1;
678: }
679: l=read(fd,buf,1000);
680: if (l<sizeof(ehdr) || _memcmp(buf,"\177ELF",4)) {
681: kaputt:
682: __write2("invalid ELF file \"");
683: close(fd);
684: return 1;
685: }
686: e=(ehdr*)buf;
687:
688: if (e->e_ident[4] != (sizeof(size_t)/4)) {
689: __write2("wrong word size!\n");
690: return 1;
691: }
692: #if 0
693: if (e->e_ident[7] != ELFOSABI_LINUX) {
694: __write2("ABI not Linux!\n");
695: return 1;
696: }
697: #endif
698: if (e->e_type != ET_EXEC) {
699: __write2("not an executable!\n");
700: return 1;
701: }
702: if (e->e_machine !=
703: #if defined(__x86_64__)
704: EM_X86_64
705: #elif defined(__i386__)
706: EM_386
707: #else
708: #error architecture not recognized, please add
709: #endif
710: ) {
711: __write2("invalid architecture!\n");
712: return 1;
713: }
714:
715: if (e->e_phentsize != sizeof(phdr)) {
716: __write2("invalid phentsize!\n");
717: return 1;
718: }
719: if (e->e_phnum > 20) {
720: __write2("phnum too large!\n");
721: return 1;
722: }
723: if (e->e_shentsize != sizeof(shdr)) {
724: __write2("invalid shentsize!\n");
725: return 1;
726: }
727: if (e->e_shnum > 100) {
728: __write2("shnum too large!\n");
729: return 1;
730: }
731:
732: if (e->e_phoff>=l || e->e_phentsize*e->e_phnum>l-e->e_phoff) {
733: size_t wanted=e->e_phentsize*e->e_phnum;
734: p=alloca(wanted);
735: if (pread64(fd,p,wanted,e->e_phoff)!=wanted)
736: goto kaputt;
737: } else
738: p=(phdr*)(buf+e->e_phoff);
739:
740: dlls=&dllroot;
741: if (map_sections(fd,e,p,&dllroot)) {
742: __write2("map_sections failed!\n");
743: return 1;
744: }
745: close(fd);
746:
1.2 ! leitner 747: loadlibs(&dllroot);
1.1 leitner 748:
749: /* now load the prerequisites of the libraries we loaded */
750: {
751: struct dll* x;
752: for (x=dlls; x; x=x->next) {
1.2 ! leitner 753: loadlibs(x);
1.1 leitner 754: }
755: }
756:
1.2 ! leitner 757: resolve(&dllroot);
! 758:
! 759: {
! 760: int (*_init)(int argc,char* argv[],char* envp[])=(void*)e->e_entry;
! 761: return _init(argc,argv,envp);
! 762: }
! 763: #if 0
1.1 leitner 764: {
765: char* x=dlsym("theint");
1.2 ! leitner 766: __write1("done\n");
1.1 leitner 767: }
1.2 ! leitner 768: #endif
1.1 leitner 769:
770: #if 0
771: printf("jump to %p\n",e->e_entry);
772: for (i=0; i<16; ++i) {
773: printf("%02x ",((unsigned char*)e->e_entry)[i]);
774: }
775: #endif
776:
777: return 0;
778: }
LinuxTV legacy CVS <linuxtv.org/cvs>