/[jscoverage]/trunk/js/nanojit/Fragmento.cpp
ViewVC logotype

Contents of /trunk/js/nanojit/Fragmento.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 399 - (show annotations)
Tue Dec 9 03:37:47 2008 UTC (11 years ago) by siliconforks
File size: 18422 byte(s)
Use SpiderMonkey from Firefox 3.1b2.

1 /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: t; tab-width: 4 -*- */
2 /* ***** BEGIN LICENSE BLOCK *****
3 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
4 *
5 * The contents of this file are subject to the Mozilla Public License Version
6 * 1.1 (the "License"); you may not use this file except in compliance with
7 * the License. You may obtain a copy of the License at
8 * http://www.mozilla.org/MPL/
9 *
10 * Software distributed under the License is distributed on an "AS IS" basis,
11 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
12 * for the specific language governing rights and limitations under the
13 * License.
14 *
15 * The Original Code is [Open Source Virtual Machine].
16 *
17 * The Initial Developer of the Original Code is
18 * Adobe System Incorporated.
19 * Portions created by the Initial Developer are Copyright (C) 2004-2007
20 * the Initial Developer. All Rights Reserved.
21 *
22 * Contributor(s):
23 * Adobe AS3 Team
24 * Mozilla TraceMonkey Team
25 * Asko Tontti <atontti@cc.hut.fi>
26 *
27 * Alternatively, the contents of this file may be used under the terms of
28 * either the GNU General Public License Version 2 or later (the "GPL"), or
29 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 * in which case the provisions of the GPL or the LGPL are applicable instead
31 * of those above. If you wish to allow use of your version of this file only
32 * under the terms of either the GPL or the LGPL, and not to allow others to
33 * use your version of this file under the terms of the MPL, indicate your
34 * decision by deleting the provisions above and replace them with the notice
35 * and other provisions required by the GPL or the LGPL. If you do not delete
36 * the provisions above, a recipient may use your version of this file under
37 * the terms of any one of the MPL, the GPL or the LGPL.
38 *
39 * ***** END LICENSE BLOCK ***** */
40
#include <cstdio>   // snprintf (used for bounded label formatting)

#include "nanojit.h"
#undef MEMORY_INFO
43
44 namespace nanojit
45 {
46 #ifdef FEATURE_NANOJIT
47
48 using namespace avmplus;
49
50 static uint32_t calcSaneCacheSize(uint32_t in)
51 {
52 if (in < uint32_t(NJ_LOG2_PAGE_SIZE)) return NJ_LOG2_PAGE_SIZE; // at least 1 page
53 if (in > 30) return 30; // 1GB should be enough for anyone
54 return in;
55 }
56
57 /**
58 * This is the main control center for creating and managing fragments.
59 */
60 Fragmento::Fragmento(AvmCore* core, uint32_t cacheSizeLog2)
61 : _allocList(core->GetGC()),
62 _max_pages(1 << (calcSaneCacheSize(cacheSizeLog2) - NJ_LOG2_PAGE_SIZE)),
63 _pagesGrowth(1)
64 {
65 #ifdef _DEBUG
66 {
67 // XXX These belong somewhere else, but I can't find the
68 // right location right now.
69 NanoStaticAssert((LIR_lt ^ 3) == LIR_ge);
70 NanoStaticAssert((LIR_le ^ 3) == LIR_gt);
71 NanoStaticAssert((LIR_ult ^ 3) == LIR_uge);
72 NanoStaticAssert((LIR_ule ^ 3) == LIR_ugt);
73
74 /* Opcodes must be strictly increasing without holes. */
75 uint32_t count = 0;
76 #define OPDEF(op, number, operands) \
77 NanoAssertMsg(LIR_##op == count++, "misnumbered opcode");
78 #define OPDEF64(op, number, operands) OPDEF(op, number, operands)
79 #include "LIRopcode.tbl"
80 #undef OPDEF
81 #undef OPDEF64
82 }
83 #endif
84
85 #ifdef MEMORY_INFO
86 _allocList.set_meminfo_name("Fragmento._allocList");
87 #endif
88 NanoAssert(_max_pages > _pagesGrowth); // shrink growth if needed
89 _core = core;
90 GC *gc = core->GetGC();
91 _frags = new (gc) FragmentMap(gc, 128);
92 _assm = new (gc) nanojit::Assembler(this);
93 verbose_only( enterCounts = new (gc) BlockHist(gc); )
94 verbose_only( mergeCounts = new (gc) BlockHist(gc); )
95 }
96
97 Fragmento::~Fragmento()
98 {
99 AllocEntry *entry;
100
101 clearFrags();
102 _frags->clear();
103 while( _allocList.size() > 0 )
104 {
105 //fprintf(stderr,"dealloc %x\n", (intptr_t)_allocList.get(_allocList.size()-1));
106 #ifdef MEMORY_INFO
107 ChangeSizeExplicit("NanoJitMem", -1, _gcHeap->Size(_allocList.last()));
108 #endif
109 entry = _allocList.removeLast();
110 _gcHeap->Free( entry->page, entry->allocSize );
111 delete entry;
112 }
113 delete _frags;
114 delete _assm;
115 #if defined(NJ_VERBOSE)
116 delete enterCounts;
117 delete mergeCounts;
118 #endif
119 NanoAssert(_stats.freePages == _stats.pages );
120 }
121
122 void Fragmento::trackFree(int32_t delta)
123 {
124 _stats.freePages += delta;
125 const uint32_t pageUse = _stats.pages - _stats.freePages;
126 if (_stats.maxPageUse < pageUse)
127 _stats.maxPageUse = pageUse;
128 }
129
130 Page* Fragmento::pageAlloc()
131 {
132 NanoAssert(sizeof(Page) == NJ_PAGE_SIZE);
133 if (!_pageList) {
134 pagesGrow(_pagesGrowth); // try to get more mem
135 if ((_pagesGrowth << 1) < _max_pages)
136 _pagesGrowth <<= 1;
137 }
138 Page *page = _pageList;
139 if (page)
140 {
141 _pageList = page->next;
142 trackFree(-1);
143 }
144 //fprintf(stderr, "Fragmento::pageAlloc %X, %d free pages of %d\n", (int)page, _stats.freePages, _stats.pages);
145 NanoAssert(pageCount()==_stats.freePages);
146 return page;
147 }
148
149 void Fragmento::pageFree(Page* page)
150 {
151 //fprintf(stderr, "Fragmento::pageFree %X, %d free pages of %d\n", (int)page, _stats.freePages+1, _stats.pages);
152
153 // link in the page
154 page->next = _pageList;
155 _pageList = page;
156 trackFree(+1);
157 NanoAssert(pageCount()==_stats.freePages);
158 }
159
160 void Fragmento::pagesGrow(int32_t count)
161 {
162 NanoAssert(!_pageList);
163 MMGC_MEM_TYPE("NanojitFragmentoMem");
164 Page* memory = 0;
165 if (_stats.pages < _max_pages)
166 {
167 AllocEntry *entry;
168
169 // make sure we don't grow beyond _max_pages
170 if (_stats.pages + count > _max_pages)
171 count = _max_pages - _stats.pages;
172 if (count < 0)
173 count = 0;
174 // @todo nastiness that needs a fix'n
175 _gcHeap = _core->GetGC()->GetGCHeap();
176 NanoAssert(int32_t(NJ_PAGE_SIZE)<=_gcHeap->kNativePageSize);
177
178 // convert _max_pages to gc page count
179 int32_t gcpages = (count*NJ_PAGE_SIZE) / _gcHeap->kNativePageSize;
180 MMGC_MEM_TYPE("NanoJitMem");
181 memory = (Page*)_gcHeap->Alloc(gcpages);
182 #ifdef MEMORY_INFO
183 ChangeSizeExplicit("NanoJitMem", 1, _gcHeap->Size(memory));
184 #endif
185 NanoAssert((int*)memory == pageTop(memory));
186 //fprintf(stderr,"head alloc of %d at %x of %d pages using nj page size of %d\n", gcpages, (intptr_t)memory, (intptr_t)_gcHeap->kNativePageSize, NJ_PAGE_SIZE);
187
188 entry = new (_core->gc) AllocEntry;
189 entry->page = memory;
190 entry->allocSize = gcpages;
191 _allocList.add(entry);
192
193 Page* page = memory;
194 _pageList = page;
195 _stats.pages += count;
196 _stats.freePages += count;
197 trackFree(0);
198 while(--count > 0)
199 {
200 Page *next = page + 1;
201 //fprintf(stderr,"Fragmento::pageGrow adding page %x ; %d\n", (intptr_t)page, count);
202 page->next = next;
203 page = next;
204 }
205 page->next = 0;
206 NanoAssert(pageCount()==_stats.freePages);
207 //fprintf(stderr,"Fragmento::pageGrow adding page %x ; %d\n", (intptr_t)page, count);
208 }
209 }
210
211 // Clear the fragment. This *does not* remove the fragment from the
212 // map--the caller must take care of this.
213 void Fragmento::clearFragment(Fragment* f)
214 {
215 Fragment *peer = f->peer;
216 while (peer) {
217 Fragment *next = peer->peer;
218 peer->releaseTreeMem(this);
219 delete peer;
220 peer = next;
221 }
222 f->releaseTreeMem(this);
223 delete f;
224 }
225
226 void Fragmento::clearFrag(const void* ip)
227 {
228 if (_frags->containsKey(ip)) {
229 clearFragment(_frags->remove(ip));
230 }
231 }
232
233 void Fragmento::clearFrags()
234 {
235 // reclaim any dangling native pages
236 _assm->pageReset();
237
238 while (!_frags->isEmpty()) {
239 clearFragment(_frags->removeLast());
240 }
241
242 verbose_only( enterCounts->clear();)
243 verbose_only( mergeCounts->clear();)
244 verbose_only( _stats.flushes++ );
245 verbose_only( _stats.compiles = 0 );
246 //fprintf(stderr, "Fragmento.clearFrags %d free pages of %d\n", _stats.freePages, _stats.pages);
247 }
248
// Accessor: the Assembler owned by this Fragmento.
249 Assembler* Fragmento::assm()
250 {
251 return _assm;
252 }
253
// Accessor: the AvmCore this Fragmento was created for.
254 AvmCore* Fragmento::core()
255 {
256 return _core;
257 }
258
259 Fragment* Fragmento::getAnchor(const void* ip)
260 {
261 Fragment *f = newFrag(ip);
262 Fragment *p = _frags->get(ip);
263 if (p) {
264 f->first = p;
265 /* append at the end of the peer list */
266 Fragment* next;
267 while ((next = p->peer) != NULL)
268 p = next;
269 p->peer = f;
270 } else {
271 f->first = f;
272 _frags->put(ip, f); /* this is the first fragment */
273 }
274 f->anchor = f;
275 f->root = f;
276 f->kind = LoopTrace;
277 f->mergeCounts = new (_core->gc) BlockHist(_core->gc);
278 verbose_only( addLabel(f, "T", _frags->size()); )
279 return f;
280 }
281
// Look up the anchor fragment for `ip`; NULL if none has been created.
282 Fragment* Fragmento::getLoop(const void* ip)
283 {
284 return _frags->get(ip);
285 }
286
#ifdef NJ_VERBOSE
// Register a short human-readable label ("T1", "M3", ...) for `f`
// with the label map used by verbose disassembly output.
void Fragmento::addLabel(Fragment *f, const char *prefix, int id)
{
    char fragname[20];
    // snprintf instead of sprintf: a long prefix or a large id must
    // not overrun the fixed 20-byte buffer (snprintf always
    // NUL-terminates and truncates safely).
    snprintf(fragname, sizeof(fragname), "%s%d", prefix, id);
    labels->add(f, sizeof(Fragment), 0, fragname);
}
#endif
295
296 Fragment *Fragmento::getMerge(GuardRecord *lr, const void* ip)
297 {
298 Fragment *anchor = lr->exit->from->anchor;
299 for (Fragment *f = anchor->branches; f != 0; f = f->nextbranch) {
300 if (f->kind == MergeTrace && f->ip == ip /*&& f->calldepth == lr->calldepth*/) {
301 // found existing shared branch on anchor
302 return f;
303 }
304 }
305
306 Fragment *f = newBranch(anchor, ip);
307 f->root = f;
308 f->kind = MergeTrace;
309 verbose_only(
310 int mergeid = 1;
311 for (Fragment *g = anchor->branches; g != 0; g = g->nextbranch)
312 if (g->kind == MergeTrace)
313 mergeid++;
314 addLabel(f, "M", mergeid);
315 )
316 return f;
317 }
318
319 Fragment *Fragmento::createBranch(SideExit* exit, const void* ip)
320 {
321 Fragment *f = newBranch(exit->from, ip);
322 f->kind = BranchTrace;
323 f->treeBranches = f->root->treeBranches;
324 f->root->treeBranches = f;
325 return f;
326 }
327
328 #ifdef NJ_VERBOSE
329 uint32_t Fragmento::pageCount()
330 {
331 uint32_t n = 0;
332 for(Page* page=_pageList; page; page = page->next)
333 n++;
334 return n;
335 }
336
// Accumulator used by dumpFragStats()/dumpStats() to total up the
// statistics of a fragment tree.
337 struct fragstats {
338 int size; // native code bytes, excluding exit-stub code
339 uint64_t traceDur; // ticks spent executing the trace
340 uint64_t interpDur; // ticks spent interpreting
341 int lir, lirbytes; // LIR instruction count and byte size
342 };
343
344 void Fragmento::dumpFragStats(Fragment *f, int level, fragstats &stat)
345 {
346 char buf[50];
347 sprintf(buf, "%*c%s", 1+level, ' ', labels->format(f));
348
349 int called = f->hits();
350 if (called >= 0)
351 called += f->_called;
352 else
353 called = -(1<<f->blacklistLevel) - called - 1;
354
355 uint32_t main = f->_native - f->_exitNative;
356
357 char cause[200];
358 if (f->_token && strcmp(f->_token,"loop")==0)
359 sprintf(cause,"%s %d", f->_token, f->xjumpCount);
360 else if (f->_token) {
361 if (f->eot_target) {
362 sprintf(cause,"%s %s", f->_token, labels->format(f->eot_target));
363 } else {
364 strcpy(cause, f->_token);
365 }
366 }
367 else
368 cause[0] = 0;
369
370 _assm->outputf("%-10s %7d %6d %6d %6d %4d %9llu %9llu %-12s %s", buf,
371 called, f->guardCount, main, f->_native, f->compileNbr, f->traceTicks/1000, f->interpTicks/1000,
372 cause, labels->format(f->ip));
373
374 stat.size += main;
375 stat.traceDur += f->traceTicks;
376 stat.interpDur += f->interpTicks;
377 stat.lir += f->_lir;
378 stat.lirbytes += f->_lirbytes;
379
380 for (Fragment *x = f->branches; x != 0; x = x->nextbranch)
381 if (x->kind != MergeTrace)
382 dumpFragStats(x,level+1,stat);
383 for (Fragment *x = f->branches; x != 0; x = x->nextbranch)
384 if (x->kind == MergeTrace)
385 dumpFragStats(x,level+1,stat);
386
387 if (f->isAnchor() && f->branches != 0) {
388 _assm->output("");
389 }
390 }
391
// Per-fragment duration/size record; dumpStats() keys these by
// combined trace+interp duration in a SortedMap to print a ranking.
392 class DurData { public:
393 DurData(): frag(0), traceDur(0), interpDur(0), size(0) {}
394 DurData(int): frag(0), traceDur(0), interpDur(0), size(0) {}
395 DurData(Fragment* f, uint64_t td, uint64_t id, int32_t s)
396 : frag(f), traceDur(td), interpDur(id), size(s) {}
397 Fragment* frag;
398 uint64_t traceDur;
399 uint64_t interpDur;
400 int32_t size;
401 };
402
403 void Fragmento::dumpRatio(const char *label, BlockHist *hist)
404 {
405 int total=0, unique=0;
406 for (int i = 0, n=hist->size(); i < n; i++) {
407 const void * id = hist->keyAt(i);
408 int c = hist->get(id);
409 if (c > 1) {
410 //_assm->outputf("%d %X", c, id);
411 unique += 1;
412 }
413 else if (c == 1) {
414 unique += 1;
415 }
416 total += c;
417 }
418 _assm->outputf("%s total %d unique %d ratio %.1f%", label, total, unique, double(total)/unique);
419 }
420
421 void Fragmento::dumpStats()
422 {
423 bool vsave = _assm->_verbose;
424 _assm->_verbose = true;
425
426 _assm->output("");
427 dumpRatio("inline", enterCounts);
428 dumpRatio("merges", mergeCounts);
429 _assm->outputf("abc %d il %d (%.1fx) abc+il %d (%.1fx)",
430 _stats.abcsize, _stats.ilsize, (double)_stats.ilsize/_stats.abcsize,
431 _stats.abcsize + _stats.ilsize,
432 double(_stats.abcsize+_stats.ilsize)/_stats.abcsize);
433
434 int32_t count = _frags->size();
435 int32_t pages = _stats.pages;
436 int32_t maxPageUse = _stats.maxPageUse;
437 int32_t free = _stats.freePages;
438 int32_t flushes = _stats.flushes;
439 if (!count)
440 {
441 _assm->outputf("No fragments in cache, %d flushes", flushes);
442 _assm->_verbose = vsave;
443 return;
444 }
445
446 _assm->outputf("\nFragment statistics");
447 _assm->outputf(" loop trees: %d", count);
448 _assm->outputf(" flushes: %d", flushes);
449 _assm->outputf(" compiles: %d / %d", _stats.compiles, _stats.totalCompiles);
450 _assm->outputf(" used: %dk / %dk", (pages-free)<<(NJ_LOG2_PAGE_SIZE-10), pages<<(NJ_LOG2_PAGE_SIZE-10));
451 _assm->outputf(" maxPageUse: %dk", (maxPageUse)<<(NJ_LOG2_PAGE_SIZE-10));
452 _assm->output("\ntrace calls guards main native gen T-trace T-interp");
453
454 avmplus::SortedMap<uint64_t, DurData, avmplus::LIST_NonGCObjects> durs(_core->gc);
455 uint64_t totaldur=0;
456 fragstats totalstat = { 0,0,0,0,0 };
457 for (int32_t i=0; i<count; i++)
458 {
459 Fragment *f = _frags->at(i);
460 while (true) {
461 fragstats stat = { 0,0,0,0,0 };
462 dumpFragStats(f, 0, stat);
463 if (stat.lir) {
464 totalstat.lir += stat.lir;
465 totalstat.lirbytes += stat.lirbytes;
466 }
467 uint64_t bothDur = stat.traceDur + stat.interpDur;
468 if (bothDur) {
469 totalstat.interpDur += stat.interpDur;
470 totalstat.traceDur += stat.traceDur;
471 totalstat.size += stat.size;
472 totaldur += bothDur;
473 while (durs.containsKey(bothDur)) bothDur++;
474 DurData d(f, stat.traceDur, stat.interpDur, stat.size);
475 durs.put(bothDur, d);
476 }
477 if (!f->peer)
478 break;
479 f = f->peer;
480 }
481 }
482 uint64_t totaltrace = totalstat.traceDur;
483 int totalsize = totalstat.size;
484
485 _assm->outputf("");
486 _assm->outputf("lirbytes %d / lir %d = %.1f bytes/lir", totalstat.lirbytes,
487 totalstat.lir, double(totalstat.lirbytes)/totalstat.lir);
488 _assm->outputf(" trace interp");
489 _assm->outputf("%9lld (%2d%%) %9lld (%2d%%)",
490 totaltrace/1000, int(100.0*totaltrace/totaldur),
491 (totaldur-totaltrace)/1000, int(100.0*(totaldur-totaltrace)/totaldur));
492 _assm->outputf("");
493 _assm->outputf("trace ticks trace interp size");
494 for (int32_t i=durs.size()-1; i >= 0; i--) {
495 uint64_t bothDur = durs.keyAt(i);
496 DurData d = durs.get(bothDur);
497 int size = d.size;
498 _assm->outputf("%-4s %9lld (%2d%%) %9lld (%2d%%) %9lld (%2d%%) %6d (%2d%%) %s",
499 labels->format(d.frag),
500 bothDur/1000, int(100.0*bothDur/totaldur),
501 d.traceDur/1000, int(100.0*d.traceDur/totaldur),
502 d.interpDur/1000, int(100.0*d.interpDur/totaldur),
503 size, int(100.0*size/totalsize),
504 labels->format(d.frag->ip));
505 }
506
507 _assm->_verbose = vsave;
508
509 }
510
511 void Fragmento::countBlock(BlockHist *hist, const void* ip)
512 {
513 int c = hist->count(ip);
514 if (_assm->_verbose)
515 _assm->outputf("++ %s %d", labels->format(ip), c);
516 }
517
518 void Fragmento::countIL(uint32_t il, uint32_t abc)
519 {
520 _stats.ilsize += il;
521 _stats.abcsize += abc;
522 }
523
524 #ifdef AVMPLUS_VERBOSE
// Render the current trace trees to `fileName` via drawTraceTrees().
525 void Fragmento::drawTrees(char *fileName) {
526 drawTraceTrees(this, this->_frags, this->_core, fileName);
527 }
528 #endif
529 #endif // NJ_VERBOSE
530
531 //
532 // Fragment
533 //
// Construct a fragment anchored at `_ip`.
534 Fragment::Fragment(const void* _ip) : ip(_ip)
535 {
536 // Fragment is a gc object which is zero'd by the GC, no need to clear fields
537 }
538
// Destructor: fire onDestroy() and assert all code pages were released
// (releaseCode() must have run before the fragment is deleted).
539 Fragment::~Fragment()
540 {
541 onDestroy();
542 NanoAssert(_pages == 0);
543 }
544
545 void Fragment::resetHits()
546 {
547 blacklistLevel >>= 1;
548 _hits = 0;
549 }
550
551 void Fragment::blacklist()
552 {
553 blacklistLevel++;
554 _hits = -(1<<blacklistLevel);
555 }
556
557 Fragment *Fragmento::newFrag(const void* ip)
558 {
559 GC *gc = _core->gc;
560 Fragment *f = new (gc) Fragment(ip);
561 f->blacklistLevel = 5;
562 f->recordAttempts = 0;
563 return f;
564 }
565
566 Fragment *Fragmento::newBranch(Fragment *from, const void* ip)
567 {
568 Fragment *f = newFrag(ip);
569 f->anchor = from->anchor;
570 f->root = from->root;
571 f->mergeCounts = from->anchor->mergeCounts;
572 f->xjumpCount = from->xjumpCount;
573 /*// prepend
574 f->nextbranch = from->branches;
575 from->branches = f;*/
576 // append
577 if (!from->branches) {
578 from->branches = f;
579 } else {
580 Fragment *p = from->branches;
581 while (p->nextbranch != 0)
582 p = p->nextbranch;
583 p->nextbranch = f;
584 }
585 return f;
586 }
587
588 void Fragmento::disconnectLoops()
589 {
590 for (int i = 0; i < _frags->size(); ++i) {
591 Fragment* frag = _frags->at(i);
592 if (frag->lastIns->isop(LIR_loop))
593 _assm->disconnectLoop(frag->lastIns->record());
594 }
595 }
596
597 void Fragmento::reconnectLoops()
598 {
599 for (int i = 0; i < _frags->size(); ++i) {
600 Fragment* frag = _frags->at(i);
601 if (frag->lastIns->isop(LIR_loop))
602 _assm->reconnectLoop(frag->lastIns->record());
603 }
604 }
605
// Drop the fragment's reference into the LIR buffer by clearing
// lastIns; the buffer storage itself is owned elsewhere.
606 void Fragment::releaseLirBuffer()
607 {
608 lastIns = 0;
609 }
610
611 void Fragment::releaseCode(Fragmento* frago)
612 {
613 _code = 0;
614 while(_pages)
615 {
616 Page* next = _pages->next;
617 frago->pageFree(_pages);
618 _pages = next;
619 }
620 }
621
622 void Fragment::releaseTreeMem(Fragmento* frago)
623 {
624 releaseLirBuffer();
625 releaseCode(frago);
626
627 // now do it for all branches
628 Fragment* branch = branches;
629 while(branch)
630 {
631 Fragment* next = branch->nextbranch;
632 branch->releaseTreeMem(frago); // @todo safer here to recurse in case we support nested trees
633 delete branch;
634 branch = next;
635 }
636 }
637 #endif /* FEATURE_NANOJIT */
638 }
639
640

  ViewVC Help
Powered by ViewVC 1.1.24