/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: t; tab-width: 4 -*- */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is [Open Source Virtual Machine.].
 *
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2004-2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *   Adobe AS3 Team
 *   leon.sha@sun.com
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

#ifndef __GC__
#define __GC__

#define MMGC_GCENTER(_gc)  MMgc::GCAutoEnter __mmgc_auto_enter(_gc);
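
// Illustrative sketch (not part of the original header): MMGC_GCENTER is
// typically expanded at the top of an entry point into GC-using code so the
// collector knows the stack base for conservative scanning.  The function and
// variable names below are hypothetical.
//
//   void EnterVM(MMgc::GC* gc)
//   {
//       MMGC_GCENTER(gc);   // declares a GCAutoEnter for this scope
//       // ... code that allocates and may trigger a collection ...
//   }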

#if defined(MMGC_IA32) && defined(_MSC_VER)

#define MMGC_GET_STACK_EXTENTS(_gc, _stack, _size)						\
	jmp_buf __mmgc_env;													\
	VMPI_setjmpNoUnwind(__mmgc_env);													\
	__asm { mov _stack,esp };											\
	_size = (uint32_t)(_gc->GetStackTop() - (uintptr_t)_stack);

#elif AVMSYSTEM_SPARC == 1

#define MMGC_GET_STACK_EXTENTS(_gc, _stack, _size)						\
	jmp_buf __mmgc_env;													\
	VMPI_setjmpNoUnwind(__mmgc_env);													\
	asm ("mov %%sp, %0":"=r" (_stack));									\
	_size = (uint32_t)(_gc->GetOSStackTop() - (uintptr_t)_stack);

#elif AVMSYSTEM_MAC == 1 && AVMSYSTEM_PPC == 1

register void *mmgc_sp __asm__("r1");

#define MMGC_GET_STACK_EXTENTS(_gc, _stack, _size)						\
	jmp_buf __mmgc_env;													\
	VMPI_setjmpNoUnwind(__mmgc_env);													\
	_stack = (void*)mmgc_sp;											\
	_size = (uint32_t)(_gc->GetOSStackTop() - (uintptr_t)_stack);

#else

#define MMGC_GET_STACK_EXTENTS(_gc, _stack, _size)						\
	jmp_buf __mmgc_env;													\
	VMPI_setjmpNoUnwind(__mmgc_env);													\
	_stack = &__mmgc_env;												\
	_size = (uint32_t)(_gc->GetStackTop() - (uintptr_t)_stack);

#endif

// This macro creates a GC root of the current thread's stack.  There
// are two use cases for this:
//
// 1) the main thread is halting to let another thread run doing
// possible GC activity (debugger use case)
//
// 2) the stack is moving to another area of memory (stack switching)
// but the old stack must still get scanned
#define MMGC_GC_ROOT_THREAD(_gc)						\
	void *__stack;										\
	size_t __stackSize;									\
	MMGC_GET_STACK_EXTENTS(_gc, __stack, __stackSize);	\
	MMgc::GC::AutoRCRootSegment __root(_gc, __stack, __stackSize);	\
	MMgc::GCAutoEnterPause __mmgc_enter_pause(_gc);
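
// Illustrative sketch (not part of the original header): expanding
// MMGC_GC_ROOT_THREAD in the scope that must keep the current stack scanned,
// e.g. while the main thread halts for a debugger.  The blocking call below is
// hypothetical.
//
//   void HaltForDebugger(MMgc::GC* gc)
//   {
//       MMGC_GC_ROOT_THREAD(gc);      // current stack becomes a GC root
//       WaitUntilDebuggerResumes();   // another thread may now run GC work
//   }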

// Enable our own alloca() replacement that always allocates in the heap; this is good on
// systems with limited memory or limited stack.
#define AVMPLUS_PARAM_ALLOCA_CUTOFF		4000	// Don't make real alloca() blow the stack; this limit is heuristic
#define AVMPLUS_PARAM_ALLOCA_DEFSIZE	1000	// Default number of bytes in a stack segment for heap-based alloca()
#ifdef AVMPLUS_HEAP_ALLOCA
#  define VMPI_alloca(core, autoptr, nbytes)	core->gc->allocaPush(nbytes, autoptr)
#  define VMPI_alloca_gc(gc, autoptr, nbytes)	gc->allocaPush(nbytes, autoptr)
#else
#  define VMPI_alloca(core, autoptr, nbytes)  (nbytes > AVMPLUS_PARAM_ALLOCA_CUTOFF ? core->gc->allocaPush(nbytes, autoptr) : alloca(nbytes))
#  define VMPI_alloca_gc(gc, autoptr, nbytes)  (nbytes > AVMPLUS_PARAM_ALLOCA_CUTOFF ? gc->allocaPush(nbytes, autoptr) : alloca(nbytes))
#endif
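
// Illustrative sketch (not part of the original header): callers pass an RAII
// auto-pointer so that heap-based alloca segments are popped when the scope
// exits.  The auto-pointer type name below is an assumption for illustration.
//
//   void Example(avmplus::AvmCore* core)
//   {
//       MMgc::GC::AllocaAutoPtr _ap;                     // assumed helper type
//       char* buf = (char*)VMPI_alloca(core, _ap, 512);  // heap or real alloca
//       // ... use buf; any heap segment is released with _ap ...
//   }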

namespace avmplus
{
	class AvmCore;
}

namespace MMgc
{
	class GCAutoEnter;

	/**
	 * GCRoot is a root in the reachability graph; it contains a pointer and a size,
	 * and the memory it describes is scanned for references during marking.
	 */
	class GCRoot
	{
		friend class GC;
		GCRoot();
		void init(GC*gc, const void *object, size_t size);
	public:
		/** subclassing constructor */
		GCRoot(GC *gc);
		/** general constructor */
		GCRoot(GC *gc, const void *object, size_t size);
		virtual ~GCRoot();

		// override new and delete so we can know the object's extents (via FixedMalloc::Size())
		void *operator new(size_t size);
        
		void operator delete (void *object);

		const void *Get() const { return object; }
		const void *End() const { return (char*)object + size; }
		void Set(const void *object, size_t size);

		GC *GetGC() const { return gc; }
		/** If your object goes away after the GC is deleted, this can be useful. */
		void Destroy();

	private:
		GC * gc;

		GCRoot *next;
		GCRoot *prev;
		const void *object;
		size_t size;

		GCWorkItem GetWorkItem() const;
	};
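
	// Illustrative sketch (not part of the original header): a typical use of
	// GCRoot is to subclass it so that pointer members are scanned conservatively
	// and keep their targets alive.  'MyObject' is hypothetical.
	//
	//   class MyRoots : public MMgc::GCRoot
	//   {
	//   public:
	//       MyRoots(MMgc::GC* gc) : GCRoot(gc) {}  // subclassing constructor: the
	//                                              // object's own extent is the root
	//       MyObject* cachedObject;                // scanned while this root exists
	//   };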

	/**
	 * GCCallback is an interface that allows the application to get
	 * callbacks at interesting GC points.
	 */
	class GCCallback
	{
		friend class GC;
		friend class ZCT;
	public:
		GCCallback(GC *gc);
		virtual ~GCCallback();
		
		GC *GetGC() const { return gc; }
		/** If your object goes away after the GC is deleted, this can be useful. */
		void Destroy();

		/**
		 * This method is invoked after all marking and before any
		 * sweeping, useful for bookkeeping based on whether things
		 * got marked
		 */
		virtual void presweep();

		/**
		 * This method is invoked after all sweeping
		 */
		virtual void postsweep();

		// called before a ZCT reap begins
		virtual void prereap();

		// called after a ZCT reap completes
		virtual void postreap();

		/**
		 * This method is called before an RC object is reaped
		 */
		virtual void prereap(void* /*rcobj*/);

	private:
		GC *gc;
		GCCallback *nextCB;
		GCCallback *prevCB;
	};
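
	// Illustrative sketch (not part of the original header): hooking GC phases by
	// subclassing GCCallback.  The bookkeeping bodies are hypothetical.
	//
	//   class SweepLogger : public MMgc::GCCallback
	//   {
	//   public:
	//       SweepLogger(MMgc::GC* gc) : GCCallback(gc) {}
	//       virtual void presweep()  { /* inspect mark bits before sweeping */ }
	//       virtual void postsweep() { /* record statistics after sweeping */ }
	//   };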

	#ifdef MMGC_64BIT
	#define HIDDENPTRMASK (uintptr_t(0x1L)<<63)
	#else
	#define HIDDENPTRMASK (uintptr_t(0x1L)<<31)
	#endif

	template <class T>
	class GCHiddenPointer
	{
	public:
		GCHiddenPointer(T obj=NULL) { set(obj); }
		operator T() const { return (T) (val^HIDDENPTRMASK);	 }
		T operator=(T tNew) 
		{ 
			set(tNew); 
			return (T)this; 
		}
		T operator->() const { return (T) (val^HIDDENPTRMASK); }

	private:
		// Private to prevent its use and to keep anyone from adding one; otherwise
		// GCC creates WriteBarriers on the stack with it.
		GCHiddenPointer(const GCHiddenPointer<T>& toCopy) { GCAssert(false); }
		
		void set(T obj) 
		{
			uintptr_t p = (uintptr_t)obj;
			val = p ^ HIDDENPTRMASK;
		}
		uintptr_t val;
	};
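
	// Illustrative note (not part of the original header): the stored word is
	// val = ptr ^ HIDDENPTRMASK, so this field never holds a raw pointer bit
	// pattern and the conservative marker will not treat it as a reference;
	// reading the pointer XORs the mask back out.  'Thing' is hypothetical.
	//
	//   GCHiddenPointer<Thing*> h(someThing);  // stores someThing ^ HIDDENPTRMASK
	//   Thing* t = h;                          // operator T() recovers someThing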

	
	/**
	 * A policy manager for garbage collection.
	 *
	 * The policy manager centralizes policy decisions about how and when to run garbage
	 * collection.  It is queried from the collector code, and collector code signals
	 * to the policy manager at the occurrence of certain events.
	 *
	 * Typically the GCPolicyManager is simply embedded inside a GC instance, and manages
	 * policy for that GC.
	 *
	 * ----
	 *
	 * Notes, 2009-06-23 / lhansen:
	 *
	 * This is the second cut, and it implements an allocation-driven policy that is
	 * documented extensively in GC.cpp and in doc/mmgc/policy.pdf.  The policy improves
	 * on the first-cut policy by running the GC less often and having lower pause times.
	 *
	 * ZCT reaping times are not bounded, so ZCT reaping may violate any kind of
	 * incrementality guarantee.  This will be fixed by and by.
	 */
	class GCPolicyManager {
	public:
		GCPolicyManager(GC* gc, GCHeap* heap);
		
		/**
		 * Clean up and print any final statistics.  Should be called from the very
		 * start of the GC's destructor.
		 */
		void shutdown();
		
		/**
		 * Situation: the GC is about to run the incremental marker.
		 *
		 * @return the desired length of the next incremental mark quantum.
		 * @note the result can vary from call to call; the function should
		 *       be called as an incremental mark is about to start and the
		 *       result should not be cached.
		 */
		uint32_t incrementalMarkMilliseconds();
		
		/**
		 * @return the number of blocks owned by this GC, as accounted for by calls to
		 * signalBlockAllocation and signalBlockDeallocation.
		 */
		size_t blocksOwnedByGC();

		/**
		 * @return the number of objects reported marked by signalMarkWork, since startup.
		 */
		uint64_t objectsMarked();
		
		/**
		 * @return the number of bytes reported marked by signalMarkWork, since startup.
		 */
		uint64_t bytesMarked();
		
		/**
		 * Compute a ZCT growth budget (in blocks) based on its current size.
		 *
		 * @return the growth budget
		 */
		uint32_t queryZCTBudget(uint32_t zctBlocksUsed);
		
		/**
		 * Set the lower limit below which we try not to run the garbage collector.
		 * The value is specified in 4k blocks, thus 256 == 1MB.
		 */
		void setLowerLimitCollectionThreshold(uint32_t blocks);

		enum PolicyEvent 
		{
			NO_EVENT,
			START_StartIncrementalMark,	// also, start of garbage collection
			END_StartIncrementalMark,
			START_IncrementalMark,
			END_IncrementalMark,
			START_FinalRootAndStackScan,
			END_FinalRootAndStackScan,
			START_FinalizeAndSweep,
			END_FinalizeAndSweep,		// also, end of garbage collection
			START_ReapZCT,
			END_ReapZCT
		};
		
		/**
		 * Situation: a GC event corresponding to one of the PolicyEvent values occurs.
		 * Tell the policy manager about it.
		 */
		void signal(PolicyEvent ev);
		
		/**
		 * Situation: the memory status of the system changes, and a message is being
		 * broadcast to make this known.
		 */
		void signalMemoryStatusChange(MemoryStatus from, MemoryStatus to);
		
		/**
		 * Situation: 'numblocks' blocks have just been obtained by this GC from the GCHeap.
		 */
		void signalBlockAllocation(size_t numblocks);
		
		/** 
		 * Situation: 'numblocks' blocks have just been returned from this GC to the GCHeap.
		 */
		void signalBlockDeallocation(size_t numblocks);
		
		/**
		 * Situation: signal that one pointer-containing object, whose size is nbytes,
		 * has been scanned by the garbage collector.
		 *
		 * This may only be called between the signals START_StartIncrementalMark and 
		 * END_FinalizeAndSweep, and mark work signaled after a START event may not be
		 * reflected in the values returned by objectsMarked() and bytesMarked() until
		 * after the corresponding END event has been signaled.
		 */
		/*REALLY_INLINE*/ void signalMarkWork(size_t nbytes);
		
		/**
		 * Situation: signal that some number of bytes have just been successfully
		 * allocated and are about to be returned to the caller of the allocator.
		 *
		 * @return true if collection work should be triggered because the allocation
		 * budget has been exhausted.
		 */
		/*REALLY_INLINE*/ bool signalAllocWork(size_t nbytes);

		/**
		 * Situation: signal that some number of bytes have just been successfully
		 * freed.
		 */
		/*REALLY_INLINE*/ void signalFreeWork(size_t nbytes);

		/**
		 * Situation: the incremental marker has been started, and we need to know whether
		 * to run another mark increment or push the conclusion to a finish (because the
 		 * total allocation budget for the collection cycle has been exhausted).  This
		 * predicate returns true in the latter case.
		 */
		bool queryEndOfCollectionCycle();

#ifdef MMGC_POLICY_PROFILING
		/**
		 * Situation: signal that one write has been examined by the write barrier and made
		 * it to the given stage of the barrier.   See the barrier code to find out what the
		 * stages are.
		 */
		/*REALLY_INLINE*/ void signalWriteBarrierWork(int stage);

		/**
		 * Situation: signal that the ZCT reaper has run and performed some work.
		 */
		void signalReapWork(uint32_t objects_reaped, uint32_t bytes_reaped, uint32_t objects_pinned);
#endif
#ifdef MMGC_POINTINESS_PROFILING
		/**
		 * Situation: signal that 'words' words have been scanned; that 'could_be_pointer'
		 * number of these words passed the initial heap range checks; and that 
		 * 'actually_is_pointer' number of these words were conservatively found to
		 * be pointers to heap objects.
		 */
		/*inline*/ void signalDemographics(size_t words, size_t could_be_pointer, size_t actually_is_pointer);
#endif
#ifdef MMGC_REFCOUNT_PROFILING
		/**
		 * Situation: signal that IncrementRef has been called on an object.
		 */
		/*REALLY_INLINE*/ void signalIncrementRef();
		
		/**
		 * Situation: signal that DecrementRef has been called on an object.
		 */
		/*REALLY_INLINE*/ void signalDecrementRef();
		
		/**
		 * Situation: signal that one reference is being added to the ZCT.  If 'initial'
		 * is true then the object is freshly allocated and the adding is from RCObject's
		 * constructor.  'population' is the number of elements in the table at the time
		 * ZCT::Add is entered.
		 */
		/*REALLY_INLINE*/ void signalZCTAdd(bool initial, uint32_t population);
		
		/**
		 * Situation: signal that one reference is being removed from the ZCT.  If 'final'
		 * is true then the object is being deleted and the removal is from RCObject's
		 * destructor.
		 */
		/*REALLY_INLINE*/ void signalZCTRemove(bool final);
#endif

		/**
		 * The collector 'gc' (which is not the collector for this manager) has started
		 * a garbage collection, indicating perhaps some memory pressure in that heap.
		 */
		void signalStartCollection(GC* gc);
		
		/**
		 * The collector 'gc' (which is not the collector for this manager) has completed
		 * a garbage collection.
		 */
		void signalEndCollection(GC* gc);

		/**
		 * Request that a full collection happen at the next GC edge (enter/exit)
		 */
		void queueFullCollection() { fullCollectionQueued = true; }

		/**
		 * called after a full collection is done
		 */
		void fullCollectionComplete() { fullCollectionQueued = false; }

		/**
		 * called to find out if a full collection has been requested
		 */
		bool queryFullCollectionQueued() { return fullCollectionQueued; }

		/**
		 * Called by the owner when the owner is notified of an imminent abort
		 */
		void SignalImminentAbort();
		
		// ----- Public data --------------------------------------
		
		// Elapsed time (in ticks) for various collection phases, and the maximum phase time
		// for each phase.  They are updated when an END event is signaled.  Client code must
		// not modify these variables.

		// The total time for various collection phases across the run
		uint64_t timeStartIncrementalMark;
		uint64_t timeIncrementalMark;
		uint64_t timeFinalRootAndStackScan;
		uint64_t timeFinalizeAndSweep;
		uint64_t timeReapZCT;

		// The total time doing collection work (sum of the variables above except the ZCT reap
		// time) and the elapsed time from the start of StartIncrementalMark to the end of
		// FinalizeAndSweep.  Together they provide a crude approximation to a measure of 
		// pause clustering.
		uint64_t timeInLastCollection;
		uint64_t timeEndToEndLastCollection;

		// Time for ZCT reaping during the last collection cycle (end of one FinalizeAndSweep
		// to the end of the next one).
		uint64_t timeReapZCTLastCollection;

		// The maximum latency for various collection phases across the run
		uint64_t timeMaxStartIncrementalMark;
		uint64_t timeMaxIncrementalMark;
		uint64_t timeMaxFinalRootAndStackScan;
		uint64_t timeMaxFinalizeAndSweep;
		uint64_t timeMaxReapZCT;
		
		// The maximum latency for various collection phases during the previous collection cycle
		uint64_t timeMaxStartIncrementalMarkLastCollection;
		uint64_t timeMaxIncrementalMarkLastCollection;
		uint64_t timeMaxFinalRootAndStackScanLastCollection;
		uint64_t timeMaxFinalizeAndSweepLastCollection;
		uint64_t timeMaxReapZCTLastCollection;

		// The total number of times each phase was run
		uint64_t countStartIncrementalMark;
		uint64_t countIncrementalMark;
		uint64_t countFinalRootAndStackScan;
		uint64_t countFinalizeAndSweep;
		uint64_t countReapZCT;
		
	private:
		// The following parameters can vary not just from machine to machine and
		// run to run on the same machine, but within a run in response to memory
		// pressures and other feedback.
		
		// The lower limit below which we try not to run the garbage collector.
		uint32_t lowerLimitCollectionThreshold();
		
		// Get the current time (in ticks).
		uint64_t now();

#ifdef MMGC_POLICY_PROFILING
		// Convert ticks to milliseconds, as a double (used for printing)
		double ticksToMillis(uint64_t ticks);
		
		// @return true if we should print policy data
		bool summarizeGCBehavior();

		// Print policy data.  At shutdown, afterCollection will be passed as false.
		void PrintGCBehaviorStats(bool afterCollection=true);
#endif

 		// Various private methods for the GC policy follow.  See comment in GC.cpp for details.
 
 		// Amount of GC work to perform (bytes to scan) per byte allocated while the GC is active
 		double W();
 
 		// Amount of allocation to allow between two invocations of IncrementalMark
 		double A();
 
 		// Called from the policy event handler to start computing adjustments to R: before any
 		// mark work is performed.
 		void startAdjustingR();
 
 		// Called from the policy event handler to finish computing adjustments to R: after all
 		// mark work has been performed.
 		void endAdjustingR();
 
 		// Called from adjustPolicyForNextMajorCycle to compute the effective L for the next
 		// collection cycle
 		void adjustL();

		// Called from the policy manager's constructor
		void adjustPolicyInitially();
		
 		// Called from the policy event handler to compute the GC policy for the next 
 		// major collection cycle (from the end of one FinishIncrementalMark to the start
 		// of the next one)
 		void adjustPolicyForNextMajorCycle();
 
 		// Called from the policy event handler to compute the GC policy for the next
 		// minor collection cycle (from the end of one IncrementalMark to the start of the
 		// next one)
 		void adjustPolicyForNextMinorCycle();
 
		// ----- Private data --------------------------------------
		
		GC * const gc;
		GCHeap * const heap;
		
		// The time recorded the last time we received signalEndOfIncrementalMark
		uint64_t timeEndOfLastIncrementalMark;

		// The time recorded the last time we received signalStartOfCollection
		uint64_t timeStartOfLastCollection;

		// The time recorded the last time we received signalEndOfCollection
		uint64_t timeEndOfLastCollection;

		// The total number of blocks owned by GC, and the maximum such number
		size_t blocksOwned;
		size_t maxBlocksOwned;
		
		// The number of objects scanned since startup (which is equivalent to the number
		// of calls to GC::MarkItem), less the number scanned during the last
		// collection cycle.
		uint64_t objectsScannedTotal;
		
		// The number of objects scanned during the last collection cycle.
		uint32_t objectsScannedLastCollection;
		
		// The number of bytes scanned since startup less the ones scanned during the 
		// last collection cycle.
		uint64_t bytesScannedTotal;
		
		// The number of bytes scanned during the last collection cycle.
		uint32_t bytesScannedLastCollection;

		// Temporaries for holding the start time / start event until the end event arrives
		uint64_t start_time;
		PolicyEvent start_event;

		// Value returned by lowerLimitCollectionThreshold() and set by setLowerLimitCollectionThreshold():
		// the heap size, in blocks, below which we do not collect.
		uint32_t collectionThreshold;

		// true if a forced garbage collection has been requested
		bool fullCollectionQueued;

		// true if the ZCT statistics for the current GC cycle should be cleared;
		// this is required because the ZCT stats may need to be updated before
		// StartIncrementalMark but can't be cleared at the end of FinalizeAndSweep.
		bool pendingClearZCTStats;

#ifdef MMGC_POLICY_PROFILING
		// Records the heap population before we sweep.  Unit is blocks except where noted.
		size_t heapAllocatedBeforeSweep;
		size_t heapUsedBeforeSweep;
		size_t gcAllocatedBeforeSweep;
		size_t gcBytesUsedBeforeSweep;
		
		// Barrier stages hit in InlineWriteBarrier: examine mark, and hit the barrier.
		uint64_t barrierStageTotal[2];
		uint32_t barrierStageLastCollection[2];

		// Reap work, overall
		uint64_t objectsReaped;
		uint64_t bytesReaped;
		uint64_t objectsPinned;
		
		// Allocation work, overall
		uint64_t objectsAllocated;
		uint64_t bytesAllocated;
#endif
#ifdef MMGC_POINTINESS_PROFILING
		// Track the number of scannable words, the number that passes the initial range
		// check, and the number that turn out to be (conservative) pointers.
		//
		// These are cleared at the start of each GC so stats are only valid for one
		// GC cycle.
		uint64_t candidateWords;
		uint64_t couldBePointer;
		uint64_t actuallyIsPointer;
#endif
#ifdef MMGC_REFCOUNT_PROFILING
		uint64_t incrementRefTotal;
		uint64_t incrementRefLastCollection;
		uint64_t decrementRefTotal;
		uint64_t decrementRefLastCollection;
		uint32_t zctPeakSize;
		uint64_t addZCTTotal;
		uint64_t addZCTLastCollection;
		uint64_t removeZCTTotal;
		uint64_t removeZCTLastCollection;
		uint64_t addZCTInitialTotal;
		uint64_t removeZCTFinalTotal;
#endif

		// Various policy parameters.  For more documentation, see comments in GC.cpp.
		
		// max pause time in seconds
		double P;

		// approximate mark rate in bytes/sec, [1M,infty)
		double R;

		// requested inverse load factor (1,infty)
		double L_ideal;
		
		// adjusted inverse load factor (adjusted for heap pressure, growth, etc)
		double L_actual;
		
		// gc trigger as fraction of allocation budget to use before triggering GC [0,1]
		double T;

		// ratio of gc work to mutator work while the gc is running
		double G;

		// largest multiple of L_ideal to which L_actual can grow (unless 0, which means unlimited)
		double X;

		// the remaining allocation budget for the major GC cycle.  (This can go negative
		// and the variable must accommodate negative values.  It's not frequently accessed.)
		double remainingMajorAllocationBudget;

		// the allocation budget for the minor GC cycle.  This variable remains constant
		// throughout the minor cycle.
		int32_t minorAllocationBudget;
		
		// the remaining allocation budget for the minor GC cycle.  Initially this has the
		// value of minorAllocationBudget; the allocation request size is subtracted for
		// every allocation.  This variable can go negative because we can overshoot the
		// budget.
		int32_t remainingMinorAllocationBudget;
		
		// Temporaries used to compute R
		uint64_t adjustR_startTime;
		uint64_t adjustR_totalTime;
	};
	
	/**
	 * This is a general-purpose garbage collector used by the Flash Player.
	 * Application code must implement the GCRoot interface to mark all
	 * reachable data.  Unreachable data is automatically destroyed.
	 * Objects may optionally be finalized by subclassing the GCObject
	 * interface.
	 *
	 * This garbage collector is intended to be modular, such that
	 * it can be used in other products or replaced with a different
	 * collector in the future.
	 *
	 * Memory allocation and garbage collection strategy:
	 * Mark and sweep garbage collector using fixed size classes
	 * implemented by the GCAlloc class.  Memory blocks are obtained
	 * from the OS via the GCHeap heap manager class.
	 *
	 * When an allocation fails because a suitable memory block is
	 * not available, the garbage collector decides either to garbage
	 * collect to free up space, or to expand the heap.  The heuristic
	 * used to make the decision to collect or expand is taken from the
	 * Boehm-Demers-Weiser (BDW) garbage collector and memory allocator.
	 * The BDW algorithm is (pseudo-code):
	 *
	 *    if (allocs since collect >= heap size / FSD)
	 *      collect
	 *    else
	 *      expand(request size + heap size / FSD)
	 *
	 * The FSD is the "Free Space Divisor."  For the first cut, I'm trying
	 * 4.  TODO: Try various FSD values against real Player workloads
	 * to find the optimum value.
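	 *
	 * Worked example (illustrative, not from the original comment): with FSD=4
	 * and a 40 MB heap, a collection is triggered once roughly 10 MB has been
	 * allocated since the last collection; otherwise the heap is expanded by
	 * the request size plus roughly 10 MB.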
	 *
	 */
	class GC : public OOMCallback
	{
		friend class GCRoot;
		friend class GCHeap;
		friend class GCCallback;
		friend class GCAlloc;
		friend class GCLargeAlloc;
		friend class RCObject;
		friend class GCInterval;
		friend class ZCT;
		friend class AutoRCRootSegment;
		friend class GCPolicyManager;
	public:

		/**
		 * If you need context vars use this!
		 * FIXME: document context variables.
		 */
		enum
		{
			GCV_COREPLAYER,
			GCV_AVMCORE,
			GCV_COUNT
		};
		
		void *GetGCContextVariable(int var) const;

		void SetGCContextVariable(int var, void *val);
		
		/**
		 * @return the AvmCore instance associated with this GC.
		 */
		avmplus::AvmCore *core() const;

		/**
		 * greedy is a debugging flag.  When set, every allocation will cause
		 * a garbage collection.  This makes code run abysmally slow, but can
		 * be useful for detecting mark bugs or computing true peak live size.
		 * (The flag is effective in both debug and release builds.)
		 */
		const bool greedy;

		/**
		 * nogc is a debugging flag.  When set, garbage collection never happens.
		 * (The flag is effective in both debug and release builds.)
		 */
		const bool nogc;

		/**
		 * incremental controls the behavior of the garbage collection.  If true,
		 * collection is incremental (and pause times are more or less controlled);
		 * if false, collection is stop-the-world.
		 */
		const bool incremental;
		
		/**
		 * findUnmarkedPointers is a debugging flag.  If true, the GC will scan the
		 * heap at the end of garbage collection, asserting that every word that
		 * looks like it conservatively points to an object points to an object that
		 * was marked by the collector.  (The flag is available in release builds but
		 * effective in debug builds only.)
		 */
		bool findUnmarkedPointers;

		/**
		 * validateDefRef is a debugging flag.  It turns on code that does a
		 * trace before reaping zero-count objects and asserts on any objects
		 * that get marked.  Significant space cost.  (The flag is available
		 * in release builds but effective in debug builds only.)
		 */
		bool validateDefRef;		
		
		/**
		 * keepDRCHistory is a debugging flag.  It turns on code that keeps the call
		 * stacks of all IncrementRef and DecrementRef operations on each RC object,
		 * and prints this history when the reference count is already zero
		 * on entry to DecrementRef.  Significant space cost.  (The flag is available
		 * in release builds but effective in debug builds only.)
		 */
		bool keepDRCHistory;

		bool dontAddToZCTDuringCollection;
		bool incrementalValidation;

#ifdef _DEBUG
		bool incrementalValidationPedantic;
#endif

		/**
		 * Garbage collection mode.  The GC is configured at creation in one of
		 * these (it would be pointlessly hairy to allow the mode to be changed
		 * at run-time).
		 */
		enum GCMode 
		{
			kDisableGC=1,		// never collect
			kGreedyGC,			// stop-the-world collection at every allocation
			kIncrementalGC,		// incremental collection
			kNonincrementalGC	// nonincremental collection
		};

		GC(GCHeap *heap, GCMode mode);
		
		virtual ~GC();
		
		/**
		 * Causes an immediate stop-the-world garbage collection (or finishes any
		 * incremental collection underway).
		 */
		void Collect(bool scanStack=true);

		/**
		 * Do a full collection at the next MMGC_GCENTER macro site
		 */
		void QueueCollection();

		/**
		 * Flags to be passed as the second argument to Alloc.
		 */
		enum AllocFlags
		{
			kZero=1,
			kContainsPointers=2,
			kFinalize=4,
			kRCObject=8,
			kCanFail=16			
		};

		// FIXME: why is this public?
		enum PageType
		{
			kNonGC = 0,
			kGCAllocPage = 1,
			kGCLargeAllocPageRest = 2,
			kGCLargeAllocPageFirst = 3
		};

		/**
		 * Main interface for allocating memory.  Default flags are
		 * no finalization, not containing pointers, not zero'd, and not ref-counted.
		 *
		 * This function returns NULL only if kCanFail is passed in 'flags'.
		 */
		void *Alloc(size_t size, int flags=0);
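
		// Illustrative sketch (not part of the original header): requesting a
		// zeroed, pointer-containing block that is allowed to fail; the NULL
		// check is needed only because kCanFail is passed.
		//
		//   void* p = gc->Alloc(nbytes, GC::kZero | GC::kContainsPointers | GC::kCanFail);
		//   if (p == NULL) { /* handle allocation failure */ }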

		/**
		 * Specialized implementations of Alloc().  Flags are omitted; each function is annotated
		 * with the flags it assumes.  Additionally, 'size' is usually known statically in the
		 * calling context (typically a 'new' operator).  Finally, these are always inlined.
		 * The result is that most computation boils away and we're left with just a call to the
		 * underlying primitive operator.
		 */
		void *AllocPtrZero(size_t size);			// Flags: GC::kContainsPointers|GC::kZero
		void *AllocPtrZeroFinalized(size_t size);	// Flags: GC::kContainsPointers|GC::kZero|GC::kFinalize
		void *AllocRCObject(size_t size);			// Flags: GC::kContainsPointers|GC::kZero|GC::kRCObject|GC::kFinalize

		/**
		 * Like Alloc but optimized for the case of allocating one 8-byte non-pointer-containing
		 * non-finalizable non-rc non-zeroed object (a box for an IEEE double).
		 */
		void* AllocDouble();

		/**
		 * Like Alloc but allocating a little extra memory; factored out as a
		 * separate function in order to allow for a fast object overflow check.
		 */
		void *AllocExtra(size_t size, size_t extra, int flags=0);

		/**
		 * Specialized implementations of Alloc().  See above for explanations.
		 */
		void *AllocExtraPtrZero(size_t size, size_t extra);				// Flags: GC::kContainsPointers|GC::kZero
		void *AllocExtraPtrZeroFinalized(size_t size, size_t extra);	// Flags: GC::kContainsPointers|GC::kZero|GC::kFinalize
		void *AllocExtraRCObject(size_t size, size_t extra);			// Flags: GC::kContainsPointers|GC::kZero|GC::kRCObject|GC::kFinalize
		
		/**
		 * Out-of-line version of AllocExtra, used by the specialized versions
		 */
		void *OutOfLineAllocExtra(size_t size, size_t extra, int flags);
		
		/**
		 * Just like Alloc but can return NULL
		 */
		void *PleaseAlloc(size_t size, int flags=0);
		
		/**
		 * Signal that we've allocated some memory and that collection can be triggered
		 * if necessary.
		 */
		void SignalAllocWork(size_t size);

	private:
		const static size_t kLargestAlloc = 1968;

		class RCRootSegment : public GCRoot
		{
		public:
			RCRootSegment(GC* gc, void* mem, size_t size);
			void*			mem;
			size_t			size;
			RCRootSegment*	prev;
			RCRootSegment*	next;
		};
		
		RCRootSegment* rcRootSegments;
		
		void AddRCRootSegment(RCRootSegment *segment);

		void RemoveRCRootSegment(RCRootSegment *segment);

	public:

		class AutoRCRootSegment : public RCRootSegment
		{
		public:
			AutoRCRootSegment(GC* gc, void* mem, size_t size);
			~AutoRCRootSegment();			
		};

		
		/**
		 * Allocate memory that will be scanned for pointers to GC memory
		 * and from which pointers to RC objects will pin those objects.
		 */
		void* AllocRCRoot(size_t size);
		
		/**
		 * Free memory allocated with AllocRCRoot.
		 */
		void FreeRCRoot(void* mem);
		
		/**
		 * Overflow-checking way to call Alloc for 'num' items of 'elsize' bytes each;
		 * all instances of Alloc(num*sizeof(thing)) should be replaced with:
		 * Calloc(num, sizeof(thing))
		 */
		void *Calloc(size_t num, size_t elsize, int flags=0);

		/**
		 * Free a GC-allocated pointer.  The pointer may be NULL.
		 */
		void Free(const void *ptr);

		/**
		 * Free a GC-allocated pointer.  The pointer must not be NULL.
		 */
		void FreeNotNull(const void *ptr);
		
		/**
		 * @return the size of a managed object given a user or real pointer to its
		 * beginning.  The returned value may be bigger than what was asked for.
		 */
		static size_t Size(const void *ptr);

		/**
		 * @return the GC object associated with a managed object.
		 * The item must be a small item (< 1968 bytes, i.e. kLargestAlloc) or the first page
		 * of a large allocation; this won't work on secondary pages of a large allocation.
		 */
		static GC* GetGC(const void *item);
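
		// Illustrative sketch (not part of the original header): recovering the
		// owning GC and the allocated size from a pointer to the start of a
		// managed object ('obj' is hypothetical).
		//
		//   MMgc::GC* owner = MMgc::GC::GetGC(obj);
		//   size_t    bytes = MMgc::GC::Size(obj);  // may exceed the requested size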
		
		/**
		 * Tracers should employ GetMark and SetMark to
		 * set the mark bits during the mark pass.
		 */
		static int GetMark(const void *item);
		
		static int SetMark(const void *item);

		// Not a hot method
		void ClearQueued(const void *item);
		
		// not a hot method
		static void ClearFinalized(const void *item);

		// not a hot method
		static void SetFinalize(const void *item);

		// not a hot method
		static int IsFinalized(const void *item);

		// not a hot method
		static int HasWeakRef(const void *item);

		/**
		 * Used by sub-allocators to obtain memory.
		 */
		void* AllocBlock(int size, int pageType, bool zero=true, bool canFail=false);

		void FreeBlock(void *ptr, uint32_t size);

		// Host API: this should go away but Player code is currently using it.  VM code should not use it.
		GCHeap *GetGCHeap() const;

		void ReapZCT(bool scanStack=true);
		
		bool Reaping();

#ifdef _DEBUG
		// Test whether the RCObject cleaned itself properly by zeroing everything.
		void RCObjectZeroCheck(RCObject *);

		// Test whether 'item' points into a non-free object in the gc'd heap.
		bool IsPointerIntoGCObject(const void* item);
#endif

		/**
		 * Do as much marking as possible in the allotted time slice
		 */
		void IncrementalMark();

		/**
		 * Perform some garbage collection work: the allocation budget has been exhausted.
		 */
		void CollectionWork();

		/**
		 * Are we currently marking?
		 */
		bool IncrementalMarking();

		//////////////////////////////////////////////////////////////////////////
		//
		// Write barrier.  Those that are REALLY_INLINE are defined in WriteBarrier.h.

	private:
		/**
		 * Perform the actual store of value into *address, adjusting reference counts.
		 */
		/*REALLY_INLINE*/ void WriteBarrierWriteRC(const void *address, const void *value);
		
		/*
		 * Streamlined versions to be used from the ctor/dtor of containers.
		 */
		/*REALLY_INLINE*/ void WriteBarrierWriteRC_ctor(const void *address, const void *value);
		/*REALLY_INLINE*/ void WriteBarrierWriteRC_dtor(const void *address);
		
		/**
		 * Perform the actual store of value into *address.  (This is just a store, but there
		 * is additional error checking in debug builds.)
		 */
		/*REALLY_INLINE*/ void WriteBarrierWrite(const void *address, const void *value);

		/**
		 * The marker is running and the container object has just been transitioned from
		 * 'marked' to 'queued'.
		 */
		void WriteBarrierHit(const void* container);
		
	private:
		/**
		 * Implementation of privateWriteBarrier; too large to be inlined everywhere.
		 */
		/*REALLY_INLINE*/ void privateInlineWriteBarrier(const void *container, const void *address, const void *value);

		/**
		 * Like the preceding but computes 'container' only if 'marking' is true (important optimization).
		 */
		/*REALLY_INLINE*/ void privateInlineWriteBarrier(const void *address, const void *value);
		
		/**
		 * Implementation of privateWriteBarrierRC; too large to be inlined everywhere.
		 */
		/*REALLY_INLINE*/ void privateInlineWriteBarrierRC(const void *container, const void *address, const void *value);

		/**
		 * Like the preceding but computes 'container' only if 'marking' is true (important optimization).
		 */
		/*REALLY_INLINE*/ void privateInlineWriteBarrierRC(const void *address, const void *value);
		
		/**
		 * Out-of-line implementation of ConservativeWriteBarrierNoSubstitute, called only
		 * when 'marking' is true.
		 */
		void privateConservativeWriteBarrierNoSubstitute(const void *address);
		
	public:
		/**
		 * General, conditional write barrier trap.  Tests that incremental marking is in fact ongoing
		 * and that the container is black (marked, not queued); if so, it makes the container gray
		 * by scheduling it for rescanning at some appropriate time.
		 *
		 * Container must be a non-NULL untagged pointer to the beginning of an object on a page
		 * owned by this GC.
		 */
		void WriteBarrierTrap(const void *container);
		
		/**
		 * Inline implementation of WriteBarrierTrap; probably too large to be inlined everywhere
		 * so use sparingly.  AvmCore::atomWriteBarrier benefits substantially from inlining it.
		 *
		 * PRECONDITION: 'marking' must be true when this is called.
		 */
		/*REALLY_INLINE*/ void InlineWriteBarrierTrap(const void *container);
		
		/**
		 * Standard write barrier write for non-RC values.  If marking is ongoing, and the 'container'
		 * is black (ie marked and not queued) and the 'value' is white (ie unmarked and not queued) 
		 * then make sure to queue value for marking.
		 *
		 * Finally stores value into *address.
		 *
		 * 'container' may be NULL, in which case this is just a store and 'address' can be arbitrary.
		 *
		 * If 'container' is not NULL then it is constrained as for WriteBarrierTrap, and 'address'
		 * must point into the object referenced by 'container'.
		 *
		 * Value can be anything except a reference to an RCObject.
		 *
		 * This is called by the WB macro in WriteBarrier.h - not an API to be used otherwise.
		 */
		void privateWriteBarrier(const void *container, const void *address, const void *value);

		/**
		 * A write barrier that finds the container's address and the container's
		 * GC and then performs a standard write barrier operation (see privateWriteBarrier).
		 * Finally stores value into *address.
		 */
		static void WriteBarrier(const void *address, const void *value);
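
		// Illustrative sketch (not part of the original header): a barriered store
		// of a non-RC GC object pointer into a field of another GC-managed object
		// ('container' and 'newValue' are hypothetical).
		//
		//   MMgc::GC::WriteBarrier(&container->field, newValue);  // also stores newValue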
		
		/**
		 * Standard write barrier write for RC values.  If marking is ongoing, and the 'container'
		 * is black (ie marked and not queued) and the 'value' is white (ie unmarked and not queued) 
		 * then make sure to queue value for marking.
		 *
		 * Finally stores value into *address, adjusting the reference counts of both the old value
		 * and the new value.
		 *
		 * 'container' is constrained as for WriteBarrierTrap.
		 *
		 * 'address' must point into the object referenced by 'container'.  The value there, if not
		 * NULL, must be a (possibly tagged) pointer to an RC object.
		 *
		 * 'value', if not NULL, must point to an RC object owned by this GC.
		 * 
		 * This is called by the WBRC macro in WriteBarrier.h - not an API to be used otherwise.
		 */
		void privateWriteBarrierRC(const void *container, const void *address, const void *value);
		
		/**
		 * A write barrier that finds the container's address and the container's GC
		 * and then performs a standard RC write barrier operation (see privateWriteBarrierRC).
		 */
		static void FASTCALL WriteBarrierRC(const void *address, const void *value);

		/**
		 * Like WriteBarrierRC, but used when calling from a container's ctor/dtor.
		 * We can avoid some unnecessary work in each case.
		 */
		static void FASTCALL WriteBarrierRC_ctor(const void *address, const void *value);
		static void FASTCALL WriteBarrierRC_dtor(const void *address);

		/**
		 * Host API: if 'address' points to a GC page (it can point into an object, not just
		 * to the start of one), and 'value' points to a GC page as well, record that 'value'
		 * has been stored into the object containing 'address'.
		 *
		 * The function does /not/ perform the store (this is what NoSubstitute means).
		 *
		 * The function works for both RCObjects and GCObjects.
		 */
		void ConservativeWriteBarrierNoSubstitute(const void *address, const void *value);

		/**
		 * Host API: 'container' must be a non-NULL untagged pointer to the beginning of an
		 * object on a page owned by this GC.  Record that 'value' has been stored into
		 * 'container'.
		 *
		 * The function does /not/ perform the store (this is what NoSubstitute means).
		 *
		 * The function works for both RCObjects and GCObjects.
		 */
		void WriteBarrierNoSubstitute(const void *container, const void *value);
		
	public:
		GCPolicyManager policy;
		
	public:

		// not a hot method
		bool ContainsPointers(const void *item);

		// not a hot method.  Will reliably return NULL if gcItem does not point into managed
		// memory (and _DEBUG code will also assert if that happens - it should not).
		void *FindBeginningGuarded(const void *gcItem, bool allowGarbage=false);

		// Legacy public API.  DO NOT USE from within AVM code.
		void *FindBeginning(const void *gcItem);

		// Used on the critical path of the write barrier.  gcItem must point into
		// managed memory, and NULL is never returned.  Don't use this for GCAssert and
		// similar purposes, but use it when speed is key.
		void *FindBeginningFast(const void *gcItem);
		
		// not a hot method; PinStackObjects goes directly to GCAlloc::IsRCObject/GCLargeAlloc::IsRCObject.
		bool IsRCObject(const void *);

		/**
		 * True during Sweep phase.  Application code can use this to
		 * determine if it's being called (directly or indirectly) from a
		 * finalizer.
		 */
		bool Collecting();

		/**
		 * @return true if the item points to a page containing managed objects.
		 * Any pointer can be passed in here.
		 */
		bool IsPointerToGCPage(const void *item);

		bool IsPointerToGCObject(const void *realPtr);
		
		/**
		 * GC initialization time, in ticks.  Used for logging.
		 * FIXME: why is this public?
		 */
		const uint64_t t0;

		// FIXME: these three static methods could be lifted out of the GC class to the global level,
		// they're simple utilities.
		static double duration(uint64_t start);
		
		// a tick is the unit of VMPI_getPerformanceFrequency()
		static uint64_t ticksToMicros(uint64_t ticks);

		static uint64_t ticksToMillis(uint64_t ticks);

		/**
		 * Total number of bytes of pointer-containing memory scanned by this
		 * GC.  Used to measure marking rate, which is
		 * <code>bytesMarked()/ticksToMillis(markTicks())</code>.
		 */
		uint64_t bytesMarked();

		/**
		 * Total time spent doing incremental marking, in ticks.  See
		 * bytesMarked.
		 */
		uint64_t markTicks();

		// calls to mark item
		uint32_t lastStartMarkIncrementCount;
		uint32_t markIncrements();

		/**
		 * Number of calls to Sweep().
		 */
        uint32_t sweeps;

		/**
		 * Time of the latest FinishIncrementalMark() call, in ticks.
		 */
		uint64_t sweepStart;

		// Called at some appropriate moment from the mutator, ideally at a point
		// where all possible GC references will be below the current stack pointer
		// (ie in memory we can safely zero).  This will return right away if
		// called more than once between collections, so it can safely be called
		// a lot without impacting performance.
		void CleanStack(bool force=false);

		bool Destroying();

		static GCWeakRef *GetWeakRef(const void *obj);
		
		// a WeakRef that always refers to null. useful if you need one.
		GCWeakRef* emptyWeakRef;

		void ClearWeakRef(const void *obj);

		// legacy API that gets physical start of OS thread
		uintptr_t GetOSStackTop() const;

		uintptr_t GetStackTop() const;

		uintptr_t GetStackEnter() const;

		// for deciding a tree of things should be scanned from presweep
		void PushWorkItem_MayFail(GCWorkItem &item);
		bool GetMarkStackOverflow() const { return m_markStackOverflow; }
		
#ifdef DEBUG
		// Check that invariants for an inactive GC hold
		void ShouldBeInactive();
#endif

	private:

		// heapAlloc is like heap->Alloc except that it also calls policy.signalBlockAllocation
		// if the allocation succeeded.
		void *heapAlloc(size_t size, int flags=GCHeap::kExpand|GCHeap::kZero|GCHeap::kProfile);

		// heapFree is like heap->Free except that it also calls policy.signalBlockDeallocation.
		void heapFree(void *ptr, size_t siz=0, bool profile=true);

		friend class GCAutoEnter;
		friend class GCAutoEnterPause;
		void SetStackEnter(GCAutoEnter *enter, bool doCollectionWork=true);

		GCAutoEnter *GetAutoEnter();

 		vmpi_spin_lock_t m_gcLock;

 		bool onThread();

		// store a handle to the thread that created the GC to ensure thread safety
		vmpi_thread_t m_gcThread;

		// Will be called from GCHeap::AbortCleanup before a longjmp that jumps past the
		// GC (effectively aborting an operation, be it allocation or GC).  This function
		// must be aware of the internal state of the GC and must take
		// care to make that state consistent.
		void SignalImminentAbort();
		
		void gclog(const char *format, ...);
		void log_mem(const char *name, size_t s, size_t comp );

		const static int kNumSizeClasses = 40;

		// FIXME: only used for FixedAlloc, GCAlloc sized dynamically
		const static int kPageUsableSpace = 3936;

		uint32_t *GetBits(int numBytes, int sizeClass);

		void FreeBits(uint32_t *bits, int sizeClass);

		uint32_t *m_bitsFreelists[kNumSizeClasses];
		uint32_t *m_bitsNext;

		GCHashtable weakRefs;

		// BEGIN FLAGS
		// The flags are hot, group them and hope they end up in the same cache line
		
		// True when the GC is being destroyed
		bool destroying;

		/**
		 * True if incremental marking is on and some objects have been marked.
		 * This means write barriers are enabled.
		 *
		 * The GC thread may read and write this flag.  Application threads in
		 * requests have read-only access.
		 *
		 * It is possible for marking==true and collecting==false but not vice versa.
		 */
		bool marking;
		
		/**
		 * True during the sweep phase of collection.  Several things have to
		 * behave a little differently during this phase.  For example,
		 * GC::Free() does nothing during sweep phase; otherwise finalizers
		 * could be called twice.
		 *
		 * Also, Collect() uses this to protect itself from recursive calls
		 * (from badly behaved finalizers).
		 *
		 * It is possible for marking==true and collecting==false but not vice versa.
		 */
		bool collecting;
		
		// END FLAGS
		
		
		// We track the top and bottom of the stack for cleaning purposes.
		// The top tells us how far up the stack has been dirtied.
		// The bottom is also tracked so we can ensure we're on the same
		// stack that the GC is responsible for cleaning.  This is necessary if multiple
		// threads use the GC; only the thread that creates the GC will have its stack
		// tracked and cleaned.
		bool stackCleaned;
		const void *rememberedStackTop;
		GCAutoEnter* stackEnter;

		GCRoot* emptyWeakRefRoot;

		GCMarkStack m_incrementalWork;
		void StartIncrementalMark();
		void FinishIncrementalMark(bool scanStack);

		bool m_markStackOverflow;
		void HandleMarkStackOverflow();
		void SignalMarkStackOverflow(GCWorkItem& item);
		
		GCMarkStack m_barrierWork;
		void CheckBarrierWork();
		void FlushBarrierWork();

		// Set at initialization to the deepest recursion level MarkItem is allowed to
		// reach.  Managed entirely within MarkItem.
		uint32_t mark_item_recursion_control;
		
#ifdef _DEBUG
		bool IsWhite(const void *item);
#endif

		// Used heavily by the write barrier.
		bool IsMarkedThenMakeQueued(const void* userptr);

		// Used heavily by GC::Free.
		bool IsQueued(const void* userptr);

		const static int16_t kSizeClasses[kNumSizeClasses];		
		const static uint8_t kSizeClassIndex[246];

		// These two members help optimize GC::Alloc: by keeping a pointer in the GC instance
		// for the kSizeClassIndex table we avoid code generation SNAFUs when compiling with -fPIC,
		// which is the default on Mac at least.  (GCC generates a call-to-next-instruction-and-pop
		// to obtain the PC address, from which it can compute the table address.  Keeping the
		// member here effectively hoists that computation out of the allocator.)  And by keeping
		// a lookup table of allocators indexed by the flag bits of interest we avoid a decision
		// tree inside GC::Alloc.

		const uint8_t* const sizeClassIndex;
		GCAlloc** allocsTable[(kRCObject|kContainsPointers)+1];

		void *m_contextVars[GCV_COUNT];

		// bitmap for what pages are in use, 2 bits for every page
		// 0 - not in use
		// 1 - used by GCAlloc
		// 3 - used by GCLargeAlloc

		uintptr_t memStart;
		uintptr_t memEnd;

		/**
		 * The bitmap for what pages are in use.  Any access to either the
		 * pageMap pointer or the bitmap requires pageMapLock.
		 *
		 * (Note: A better synchronization scheme might be to use atomic
		 * operations to read and write the pageMap pointer, writing it only
		 * from within m_lock; and then using atomic read and write
		 * operations--on Intel x86, these are just ordinary reads and
		 * writes--to access the bitmap, with writes again only from within
		 * m_lock.  This would require reallocating the pageMap more often,
		 * but at least write barriers wouldn't have to acquire the spinlock.)
		 */
		unsigned char *pageMap;

		// This is very hot
		int GetPageMapValue(uintptr_t addr) const;

		// This is warm - used in IsPointerToGCPage and in FindBeginningGuarded
		int GetPageMapValueGuarded(uintptr_t addr);

		/**
		 * Set the pageMap bits for the given address.  Those bits must be
		 * zero beforehand.
		 */
		void SetPageMapValue(uintptr_t addr, int val);

		/**
		 * Zero out the pageMap bits for the given address.
		 */
		void ClearPageMapValue(uintptr_t addr);

		void MarkGCPages(void *item, uint32_t numpages, int val);
		void UnmarkGCPages(void *item, uint32_t numpages);
		
		/**
		 * Mark a region of memory.  This recursively searches all memory pointed to
		 * and marks any GC objects it finds.
		 */
		void ConservativeMarkRegion(const void *base, size_t bytes);

		GCAlloc *containsPointersAllocs[kNumSizeClasses];
		GCAlloc *containsPointersRCAllocs[kNumSizeClasses];
		GCAlloc *noPointersAllocs[kNumSizeClasses];
		GCLargeAlloc *largeAlloc;
		GCHeap *heap;

#ifdef _DEBUG
	public:
#else
	private:
#endif
		void ClearMarks();

#ifdef _DEBUG
	public:
		/**
		 * Kill any incremental mark in progress, then trace from roots and stack.  If
		 * stackStart/stackSize are NULL/0 then the thread stack is obtained and used.
		 *
		 * It's sometimes useful for the mutator to call this; the ZCT reaper uses it for
		 * validating the heap.
		 *
		 * @return   true if it succeeded, false if there was a mark stack overflow.
		 */
		bool Trace(const void *stackStart=NULL, uint32_t stackSize=0);
#endif

	private:
		void Finalize();
		void Sweep();
		void ForceSweepAtShutdown();
		void MarkAllRoots(bool deep=false);
		void Mark();
		void MarkQueueAndStack(bool scanStack=true);
		void MarkItem(GCWorkItem &wi);

	public:
		// Sweep all small-block pages that need sweeping
		void SweepNeedsSweeping();
		
	private:
		bool finalizedValue;

		void AddToSmallEmptyBlockList(GCAlloc::GCBlock *b);

		/**
		 * List of pages to be swept, built up in Finalize.
		 */
		GCAlloc::GCBlock *smallEmptyPageList;
		
		void AddToLargeEmptyBlockList(GCLargeAlloc::LargeBlock *lb);

		/**
		 * List of pages to be swept, built up in Finalize.
		 */
		GCLargeAlloc::LargeBlock *largeEmptyPageList;
		
		vmpi_spin_lock_t m_rootListLock;

		GCRoot *m_roots;
		void AddRoot(GCRoot *root);
		void RemoveRoot(GCRoot *root);
		
		/**
		 * Points to the head of a linked list of callback objects.
		 */
		GCCallback *m_callbacks;
		void AddCallback(GCCallback *cb);
		void RemoveCallback(GCCallback *cb);

		// Deferred ref counting implementation
		ZCT zct;
#ifdef MMGC_REFCOUNT_PROFILING
		void AddToZCT(RCObject *obj, bool initial=false);
#else
		void AddToZCT(RCObject *obj);
#endif

		// Public for one hack in splay.cpp - no one else should call
		// this from outside the GC.  (The usage pattern in that file could
		// probably be abstracted into a better API function here.)
public:
#ifdef MMGC_REFCOUNT_PROFILING
		REALLY_INLINE void RemoveFromZCT(RCObject *obj, bool final=false);
#else
		REALLY_INLINE void RemoveFromZCT(RCObject *obj);
#endif

		static const void *Pointer(const void *p);

public:
		void DumpMemoryInfo();
#ifdef MMGC_MEMORY_PROFILER
		void DumpPauseInfo();
#endif
		
private:

		// PushWorkItem is defined in GC.cpp, always inlined in callers there.
		// item.ptr must not be NULL.
		void PushWorkItem(GCWorkItem item);

#ifdef _DEBUG
		/**
		 * Check the consistency of the free lists for all the allocators.
		 */
		void CheckFreelists();

		/**
		 * Conservatively find pointers in the entire heap referencing unmarked objects, 
		 * triggering assertions when one is found.  This is a sanity check on the GC
		 * marking algorithm.
		 */
		void FindUnmarkedPointers();

		/**
		 * Conservatively find pointers in an address range referencing unmarked objects, 
		 * triggering assertions when one is found.
		 */
		void UnmarkedScan(const void *mem, size_t size);
		
		// methods for incremental verification

		/**
		 * Scan a region of memory for white pointers. Used by
		 * FindMissingWriteBarriers.
		 */
		void WhitePointerScan(const void *mem, size_t size);

		/**
		 * Scan all GC memory (skipping roots). If a GC object is black make sure
		 * it has no pointers to white objects.
		 */
		void FindMissingWriteBarriers();

#endif

public:

#ifdef _DEBUG
		// Dump a list of objects that have pointers to the given location.
		void WhosPointingAtMe(void* me, int recurseDepth=0, int currentDepth=0);

		/**
		 * Used by WhosPointingAtMe.
		 */
		void ProbeForMatch(const void *mem, size_t size, uintptr_t value, int recurseDepth, int currentDepth);
#endif

		void DumpAlloc(GCAlloc *a, size_t& internal_waste, size_t& overhead);
		
		// This method returns the number of bytes allocated for GC objects.
		size_t GetBytesInUse();
		
		// This method provides more fine-grained allocation details.
		// It reports the total number of bytes requested (i.e. the ask size) and
		// the number of bytes actually allocated.  The latter is the same
		// number as reported by GetBytesInUse().
		void GetUsageInfo(size_t& totalAskSize, size_t& totalAllocated);

		size_t GetNumBlocks();

		virtual void memoryStatusChange(MemoryStatus oldStatus, MemoryStatus newStatus);

		/* A portable replacement for alloca().
		 *
		 * Memory is allocated from the heap and not from the stack.  It is freed in 
		 * one of two ways: If the function returns normally then an auto_ptr like
		 * mechanism frees the memory.  If the function leaves by throwing an exception
		 * (or if one of its callees throws an exception) then the exception
		 * handling mechanism in Exception.{h,cpp} frees the memory by releasing 
		 * everything that is still allocated that was allocated since the exception
		 * handler was erected.
		 *
		 * The auto_ptr mechanism, based on the class MMgc::GC::AllocaAutoPtr, cannot be
		 * circumvented, as allocaPush() takes a reference to such an object as an argument.
		 *
		 * Typical usage:
		 *
		 *    MMgc::GC::AllocaAutoPtr _ptr;                      // by convention prefixed by "_"
		 *    int* ptr = (int*)core->allocaPush(nbytes, _ptr);   // by convention same name, no "_"
		 *
		 * In practice the VMPI_alloca() macro, defined in avmbuild.h, should be used so that
		 * real alloca() can be used on platforms where that makes sense.
		 *
		 * Benchmarks suggest that the performance differences from using this mechanism
		 * instead of real alloca() are slight to nonexistent, and that the heap allocation
		 * sometimes provides a performance improvement.
		 */
	public:
		struct AllocaStackSegment
		{
			void* start;				// first address; also, the RCRoot pointer
			void* limit;				// address past data
			void* top;					// address past live data, if this segment is not the top segment
			AllocaStackSegment* prev;	// segments further from the top
		};
		
		void allocaInit();
		void allocaShutdown();
		void allocaUnwind();			// restore to initial state
		void allocaPopToSlow(void* top);
		void* allocaPushSlow(size_t nbytes);
		void pushAllocaSegment(size_t nbytes);
		void popAllocaSegment();

		AllocaStackSegment* top_segment;// segment at the stack top
		void* stacktop;					// current first free word in top_segment
#ifdef _DEBUG
		size_t stackdepth;				// useful to have for debugging
#endif
		
	public:
		/* See documentation above */
		
		class AllocaAutoPtr
		{
			friend class GC;
		public:
			AllocaAutoPtr();
			~AllocaAutoPtr();
		private:
			GC* gc;
			void* unwindPtr;
		};
				
		void* allocaTop();
		
		void allocaPopTo(void* top);
	
		void* allocaPush(size_t nbytes, AllocaAutoPtr& x);
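		/* Illustrative sketch (an assumption, not code from the VM): allocaTop and
		 * allocaPopTo can bracket a region so that everything pushed inside it is
		 * released at once:
		 *
		 *    void* mark = gc->allocaTop();
		 *    ... one or more allocaPush calls ...
		 *    gc->allocaPopTo(mark);
		 */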

#ifdef DEBUGGER
 	public:
		// Retrieve the opaque sampler attached to this GC; NULL if none has been attached
		void* GetAttachedSampler();

		// Attach an opaque sampler to this GC, overriding any previous sampler
		void SetAttachedSampler(void *sampler);
 		
 	private:
		// An attached sampler, or NULL.  We need to attach the sampler to the GC
		// because it moves with the GC/AvmCore pair; it is not attached to a
		// particular thread.
 		void* m_sampler;
#endif

#ifdef MMGC_HEAP_GRAPH			

	public:

		void addToBlacklist(const void *gcptr);
		void removeFromBlacklist(const void *gcptr);

	private:

		GCHashtable blacklist;

		void printBlacklist();
		void pruneBlacklist();
		
		const void *findGCGraphBeginning(const void *addr, bool &wasDeletedGCRoot);
		void dumpBackPointerChain(const void *obj, HeapGraph &g);
		void dumpBackPointerChainHelper(const void *p, HeapGraph& g);
		HeapGraph mutatorGraph;		
		HeapGraph markerGraph;
#endif
	};

	// Helper class to wipe out the vtable pointer of members, for DRC.
	class Cleaner
	{
	public:
		Cleaner();
		~Cleaner();
		// don't let myself move between objects
		Cleaner& operator=(const Cleaner& /*rhs*/);
		void set(const void * _v, size_t _size);

		int *v;
		size_t size;
	};

	/**
	 * Stack-allocated object that takes care of several things, including defining
	 * stack boundaries, performing queued collections/ZCT reaps when the stack is
	 * low, and stack cleaning.
	 */
	class GCAutoEnter
	{
	public:
		GCAutoEnter(GC *gc);
		~GCAutoEnter();
		void Destroy() { gc = NULL; }
	private:
		GC* gc;
	};
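	/* Illustrative sketch (an assumption, not code from the VM): GCAutoEnter is meant to
	 * be created on the stack at an entry point into GC-using code, so the collector knows
	 * the stack boundary to scan and clean:
	 *
	 *    void SomeEntryPoint(MMgc::GC* gc)    // hypothetical entry point
	 *    {
	 *        MMgc::GCAutoEnter enter(gc);
	 *        ... code that allocates and may trigger a collection ...
	 *    }
	 */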

	/**
	 * Undoes the effects of GCAutoEnter on entry and reapplies them upon exit.
	 */
	class GCAutoEnterPause
	{
	public:
		GCAutoEnterPause(GC *gc);
		~GCAutoEnterPause();
	private:
		GC* gc;
		GCAutoEnter *enterSave;
	};
}

#endif /* __GC__ */