# Support for Intel Xeon E5 2600 series uncore monitoring
# see http://www.intel.com/content/dam/www/public/us/en/documents/design-guides/xeon-e5-2600-uncore-guide.pdf
# for more details on the events and formulas.

# Register-name aliases: short mnemonic -> full uncore PMON register name,
# as documented in the Xeon E5-2600 uncore performance monitoring guide.
# All alias names are valid identifiers, so keyword-argument form is used;
# insertion order matches the original literal.
aliases = dict(
    QPIMatch1="Q_Py_PCI_PMON_PKT_MATCH1",
    QPIMask0="Q_Py_PCI_PMON_PKT_MASK0",
    QPIMatch0="Q_Py_PCI_PMON_BOX_MATCH0",
    PCUFilter="PCU_MSR_PMON_BOX_FILTER",
    CBoFilter="Cn_MSR_PMON_BOX_FILTER",
    QPIMask1="Q_Py_PCI_PMON_PKT_MASK1",
)

events = {

# R3QPI:
     "R3QPI.CLOCKTICKS": {
          "Box": "R3QPI",
          "Category": "R3QPI UCLK Events",
          "Counters": "0-2",
          "Defn": "Counts the number of uclks in the QPI uclk domain.  This could be slightly different than the count in the Ubox because of enable/freeze delays.  However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
          "Desc": "Number of uclks in domain",
          "EvSel": 1,
     },
     "R3QPI.IIO_CREDITS_ACQUIRED": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times the NCS/NCB/DRS credit is acquired in the QPI for sending messages on BL to the IIO.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit Acquired",
          "EvSel": 32,
     },
     "R3QPI.IIO_CREDITS_ACQUIRED.NCS": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times the NCS/NCB/DRS credit is acquired in the QPI for sending messages on BL to the IIO.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit Acquired",
          "EvSel": 32,
          "Umask": "bxx1xxxxx",
     },
     "R3QPI.IIO_CREDITS_ACQUIRED.NCB": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times the NCS/NCB/DRS credit is acquired in the QPI for sending messages on BL to the IIO.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit Acquired",
          "EvSel": 32,
          "Umask": "bxxx1xxxx",
     },
     "R3QPI.IIO_CREDITS_ACQUIRED.DRS": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times the NCS/NCB/DRS credit is acquired in the QPI for sending messages on BL to the IIO.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit Acquired",
          "EvSel": 32,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.IIO_CREDITS_REJECT": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit Rejected",
          "EvSel": 33,
     },
     "R3QPI.IIO_CREDITS_REJECT.NCS": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit Rejected",
          "EvSel": 33,
          "Umask": "bxx1xxxxx",
     },
     "R3QPI.IIO_CREDITS_REJECT.NCB": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit Rejected",
          "EvSel": 33,
          "Umask": "bxxx1xxxx",
     },
     "R3QPI.IIO_CREDITS_REJECT.DRS": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit Rejected",
          "EvSel": 33,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.IIO_CREDITS_USED": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit In Use",
          "EvSel": 34,
     },
     "R3QPI.IIO_CREDITS_USED.NCS": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit In Use",
          "EvSel": 34,
          "Umask": "bxx1xxxxx",
     },
     "R3QPI.IIO_CREDITS_USED.NCB": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit In Use",
          "EvSel": 34,
          "Umask": "bxxx1xxxx",
     },
     "R3QPI.IIO_CREDITS_USED.DRS": {
          "Box": "R3QPI",
          "Category": "R3QPI IIO_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO.  There is one credit for each of these three message classes (three credits total).  NCS is used for reads to PCIe space, NCB is used for transfering data without coherency, and DRS is used for transfering data with coherency (cachable PCI transactions).  This event can only track one message class at a time.",
          "Desc": "to IIO BL Credit In Use",
          "EvSel": 34,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.RING_AD_USED": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R3 AD Ring in Use",
          "EvSel": 7,
     },
     "R3QPI.RING_AD_USED.CW_EVEN": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R3 AD Ring in Use",
          "EvSel": 7,
          "Umask": "bxxxxxxx1",
     },
     "R3QPI.RING_AD_USED.CCW_EVEN": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R3 AD Ring in Use",
          "EvSel": 7,
          "Umask": "bxxxxx1xx",
     },
     "R3QPI.RING_AD_USED.CW_ODD": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R3 AD Ring in Use",
          "EvSel": 7,
          "Umask": "bxxxxxx1x",
     },
     "R3QPI.RING_AD_USED.CCW_ODD": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R3 AD Ring in Use",
          "EvSel": 7,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.RING_AK_USED": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
          "Desc": "R3 AK Ring in Use",
          "EvSel": 8,
     },
     "R3QPI.RING_AK_USED.CW_EVEN": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
          "Desc": "R3 AK Ring in Use",
          "EvSel": 8,
          "Umask": "bxxxxxxx1",
     },
     "R3QPI.RING_AK_USED.CCW_EVEN": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
          "Desc": "R3 AK Ring in Use",
          "EvSel": 8,
          "Umask": "bxxxxx1xx",
     },
     "R3QPI.RING_AK_USED.CW_ODD": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
          "Desc": "R3 AK Ring in Use",
          "EvSel": 8,
          "Umask": "bxxxxxx1x",
     },
     "R3QPI.RING_AK_USED.CCW_ODD": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
          "Desc": "R3 AK Ring in Use",
          "EvSel": 8,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.RING_BL_USED": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R3 BL Ring in Use",
          "EvSel": 9,
     },
     "R3QPI.RING_BL_USED.CW_EVEN": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R3 BL Ring in Use",
          "EvSel": 9,
          "Umask": "bxxxxxxx1",
     },
     "R3QPI.RING_BL_USED.CCW_EVEN": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R3 BL Ring in Use",
          "EvSel": 9,
          "Umask": "bxxxxx1xx",
     },
     "R3QPI.RING_BL_USED.CW_ODD": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R3 BL Ring in Use",
          "EvSel": 9,
          "Umask": "bxxxxxx1x",
     },
     "R3QPI.RING_BL_USED.CCW_ODD": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R3 BL Ring in Use",
          "EvSel": 9,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.RING_IV_USED": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.  The IV ring is unidirectional.  Whether UP or DN is used is dependent on the system programming.  Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.",
          "Desc": "R3 IV Ring in Use",
          "EvSel": 10,
     },
     "R3QPI.RING_IV_USED.ANY": {
          "Box": "R3QPI",
          "Category": "R3QPI RING Events",
          "Counters": "0-2",
          "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.  The IV ring is unidirectional.  Whether UP or DN is used is dependent on the system programming.  Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.",
          "Desc": "R3 IV Ring in Use",
          "EvSel": 10,
          "Umask": "b00001111",
     },
     "R3QPI.RxR_BYPASSED": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times when the Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.",
          "Desc": "Ingress Bypassed",
          "EvSel": 18,
     },
     "R3QPI.RxR_BYPASSED.AD": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times when the Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.",
          "Desc": "Ingress Bypassed",
          "EvSel": 18,
          "Umask": "bxxxxxxx1",
     },
     "R3QPI.RxR_CYCLES_NE": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the QPI Ingress is not empty.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Cycles Not Empty",
          "EvSel": 16,
     },
     "R3QPI.RxR_CYCLES_NE.NCS": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the QPI Ingress is not empty.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Cycles Not Empty",
          "EvSel": 16,
          "Umask": "bxx1xxxxx",
     },
     "R3QPI.RxR_CYCLES_NE.NCB": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the QPI Ingress is not empty.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Cycles Not Empty",
          "EvSel": 16,
          "Umask": "bxxx1xxxx",
     },
     "R3QPI.RxR_CYCLES_NE.DRS": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the QPI Ingress is not empty.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Cycles Not Empty",
          "EvSel": 16,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.RxR_CYCLES_NE.SNP": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the QPI Ingress is not empty.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Cycles Not Empty",
          "EvSel": 16,
          "Umask": "bxxxxxx1x",
     },
     "R3QPI.RxR_CYCLES_NE.HOM": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the QPI Ingress is not empty.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Cycles Not Empty",
          "EvSel": 16,
          "Umask": "bxxxxxxx1",
     },
     "R3QPI.RxR_CYCLES_NE.NDR": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the QPI Ingress is not empty.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Cycles Not Empty",
          "EvSel": 16,
          "Umask": "bxxxxx1xx",
     },
     "R3QPI.RxR_INSERTS": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of allocations into the QPI Ingress.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Allocations",
          "EvSel": 17,
     },
     "R3QPI.RxR_INSERTS.NCS": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of allocations into the QPI Ingress.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Allocations",
          "EvSel": 17,
          "Umask": "bxx1xxxxx",
     },
     "R3QPI.RxR_INSERTS.NCB": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of allocations into the QPI Ingress.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Allocations",
          "EvSel": 17,
          "Umask": "bxxx1xxxx",
     },
     "R3QPI.RxR_INSERTS.DRS": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of allocations into the QPI Ingress.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Allocations",
          "EvSel": 17,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.RxR_INSERTS.SNP": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of allocations into the QPI Ingress.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Allocations",
          "EvSel": 17,
          "Umask": "bxxxxxx1x",
     },
     "R3QPI.RxR_INSERTS.HOM": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of allocations into the QPI Ingress.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Allocations",
          "EvSel": 17,
          "Umask": "bxxxxxxx1",
     },
     "R3QPI.RxR_INSERTS.NDR": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of allocations into the QPI Ingress.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Allocations",
          "EvSel": 17,
          "Umask": "bxxxxx1xx",
     },
     "R3QPI.RxR_OCCUPANCY": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": 0,
          "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles.  This tracks one of the three ring Ingress buffers.  This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
          "Desc": "Ingress Occupancy Accumulator",
          "EvSel": 19,
          "MaxIncCyc": 32,
          "SubCtr": 1,
     },
     "R3QPI.RxR_OCCUPANCY.NCS": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": 0,
          "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles.  This tracks one of the three ring Ingress buffers.  This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
          "Desc": "Ingress Occupancy Accumulator",
          "EvSel": 19,
          "MaxIncCyc": 32,
          "SubCtr": 1,
          "Umask": "bxx1xxxxx",
     },
     "R3QPI.RxR_OCCUPANCY.NCB": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": 0,
          "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles.  This tracks one of the three ring Ingress buffers.  This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
          "Desc": "Ingress Occupancy Accumulator",
          "EvSel": 19,
          "MaxIncCyc": 32,
          "SubCtr": 1,
          "Umask": "bxxx1xxxx",
     },
     "R3QPI.RxR_OCCUPANCY.DRS": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": 0,
          "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles.  This tracks one of the three ring Ingress buffers.  This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
          "Desc": "Ingress Occupancy Accumulator",
          "EvSel": 19,
          "MaxIncCyc": 32,
          "SubCtr": 1,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.RxR_OCCUPANCY.SNP": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": 0,
          "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles.  This tracks one of the three ring Ingress buffers.  This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
          "Desc": "Ingress Occupancy Accumulator",
          "EvSel": 19,
          "MaxIncCyc": 32,
          "SubCtr": 1,
          "Umask": "bxxxxxx1x",
     },
     "R3QPI.RxR_OCCUPANCY.HOM": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": 0,
          "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles.  This tracks one of the three ring Ingress buffers.  This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
          "Desc": "Ingress Occupancy Accumulator",
          "EvSel": 19,
          "MaxIncCyc": 32,
          "SubCtr": 1,
          "Umask": "bxxxxxxx1",
     },
     "R3QPI.RxR_OCCUPANCY.NDR": {
          "Box": "R3QPI",
          "Category": "R3QPI INGRESS Events",
          "Counters": 0,
          "Defn": "Accumulates the occupancy of a given QPI Ingress queue in each cycles.  This tracks one of the three ring Ingress buffers.  This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
          "Desc": "Ingress Occupancy Accumulator",
          "EvSel": 19,
          "MaxIncCyc": 32,
          "SubCtr": 1,
          "Umask": "bxxxxx1xx",
     },
     "R3QPI.TxR_CYCLES_FULL": {
          "Box": "R3QPI",
          "Category": "R3QPI EGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
          "Desc": "Egress Cycles Full",
          "EvSel": 37,
     },
     "R3QPI.TxR_CYCLES_NE": {
          "Box": "R3QPI",
          "Category": "R3QPI EGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the QPI Egress is not empty.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Egress Occupancy Accumulator event in order to calculate average queue occupancy.  Only a single Egress queue can be tracked at any given time.  It is not possible to filter based on direction or polarity.",
          "Desc": "Egress Cycles Not Empty",
          "EvSel": 35,
     },
     "R3QPI.TxR_INSERTS": {
          "Box": "R3QPI",
          "Category": "R3QPI EGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of allocations into the QPI Egress.  This tracks one of the three rings that are used by the QPI agent.  This can be used in conjunction with the QPI Egress Occupancy Accumulator event in order to calculate average queue latency.  Only a single Egress queue can be tracked at any given time.  It is not possible to filter based on direction or polarity.",
          "Desc": "Egress Allocations",
          "EvSel": 36,
     },
     "R3QPI.TxR_NACK": {
          "Box": "R3QPI",
          "Category": "R3QPI EGRESS Events",
          "Counters": "0-1",
          "Desc": "Egress NACK",
          "EvSel": 38,
     },
     "R3QPI.VN0_CREDITS_REJECT": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a request failed to acquire a DRS VN0 credit.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed.  This should generally be a rare situation.",
          "Desc": "VN0 Credit Acquisition Failed on DRS",
          "EvSel": 55,
     },
     "R3QPI.VN0_CREDITS_REJECT.NCS": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a request failed to acquire a DRS VN0 credit.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed.  This should generally be a rare situation.",
          "Desc": "VN0 Credit Acquisition Failed on DRS",
          "EvSel": 55,
          "Umask": "bxx1xxxxx",
     },
     "R3QPI.VN0_CREDITS_REJECT.NCB": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a request failed to acquire a DRS VN0 credit.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed.  This should generally be a rare situation.",
          "Desc": "VN0 Credit Acquisition Failed on DRS",
          "EvSel": 55,
          "Umask": "bxxx1xxxx",
     },
     "R3QPI.VN0_CREDITS_REJECT.DRS": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a request failed to acquire a DRS VN0 credit.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed.  This should generally be a rare situation.",
          "Desc": "VN0 Credit Acquisition Failed on DRS",
          "EvSel": 55,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.VN0_CREDITS_REJECT.SNP": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a request failed to acquire a DRS VN0 credit.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed.  This should generally be a rare situation.",
          "Desc": "VN0 Credit Acquisition Failed on DRS",
          "EvSel": 55,
          "Umask": "bxxxxxx1x",
     },
     "R3QPI.VN0_CREDITS_REJECT.HOM": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a request failed to acquire a DRS VN0 credit.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed.  This should generally be a rare situation.",
          "Desc": "VN0 Credit Acquisition Failed on DRS",
          "EvSel": 55,
          "Umask": "bxxxxxxx1",
     },
     "R3QPI.VN0_CREDITS_REJECT.NDR": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a request failed to acquire a DRS VN0 credit.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed.  This should generally be a rare situation.",
          "Desc": "VN0 Credit Acquisition Failed on DRS",
          "EvSel": 55,
          "Umask": "bxxxxx1xx",
     },
     "R3QPI.VN0_CREDITS_USED": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a VN0 credit was used on the DRS message channel.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This counts the number of times a VN0 credit was used.  Note that a single VN0 credit holds access to potentially multiple flit buffers.  For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits.  A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
          "Desc": "VN0 Credit Used",
          "EvSel": 54,
     },
     "R3QPI.VN0_CREDITS_USED.NCS": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a VN0 credit was used on the DRS message channel.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This counts the number of times a VN0 credit was used.  Note that a single VN0 credit holds access to potentially multiple flit buffers.  For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits.  A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
          "Desc": "VN0 Credit Used",
          "EvSel": 54,
          "Umask": "bxx1xxxxx",
     },
     "R3QPI.VN0_CREDITS_USED.NCB": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a VN0 credit was used on the DRS message channel.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This counts the number of times a VN0 credit was used.  Note that a single VN0 credit holds access to potentially multiple flit buffers.  For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits.  A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
          "Desc": "VN0 Credit Used",
          "EvSel": 54,
          "Umask": "bxxx1xxxx",
     },
     "R3QPI.VN0_CREDITS_USED.DRS": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a VN0 credit was used on the DRS message channel.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This counts the number of times a VN0 credit was used.  Note that a single VN0 credit holds access to potentially multiple flit buffers.  For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits.  A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
          "Desc": "VN0 Credit Used",
          "EvSel": 54,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.VN0_CREDITS_USED.SNP": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a VN0 credit was used on the DRS message channel.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This counts the number of times a VN0 credit was used.  Note that a single VN0 credit holds access to potentially multiple flit buffers.  For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits.  A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
          "Desc": "VN0 Credit Used",
          "EvSel": 54,
          "Umask": "bxxxxxx1x",
     },
     "R3QPI.VN0_CREDITS_USED.HOM": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a VN0 credit was used on the DRS message channel.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This counts the number of times a VN0 credit was used.  Note that a single VN0 credit holds access to potentially multiple flit buffers.  For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits.  A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
          "Desc": "VN0 Credit Used",
          "EvSel": 54,
          "Umask": "bxxxxxxx1",
     },
     "R3QPI.VN0_CREDITS_USED.NDR": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VN0_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of times a VN0 credit was used on the DRS message channel.  In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into.  There are two credit pools, VNA and VN0.  VNA is a shared pool used to achieve high performance.  The VN0 pool has reserved entries for each message class and is used to prevent deadlock.  Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail.  This counts the number of times a VN0 credit was used.  Note that a single VN0 credit holds access to potentially multiple flit buffers.  For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits.  A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
          "Desc": "VN0 Credit Used",
          "EvSel": 54,
          "Umask": "bxxxxx1xx",
     },
     "R3QPI.VNA_CREDITS_ACQUIRED": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VNA_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of QPI VNA Credit acquisitions.  This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder.  VNA credits are used by all message classes in order to communicate across QPI.  If a packet is unable to acquire credits, it will then attempt to use credts from the VN0 pool.  Note that a single packet may require multiple flit buffers (i.e. when data is being transfered).  Therefore, this event will increment by the number of credits acquired in each cycle.  Filtering based on message class is not provided.  One can count the number of packets transfered in a given message class using an qfclk event.",
          "Desc": "VNA credit Acquisitions",
          "EvSel": 51,
          "MaxIncCyc": 4,
     },
     "R3QPI.VNA_CREDITS_REJECT": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VNA_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full).  It is possible to filter this event by message class.  Some packets use more than one flit buffer, and therefore must acquire multiple credits.  Therefore, one could get a reject even if the VNA credits were not fully used up.  The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress).  VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially.  This can happen if the rest of the uncore is unable to drain the requests fast enough.",
          "Desc": "VNA Credit Reject",
          "EvSel": 52,
     },
     "R3QPI.VNA_CREDITS_REJECT.NCS": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VNA_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full).  It is possible to filter this event by message class.  Some packets use more than one flit buffer, and therefore must acquire multiple credits.  Therefore, one could get a reject even if the VNA credits were not fully used up.  The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress).  VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially.  This can happen if the rest of the uncore is unable to drain the requests fast enough.",
          "Desc": "VNA Credit Reject",
          "EvSel": 52,
          "Umask": "bxx1xxxxx",
     },
     "R3QPI.VNA_CREDITS_REJECT.NCB": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VNA_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full).  It is possible to filter this event by message class.  Some packets use more than one flit buffer, and therefore must acquire multiple credits.  Therefore, one could get a reject even if the VNA credits were not fully used up.  The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress).  VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially.  This can happen if the rest of the uncore is unable to drain the requests fast enough.",
          "Desc": "VNA Credit Reject",
          "EvSel": 52,
          "Umask": "bxxx1xxxx",
     },
     "R3QPI.VNA_CREDITS_REJECT.DRS": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VNA_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full).  It is possible to filter this event by message class.  Some packets use more than one flit buffer, and therefore must acquire multiple credits.  Therefore, one could get a reject even if the VNA credits were not fully used up.  The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress).  VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially.  This can happen if the rest of the uncore is unable to drain the requests fast enough.",
          "Desc": "VNA Credit Reject",
          "EvSel": 52,
          "Umask": "bxxxx1xxx",
     },
     "R3QPI.VNA_CREDITS_REJECT.SNP": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VNA_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full).  It is possible to filter this event by message class.  Some packets use more than one flit buffer, and therefore must acquire multiple credits.  Therefore, one could get a reject even if the VNA credits were not fully used up.  The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress).  VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially.  This can happen if the rest of the uncore is unable to drain the requests fast enough.",
          "Desc": "VNA Credit Reject",
          "EvSel": 52,
          "Umask": "bxxxxxx1x",
     },
     "R3QPI.VNA_CREDITS_REJECT.HOM": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VNA_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full).  It is possible to filter this event by message class.  Some packets use more than one flit buffer, and therefore must acquire multiple credits.  Therefore, one could get a reject even if the VNA credits were not fully used up.  The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress).  VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially.  This can happen if the rest of the uncore is unable to drain the requests fast enough.",
          "Desc": "VNA Credit Reject",
          "EvSel": 52,
          "Umask": "bxxxxxxx1",
     },
     "R3QPI.VNA_CREDITS_REJECT.NDR": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VNA_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full).  It is possible to filter this event by message class.  Some packets use more than one flit buffer, and therefore must acquire multiple credits.  Therefore, one could get a reject even if the VNA credits were not fully used up.  The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress).  VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially.  This can happen if the rest of the uncore is unable to drain the requests fast enough.",
          "Desc": "VNA Credit Reject",
          "EvSel": 52,
          "Umask": "bxxxxx1xx",
     },
     "R3QPI.VNA_CREDIT_CYCLES_OUT": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VNA_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of QPI uclk cycles when the transmitted has no VNA credits available and therefore cannot send any requests on this channel.  Note that this does not mean that no flits can be transmitted, as those holding VN0 credits will still (potentially) be able to transmit.  Generally it is the goal of the uncore that VNA credits should not run out, as this can substantially throttle back useful QPI bandwidth.",
          "Desc": "Cycles with no VNA credits available",
          "EvSel": 49,
     },
     "R3QPI.VNA_CREDIT_CYCLES_USED": {
          "Box": "R3QPI",
          "Category": "R3QPI LINK_VNA_CREDITS Events",
          "Counters": "0-1",
          "Defn": "Number of QPI uclk cycles with one or more VNA credits in use.  This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average number of used VNA credits.",
          "Desc": "Cycles with 1 or more VNA credits in use",
          "EvSel": 50,
     },

# CBO:
     "CBO.CLOCKTICKS": {
          "Box": "CBO",
          "Category": "CBO UCLK Events",
          "Counters": "0-3",
          "Desc": "Uncore Clocks",
          "EvSel": 0,
     },
     "CBO.COUNTER0_OCCUPANCY": {
          "Box": "CBO",
          "Category": "CBO OCCUPANCY Events",
          "Counters": "1-3",
          "Defn": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0.   The filtering available is found in the control register - threshold, invert and edge detect.   E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
          "Desc": "Counter 0 Occupancy",
          "EvSel": 31,
          "MaxIncCyc": 20,
          "SubCtr": 1,
     },
     "CBO.ISMQ_DRD_MISS_OCC": {
          "Box": "CBO",
          "Category": "CBO ISMQ Events",
          "Counters": "0-1",
          "EvSel": 33,
          "MaxIncCyc": 20,
          "SubCtr": 1,
     },
     "CBO.LLC_LOOKUP": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2.  This has numerous filters available.  Note the non-standard filtering equation.  This event will count requests that lookup the cache multiple times with multiple increments.  One must ALWAYS set filter mask bit 0 and select a state or states to match.  Otherwise, the event will count nothing.   CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
          "Desc": "Cache Lookups",
          "EvSel": 52,
          "Notes": "Bit 0 of the umask must always be set for this event.  This allows us to match a given state (or states).  The state is programmed in Cn_MSR_PMON_BOX_FILTER.state.   The state field is a bit mask, so you can select (and monitor) multiple states at a time.  0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F.  For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field.   To monitor any lookup, set the field to 0x1F.",
     },
     "CBO.LLC_LOOKUP.DATA_READ": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2.  This has numerous filters available.  Note the non-standard filtering equation.  This event will count requests that lookup the cache multiple times with multiple increments.  One must ALWAYS set filter mask bit 0 and select a state or states to match.  Otherwise, the event will count nothing.   CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
          "Desc": "Cache Lookups",
          "EvSel": 52,
          "Notes": "Bit 0 of the umask must always be set for this event.  This allows us to match a given state (or states).  The state is programmed in Cn_MSR_PMON_BOX_FILTER.state.   The state field is a bit mask, so you can select (and monitor) multiple states at a time.  0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F.  For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field.   To monitor any lookup, set the field to 0x1F.",
          "Umask": "b00000011",
     },
     "CBO.LLC_LOOKUP.REMOTE_SNOOP": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2.  This has numerous filters available.  Note the non-standard filtering equation.  This event will count requests that lookup the cache multiple times with multiple increments.  One must ALWAYS set filter mask bit 0 and select a state or states to match.  Otherwise, the event will count nothing.   CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
          "Desc": "Cache Lookups",
          "EvSel": 52,
          "Notes": "Bit 0 of the umask must always be set for this event.  This allows us to match a given state (or states).  The state is programmed in Cn_MSR_PMON_BOX_FILTER.state.   The state field is a bit mask, so you can select (and monitor) multiple states at a time.  0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F.  For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field.   To monitor any lookup, set the field to 0x1F.",
          "Umask": "b00001001",
     },
     "CBO.LLC_LOOKUP.WRITE": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2.  This has numerous filters available.  Note the non-standard filtering equation.  This event will count requests that lookup the cache multiple times with multiple increments.  One must ALWAYS set filter mask bit 0 and select a state or states to match.  Otherwise, the event will count nothing.   CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
          "Desc": "Cache Lookups",
          "EvSel": 52,
          "Notes": "Bit 0 of the umask must always be set for this event.  This allows us to match a given state (or states).  The state is programmed in Cn_MSR_PMON_BOX_FILTER.state.   The state field is a bit mask, so you can select (and monitor) multiple states at a time.  0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F.  For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field.   To monitor any lookup, set the field to 0x1F.",
          "Umask": "b00000101",
     },
     "CBO.LLC_LOOKUP.NID": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Counters": "0-1",
          "Defn": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2.  This has numerous filters available.  Note the non-standard filtering equation.  This event will count requests that lookup the cache multiple times with multiple increments.  One must ALWAYS set filter mask bit 0 and select a state or states to match.  Otherwise, the event will count nothing.   CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
          "Desc": "Cache Lookups",
          "EvSel": 52,
          "Notes": "Bit 0 of the umask must always be set for this event.  This allows us to match a given state (or states).  The state is programmed in Cn_MSR_PMON_BOX_FILTER.state.   The state field is a bit mask, so you can select (and monitor) multiple states at a time.  0 = I (miss), 1 = S, 2 = E, 3 = M, 4 = F.  For example, if you wanted to monitor F and S hits, you could set 10010b in the 5-bit state field.   To monitor any lookup, set the field to 0x1F.",
          "Umask": "b01000001",
     },
     "CBO.LLC_VICTIMS": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Counters": "0-1",
          "Defn": "Counts the number of lines that were victimized on a fill.  This can be filtered by the state that the line was in.",
          "Desc": "Lines Victimized",
          "EvSel": 55,
     },
     "CBO.LLC_VICTIMS.MISS": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Counters": "0-1",
          "Defn": "Counts the number of lines that were victimized on a fill.  This can be filtered by the state that the line was in.",
          "Desc": "Lines Victimized",
          "EvSel": 55,
          "Umask": "bxxxx1xxx",
     },
     "CBO.LLC_VICTIMS.NID": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Counters": "0-1",
          "Defn": "Counts the number of lines that were victimized on a fill.  This can be filtered by the state that the line was in.",
          "Desc": "Lines Victimized",
          "EvSel": 55,
          "Umask": "bx1xxxxxx",
     },
     "CBO.LLC_VICTIMS.S_STATE": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Counters": "0-1",
          "Defn": "Counts the number of lines that were victimized on a fill.  This can be filtered by the state that the line was in.",
          "Desc": "Lines Victimized",
          "EvSel": 55,
          "Umask": "bxxxxx1xx",
     },
     "CBO.LLC_VICTIMS.E_STATE": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Counters": "0-1",
          "Defn": "Counts the number of lines that were victimized on a fill.  This can be filtered by the state that the line was in.",
          "Desc": "Lines Victimized",
          "EvSel": 55,
          "Umask": "bxxxxxx1x",
     },
     "CBO.LLC_VICTIMS.M_STATE": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Counters": "0-1",
          "Defn": "Counts the number of lines that were victimized on a fill.  This can be filtered by the state that the line was in.",
          "Desc": "Lines Victimized",
          "EvSel": 55,
          "Umask": "bxxxxxxx1",
     },
     "CBO.MISC": {
          "Box": "CBO",
          "Category": "CBO MISC Events",
          "Counters": "0-1",
          "Defn": "Miscellaneous events in the Cbo.",
          "Desc": "Cbo Misc",
          "EvSel": 57,
     },
     "CBO.MISC.RFO_HIT_S": {
          "Box": "CBO",
          "Category": "CBO MISC Events",
          "Counters": "0-1",
          "Defn": "Miscellaneous events in the Cbo.",
          "Desc": "Cbo Misc",
          "EvSel": 57,
          "Umask": "bxxxx1xxx",
     },
     "CBO.MISC.RSPI_WAS_FSE": {
          "Box": "CBO",
          "Category": "CBO MISC Events",
          "Counters": "0-1",
          "Defn": "Miscellaneous events in the Cbo.",
          "Desc": "Cbo Misc",
          "EvSel": 57,
          "Umask": "bxxxxxxx1",
     },
     "CBO.MISC.STARTED": {
          "Box": "CBO",
          "Category": "CBO MISC Events",
          "Counters": "0-1",
          "Defn": "Miscellaneous events in the Cbo.",
          "Desc": "Cbo Misc",
          "EvSel": 57,
          "Umask": "bxxxxx1xx",
     },
     "CBO.MISC.WC_ALIASING": {
          "Box": "CBO",
          "Category": "CBO MISC Events",
          "Counters": "0-1",
          "Defn": "Miscellaneous events in the Cbo.",
          "Desc": "Cbo Misc",
          "EvSel": 57,
          "Umask": "bxxxxxx1x",
     },
     "CBO.RING_AD_USED": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.  We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "AD Ring In Use",
          "EvSel": 27,
     },
     "CBO.RING_AD_USED.UP_ODD": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.  We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "AD Ring In Use",
          "EvSel": 27,
          "Umask": "bxxxxxx1x",
     },
     "CBO.RING_AD_USED.DOWN_ODD": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.  We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "AD Ring In Use",
          "EvSel": 27,
          "Umask": "bxxxx1xxx",
     },
     "CBO.RING_AD_USED.DOWN_EVEN": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.  We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "AD Ring In Use",
          "EvSel": 27,
          "Umask": "bxxxxx1xx",
     },
     "CBO.RING_AD_USED.UP_EVEN": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.  We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "AD Ring In Use",
          "EvSel": 27,
          "Umask": "bxxxxxxx1",
     },
     "CBO.RING_AK_USED": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "AK Ring In Use",
          "EvSel": 28,
     },
     "CBO.RING_AK_USED.UP_ODD": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "AK Ring In Use",
          "EvSel": 28,
          "Umask": "bxxxxxx1x",
     },
     "CBO.RING_AK_USED.DOWN_ODD": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "AK Ring In Use",
          "EvSel": 28,
          "Umask": "bxxxx1xxx",
     },
     "CBO.RING_AK_USED.DOWN_EVEN": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "AK Ring In Use",
          "EvSel": 28,
          "Umask": "bxxxxx1xx",
     },
     "CBO.RING_AK_USED.UP_EVEN": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "AK Ring In Use",
          "EvSel": 28,
          "Umask": "bxxxxxxx1",
     },
     "CBO.RING_BL_USED": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from  the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "BL Ring in Use",
          "EvSel": 29,
     },
     "CBO.RING_BL_USED.UP_ODD": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from  the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "BL Ring in Use",
          "EvSel": 29,
          "Umask": "bxxxxxx1x",
     },
     "CBO.RING_BL_USED.DOWN_ODD": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from  the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "BL Ring in Use",
          "EvSel": 29,
          "Umask": "bxxxx1xxx",
     },
     "CBO.RING_BL_USED.DOWN_EVEN": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from  the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "BL Ring in Use",
          "EvSel": 29,
          "Umask": "bxxxxx1xx",
     },
     "CBO.RING_BL_USED.UP_EVEN": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from  the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring.  On the left side of the ring, the \"UP\" direction is on the clockwise ring and \"DN\" is on the counter-clockwise ring.  On the right side of the ring, this is reversed.  The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring.  In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
          "Desc": "BL Ring in Use",
          "EvSel": 29,
          "Umask": "bxxxxxxx1",
     },
     "CBO.RING_BOUNCES": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "0-1",
          "Desc": "Number of LLC responses that bounced on the Ring.",
          "EvSel": 5,
     },
     "CBO.RING_BOUNCES.IV_CORE": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "0-1",
          "Desc": "Number of LLC responses that bounced on the Ring.",
          "EvSel": 5,
          "Umask": "bxxxx1xxx",
     },
     "CBO.RING_BOUNCES.AK_CORE": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "0-1",
          "Desc": "Number of LLC responses that bounced on the Ring.",
          "EvSel": 5,
          "Umask": "bxxxxxx1x",
     },
     "CBO.RING_BOUNCES.BL_CORE": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "0-1",
          "Desc": "Number of LLC responses that bounced on the Ring.",
          "EvSel": 5,
          "Umask": "bxxxxx1xx",
     },
     "CBO.RING_IV_USED": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.  There is only 1 IV ring in JKT.  Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DOWN_EVEN.  To monitor the \"Odd\" ring, they should select both UP_ODD and DOWN_ODD.",
          "Desc": "IV Ring in Use",
          "EvSel": 30,
     },
     "CBO.RING_IV_USED.ANY": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "2-3",
          "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.  There is only 1 IV ring in JKT.  Therefore, if one wants to monitor the \"Even\" ring, they should select both UP_EVEN and DOWN_EVEN.  To monitor the \"Odd\" ring, they should select both UP_ODD and DOWN_ODD.",
          "Desc": "IV Ring in Use",
          "EvSel": 30,
          "Umask": "b00001111",
     },
     "CBO.RING_SRC_THRTL": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Counters": "0-1",
          "EvSel": 7,
     },
     "CBO.RxR_EXT_STARVED": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts cycles in external starvation.  This occurs when one of the ingress queues is being starved by the other queues.",
          "Desc": "Ingress Arbiter Blocking Cycles",
          "EvSel": 18,
     },
     "CBO.RxR_EXT_STARVED.IPQ": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts cycles in external starvation.  This occurs when one of the ingress queues is being starved by the other queues.",
          "Desc": "Ingress Arbiter Blocking Cycles",
          "EvSel": 18,
          "Umask": "bxxxxxx1x",
     },
     "CBO.RxR_EXT_STARVED.ISMQ_BIDS": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts cycles in external starvation.  This occurs when one of the ingress queues is being starved by the other queues.",
          "Desc": "Ingress Arbiter Blocking Cycles",
          "EvSel": 18,
          "Umask": "bxxxx1xxx",
     },
     "CBO.RxR_EXT_STARVED.ISMQ": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts cycles in external starvation.  This occurs when one of the ingress queues is being starved by the other queues.",
          "Desc": "Ingress Arbiter Blocking Cycles",
          "EvSel": 18,
          "Umask": "bxxxxx1xx",
     },
     "CBO.RxR_EXT_STARVED.IRQ": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts cycles in external starvation.  This occurs when one of the ingress queues is being starved by the other queues.",
          "Desc": "Ingress Arbiter Blocking Cycles",
          "EvSel": 18,
          "Umask": "bxxxxxxx1",
     },
     "CBO.RxR_INSERTS": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts number of allocations per cycle into the specified Ingress queue.",
          "Desc": "Ingress Allocations",
          "EvSel": 19,
          "Notes": "IRQ_REJECTED should not be Ored with the other umasks.",
     },
     "CBO.RxR_INSERTS.VFIFO": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts number of allocations per cycle into the specified Ingress queue.",
          "Desc": "Ingress Allocations",
          "EvSel": 19,
          "Notes": "IRQ_REJECTED should not be Ored with the other umasks.",
          "Umask": "bxxx1xxxx",
     },
     "CBO.RxR_INSERTS.IPQ": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts number of allocations per cycle into the specified Ingress queue.",
          "Desc": "Ingress Allocations",
          "EvSel": 19,
          "Notes": "IRQ_REJECTED should not be Ored with the other umasks.",
          "Umask": "bxxxxx1xx",
     },
     "CBO.RxR_INSERTS.IRQ_REJECTED": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts number of allocations per cycle into the specified Ingress queue.",
          "Desc": "Ingress Allocations",
          "EvSel": 19,
          "Notes": "IRQ_REJECTED should not be Ored with the other umasks.",
          "Umask": "bxxxxxx1x",
     },
     "CBO.RxR_INSERTS.IRQ": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts number of allocations per cycle into the specified Ingress queue.",
          "Desc": "Ingress Allocations",
          "EvSel": 19,
          "Notes": "IRQ_REJECTED should not be Ored with the other umasks.",
          "Umask": "bxxxxxxx1",
     },
     "CBO.RxR_IPQ_RETRY": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Defn": "Number of times a snoop (probe) request had to retry.  Filters exist to cover some of the common cases retries.",
          "Desc": "Probe Queue Retries",
          "EvSel": 49,
     },
     "CBO.RxR_IPQ_RETRY.QPI_CREDITS": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Defn": "Number of times a snoop (probe) request had to retry.  Filters exist to cover some of the common cases retries.",
          "Desc": "Probe Queue Retries",
          "EvSel": 49,
          "Umask": "bxxx1xxxx",
     },
     "CBO.RxR_IPQ_RETRY.ADDR_CONFLICT": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Defn": "Number of times a snoop (probe) request had to retry.  Filters exist to cover some of the common cases retries.",
          "Desc": "Probe Queue Retries",
          "EvSel": 49,
          "Umask": "bxxxxx1xx",
     },
     "CBO.RxR_IPQ_RETRY.ANY": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Defn": "Number of times a snoop (probe) request had to retry.  Filters exist to cover some of the common cases retries.",
          "Desc": "Probe Queue Retries",
          "EvSel": 49,
          "Umask": "bxxxxxxx1",
     },
     "CBO.RxR_IPQ_RETRY.FULL": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Defn": "Number of times a snoop (probe) request had to retry.  Filters exist to cover some of the common cases retries.",
          "Desc": "Probe Queue Retries",
          "EvSel": 49,
          "Umask": "bxxxxxx1x",
     },
     "CBO.RxR_IRQ_RETRY": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Desc": "Ingress Request Queue Rejects",
          "EvSel": 50,
     },
     "CBO.RxR_IRQ_RETRY.RTID": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Desc": "Ingress Request Queue Rejects",
          "EvSel": 50,
          "Umask": "bxxxx1xxx",
     },
     "CBO.RxR_IRQ_RETRY.QPI_CREDITS": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Desc": "Ingress Request Queue Rejects",
          "EvSel": 50,
          "Umask": "bxxx1xxxx",
     },
     "CBO.RxR_IRQ_RETRY.ADDR_CONFLICT": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Desc": "Ingress Request Queue Rejects",
          "EvSel": 50,
          "Umask": "bxxxxx1xx",
     },
     "CBO.RxR_IRQ_RETRY.ANY": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Desc": "Ingress Request Queue Rejects",
          "EvSel": 50,
          "Umask": "bxxxxxxx1",
     },
     "CBO.RxR_IRQ_RETRY.FULL": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Desc": "Ingress Request Queue Rejects",
          "EvSel": 50,
          "Umask": "bxxxxxx1x",
     },
     "CBO.RxR_ISMQ_RETRY": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Defn": "Number of times a transaction flowing through the ISMQ had to retry.  Transactions pass through the ISMQ as responses for requests that already exist in the Cbo.  Some examples include: when data is returned or when snoop responses come back from the cores.",
          "Desc": "ISMQ Retries",
          "EvSel": 51,
     },
     "CBO.RxR_ISMQ_RETRY.RTID": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Defn": "Number of times a transaction flowing through the ISMQ had to retry.  Transactions pass through the ISMQ as responses for requests that already exist in the Cbo.  Some examples include: when data is returned or when snoop responses come back from the cores.",
          "Desc": "ISMQ Retries",
          "EvSel": 51,
          "Umask": "bxxxx1xxx",
     },
     "CBO.RxR_ISMQ_RETRY.QPI_CREDITS": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Defn": "Number of times a transaction flowing through the ISMQ had to retry.  Transactions pass through the ISMQ as responses for requests that already exist in the Cbo.  Some examples include: when data is returned or when snoop responses come back from the cores.",
          "Desc": "ISMQ Retries",
          "EvSel": 51,
          "Umask": "bxxx1xxxx",
     },
     "CBO.RxR_ISMQ_RETRY.ANY": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Defn": "Number of times a transaction flowing through the ISMQ had to retry.  Transactions pass through the ISMQ as responses for requests that already exist in the Cbo.  Some examples include: when data is returned or when snoop responses come back from the cores.",
          "Desc": "ISMQ Retries",
          "EvSel": 51,
          "Umask": "bxxxxxxx1",
     },
     "CBO.RxR_ISMQ_RETRY.IIO_CREDITS": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Defn": "Number of times a transaction flowing through the ISMQ had to retry.  Transactions pass through the ISMQ as responses for requests that already exist in the Cbo.  Some examples include: when data is returned or when snoop responses come back from the cores.",
          "Desc": "ISMQ Retries",
          "EvSel": 51,
          "Umask": "bxx1xxxxx",
     },
     "CBO.RxR_ISMQ_RETRY.FULL": {
          "Box": "CBO",
          "Category": "CBO INGRESS_RETRY Events",
          "Counters": "0-1",
          "Defn": "Number of times a transaction flowing through the ISMQ had to retry.  Transactions pass through the ISMQ as responses for requests that already exist in the Cbo.  Some examples include: when data is returned or when snoop responses come back from the cores.",
          "Desc": "ISMQ Retries",
          "EvSel": 51,
          "Umask": "bxxxxxx1x",
     },
     "CBO.RxR_OCCUPANCY": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": 0,
          "Defn": "Counts number of entries in the specified Ingress queue in each cycle.",
          "Desc": "Ingress Occupancy",
          "EvSel": 17,
          "MaxIncCyc": 20,
          "Notes": "IRQ_REJECTED should not be Ored with the other umasks.",
          "SubCtr": 1,
     },
     "CBO.RxR_OCCUPANCY.VFIFO": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": 0,
          "Defn": "Counts number of entries in the specified Ingress queue in each cycle.",
          "Desc": "Ingress Occupancy",
          "EvSel": 17,
          "MaxIncCyc": 20,
          "Notes": "IRQ_REJECTED should not be Ored with the other umasks.",
          "SubCtr": 1,
          "Umask": "bxxx1xxxx",
     },
     "CBO.RxR_OCCUPANCY.IPQ": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": 0,
          "Defn": "Counts number of entries in the specified Ingress queue in each cycle.",
          "Desc": "Ingress Occupancy",
          "EvSel": 17,
          "MaxIncCyc": 20,
          "Notes": "IRQ_REJECTED should not be Ored with the other umasks.",
          "SubCtr": 1,
          "Umask": "bxxxxx1xx",
     },
     "CBO.RxR_OCCUPANCY.IRQ_REJECTED": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": 0,
          "Defn": "Counts number of entries in the specified Ingress queue in each cycle.",
          "Desc": "Ingress Occupancy",
          "EvSel": 17,
          "MaxIncCyc": 20,
          "Notes": "IRQ_REJECTED should not be Ored with the other umasks.",
          "SubCtr": 1,
          "Umask": "bxxxxxx1x",
     },
     "CBO.RxR_OCCUPANCY.IRQ": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Counters": 0,
          "Defn": "Counts number of entries in the specified Ingress queue in each cycle.",
          "Desc": "Ingress Occupancy",
          "EvSel": 17,
          "MaxIncCyc": 20,
          "Notes": "IRQ_REJECTED should not be Ored with the other umasks.",
          "SubCtr": 1,
          "Umask": "bxxxxxxx1",
     },
     "CBO.TOR_INSERTS": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
     },
     "CBO.TOR_INSERTS.NID_MISS_ALL": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
          "Umask": "b01001010",
     },
     "CBO.TOR_INSERTS.NID_OPCODE": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
          "Umask": "b01000001",
     },
     "CBO.TOR_INSERTS.MISS_OPCODE": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
          "Umask": "b00000011",
     },
     "CBO.TOR_INSERTS.NID_ALL": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
          "Umask": "b01001000",
     },
     "CBO.TOR_INSERTS.NID_EVICTION": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
          "Umask": "b01000100",
     },
     "CBO.TOR_INSERTS.NID_MISS_OPCODE": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
          "Umask": "b01000011",
     },
     "CBO.TOR_INSERTS.EVICTION": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
          "Umask": "b00000100",
     },
     "CBO.TOR_INSERTS.WB": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
          "Umask": "b00010000",
     },
     "CBO.TOR_INSERTS.NID_WB": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
          "Umask": "b01010000",
     },
     "CBO.TOR_INSERTS.OPCODE": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
          "Umask": "b00000001",
     },
     "CBO.TOR_INSERTS.MISS_ALL": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": "0-1",
          "Defn": "Counts the number of entries successfully inserted into the TOR that match  qualifications specified by the subevent.  There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc  to DRD (0x182).",
          "Desc": "TOR Inserts",
          "EvSel": 53,
          "Umask": "b00001010",
     },
     "CBO.TOR_OCCUPANCY": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": 0,
          "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
          "Desc": "TOR Occupancy",
          "EvSel": 54,
          "MaxIncCyc": 20,
          "SubCtr": 1,
     },
     "CBO.TOR_OCCUPANCY.NID_MISS_ALL": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": 0,
          "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
          "Desc": "TOR Occupancy",
          "EvSel": 54,
          "MaxIncCyc": 20,
          "SubCtr": 1,
          "Umask": "b01001010",
     },
     "CBO.TOR_OCCUPANCY.NID_OPCODE": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": 0,
          "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
          "Desc": "TOR Occupancy",
          "EvSel": 54,
          "MaxIncCyc": 20,
          "SubCtr": 1,
          "Umask": "b01000001",
     },
     "CBO.TOR_OCCUPANCY.MISS_OPCODE": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": 0,
          "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
          "Desc": "TOR Occupancy",
          "EvSel": 54,
          "MaxIncCyc": 20,
          "SubCtr": 1,
          "Umask": "b00000011",
     },
     "CBO.TOR_OCCUPANCY.ALL": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": 0,
          "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
          "Desc": "TOR Occupancy",
          "EvSel": 54,
          "MaxIncCyc": 20,
          "SubCtr": 1,
          "Umask": "b00001000",
     },
     "CBO.TOR_OCCUPANCY.NID_ALL": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": 0,
          "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
          "Desc": "TOR Occupancy",
          "EvSel": 54,
          "MaxIncCyc": 20,
          "SubCtr": 1,
          "Umask": "b01001000",
     },
     "CBO.TOR_OCCUPANCY.NID_EVICTION": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": 0,
          "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
          "Desc": "TOR Occupancy",
          "EvSel": 54,
          "MaxIncCyc": 20,
          "SubCtr": 1,
          "Umask": "b01000100",
     },
     "CBO.TOR_OCCUPANCY.NID_MISS_OPCODE": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": 0,
          "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
          "Desc": "TOR Occupancy",
          "EvSel": 54,
          "MaxIncCyc": 20,
          "SubCtr": 1,
          "Umask": "b01000011",
     },
     "CBO.TOR_OCCUPANCY.EVICTION": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": 0,
          "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
          "Desc": "TOR Occupancy",
          "EvSel": 54,
          "MaxIncCyc": 20,
          "SubCtr": 1,
          "Umask": "b00000100",
     },
     "CBO.TOR_OCCUPANCY.OPCODE": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": 0,
          "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
          "Desc": "TOR Occupancy",
          "EvSel": 54,
          "MaxIncCyc": 20,
          "SubCtr": 1,
          "Umask": "b00000001",
     },
     "CBO.TOR_OCCUPANCY.MISS_ALL": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Counters": 0,
          "Defn": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.   There are a number of subevent 'filters' but only a subset of the subevent combinations are valid.  Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set.  If, for example, one wanted to count DRD Local Misses, one should select \"MISS_OPC_MATCH\" and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
          "Desc": "TOR Occupancy",
          "EvSel": 54,
          "MaxIncCyc": 20,
          "SubCtr": 1,
          "Umask": "b00001010",
     },
     "CBO.TxR_ADS_USED": {
          "Box": "CBO",
          "Category": "CBO EGRESS Events",
          "Counters": "0-1",
          "EvSel": 4,
     },
     "CBO.TxR_INSERTS": {
          "Box": "CBO",
          "Category": "CBO EGRESS Events",
          "Counters": "0-1",
          "Defn": "Number of allocations into the Cbo Egress.  The Egress is used to queue up requests destined for the ring.",
          "Desc": "Egress Allocations",
          "EvSel": 2,
     },
     "CBO.TxR_INSERTS.BL_CACHE": {
          "Box": "CBO",
          "Category": "CBO EGRESS Events",
          "Counters": "0-1",
          "Defn": "Number of allocations into the Cbo Egress.  The Egress is used to queue up requests destined for the ring.",
          "Desc": "Egress Allocations",
          "EvSel": 2,
          "Umask": "bxxxxx1xx",
     },
     "CBO.TxR_INSERTS.AK_CORE": {
          "Box": "CBO",
          "Category": "CBO EGRESS Events",
          "Counters": "0-1",
          "Defn": "Number of allocations into the Cbo Egress.  The Egress is used to queue up requests destined for the ring.",
          "Desc": "Egress Allocations",
          "EvSel": 2,
          "Umask": "bxx1xxxxx",
     },
     "CBO.TxR_INSERTS.AD_CORE": {
          "Box": "CBO",
          "Category": "CBO EGRESS Events",
          "Counters": "0-1",
          "Defn": "Number of allocations into the Cbo Egress.  The Egress is used to queue up requests destined for the ring.",
          "Desc": "Egress Allocations",
          "EvSel": 2,
          "Umask": "bxxx1xxxx",
     },
     "CBO.TxR_INSERTS.IV_CACHE": {
          "Box": "CBO",
          "Category": "CBO EGRESS Events",
          "Counters": "0-1",
          "Defn": "Number of allocations into the Cbo Egress.  The Egress is used to queue up requests destined for the ring.",
          "Desc": "Egress Allocations",
          "EvSel": 2,
          "Umask": "bxxxx1xxx",
     },
     "CBO.TxR_INSERTS.BL_CORE": {
          "Box": "CBO",
          "Category": "CBO EGRESS Events",
          "Counters": "0-1",
          "Defn": "Number of allocations into the Cbo Egress.  The Egress is used to queue up requests destined for the ring.",
          "Desc": "Egress Allocations",
          "EvSel": 2,
          "Umask": "bx1xxxxxx",
     },
     "CBO.TxR_INSERTS.AK_CACHE": {
          "Box": "CBO",
          "Category": "CBO EGRESS Events",
          "Counters": "0-1",
          "Defn": "Number of allocations into the Cbo Egress.  The Egress is used to queue up requests destined for the ring.",
          "Desc": "Egress Allocations",
          "EvSel": 2,
          "Umask": "bxxxxxx1x",
     },
     "CBO.TxR_INSERTS.AD_CACHE": {
          "Box": "CBO",
          "Category": "CBO EGRESS Events",
          "Counters": "0-1",
          "Defn": "Number of allocations into the Cbo Egress.  The Egress is used to queue up requests destined for the ring.",
          "Desc": "Egress Allocations",
          "EvSel": 2,
          "Umask": "bxxxxxxx1",
     },

# HA:
     "HA.ADDR_OPC_MATCH": {
          "Box": "HA",
          "Category": "HA ADDR_OPCODE_MATCH Events",
          "Counters": "0-3",
          "Desc": "QPI Address/Opcode Match",
          "EvSel": 32,
     },
     "HA.ADDR_OPC_MATCH.FILT": {
          "Box": "HA",
          "Category": "HA ADDR_OPCODE_MATCH Events",
          "Counters": "0-3",
          "Desc": "QPI Address/Opcode Match",
          "EvSel": 32,
          "Umask": "b00000011",
     },
     "HA.CLOCKTICKS": {
          "Box": "HA",
          "Category": "HA UCLK Events",
          "Counters": "0-3",
          "Defn": "Counts the number of uclks in the HA.  This will be slightly different than the count in the Ubox because of enable/freeze delays.  The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
          "Desc": "uclks",
          "EvSel": 0,
     },
     "HA.CONFLICT_CYCLES": {
          "Box": "HA",
          "Category": "HA CONFLICTS Events",
          "Counters": "0-3",
          "Desc": "Conflict Checks",
          "EvSel": 11,
          "Broken": 1,
     },
     "HA.CONFLICT_CYCLES.CONFLICT": {
          "Box": "HA",
          "Category": "HA CONFLICTS Events",
          "Counters": "0-3",
          "Desc": "Conflict Checks",
          "EvSel": 11,
          "Umask": "bxxxxxx1x",
     },
     "HA.CONFLICT_CYCLES.NO_CONFLICT": {
          "Box": "HA",
          "Category": "HA CONFLICTS Events",
          "Counters": "0-3",
          "Desc": "Conflict Checks",
          "EvSel": 11,
          "Umask": "bxxxxxxx1",
          "Broken": 1,
     },
     "HA.DIRECT2CORE_COUNT": {
          "Box": "HA",
          "Category": "HA DIRECT2CORE Events",
          "Counters": "0-3",
          "Defn": "Number of Direct2Core messages sent",
          "Desc": "Direct2Core Messages Sent",
          "EvSel": 17,
          "Broken": 1,
     },
     "HA.DIRECT2CORE_CYCLES_DISABLED": {
          "Box": "HA",
          "Category": "HA DIRECT2CORE Events",
          "Counters": "0-3",
          "Defn": "Number of cycles in which Direct2Core was disabled",
          "Desc": "Cycles when Direct2Core was Disabled",
          "EvSel": 18,
          "Obscure": 1,
          "Broken": 1,
     },
     "HA.DIRECT2CORE_TXN_OVERRIDE": {
          "Box": "HA",
          "Category": "HA DIRECT2CORE Events",
          "Counters": "0-3",
          "Defn": "Number of Reads where Direct2Core overridden",
          "Desc": "Number of Reads that had Direct2Core Overridden",
          "EvSel": 19,
          "Broken": 1,
     },
     "HA.DIRECTORY_LOOKUP": {
          "Box": "HA",
          "Category": "HA DIRECTORY Events",
          "Counters": "0-3",
          "Defn": "Counts the number of transactions that looked up the directory.  Can be filtered by requests that had to snoop and those that did not have to.",
          "Desc": "Directory Lookups",
          "EvSel": 12,
          "Notes": "Only valid for parts that implement the Directory",
          "Broken": 1,
     },
     "HA.DIRECTORY_LOOKUP.NO_SNP": {
          "Box": "HA",
          "Category": "HA DIRECTORY Events",
          "Counters": "0-3",
          "Defn": "Counts the number of transactions that looked up the directory.  Can be filtered by requests that had to snoop and those that did not have to.",
          "Desc": "Directory Lookups",
          "EvSel": 12,
          "Notes": "Only valid for parts that implement the Directory",
          "Umask": "bxxxxxx1x",
          "Broken": 1,
     },
     "HA.DIRECTORY_LOOKUP.SNP": {
          "Box": "HA",
          "Category": "HA DIRECTORY Events",
          "Counters": "0-3",
          "Defn": "Counts the number of transactions that looked up the directory.  Can be filtered by requests that had to snoop and those that did not have to.",
          "Desc": "Directory Lookups",
          "EvSel": 12,
          "Notes": "Only valid for parts that implement the Directory",
          "Umask": "bxxxxxxx1",
          "Broken": 1,
     },
     "HA.DIRECTORY_UPDATE": {
          "Box": "HA",
          "Category": "HA DIRECTORY Events",
          "Counters": "0-3",
          "Defn": "Counts the number of directory updates that were required.  These result in writes to the memory controller.  This can be filtered by directory sets and directory clears.",
          "Desc": "Directory Updates",
          "EvSel": 13,
          "Notes": "Only valid for parts that implement the Directory",
          "Broken": 1,
     },
     "HA.DIRECTORY_UPDATE.SET": {
          "Box": "HA",
          "Category": "HA DIRECTORY Events",
          "Counters": "0-3",
          "Defn": "Counts the number of directory updates that were required.  These result in writes to the memory controller.  This can be filtered by directory sets and directory clears.",
          "Desc": "Directory Updates",
          "EvSel": 13,
          "Notes": "Only valid for parts that implement the Directory",
          "Umask": "bxxxxxxx1",
          "Broken": 1,
     },
     "HA.DIRECTORY_UPDATE.ANY": {
          "Box": "HA",
          "Category": "HA DIRECTORY Events",
          "Counters": "0-3",
          "Defn": "Counts the number of directory updates that were required.  These result in writes to the memory controller.  This can be filtered by directory sets and directory clears.",
          "Desc": "Directory Updates",
          "EvSel": 13,
          "Notes": "Only valid for parts that implement the Directory",
          "Umask": "bxxxxxx11",
          "Broken": 1,
     },
     "HA.DIRECTORY_UPDATE.CLEAR": {
          "Box": "HA",
          "Category": "HA DIRECTORY Events",
          "Counters": "0-3",
          "Defn": "Counts the number of directory updates that were required.  These result in writes to the memory controller.  This can be filtered by directory sets and directory clears.",
          "Desc": "Directory Updates",
          "EvSel": 13,
          "Notes": "Only valid for parts that implement the Directory",
          "Umask": "bxxxxxx1x",
          "Broken": 1,
     },
     "HA.IGR_NO_CREDIT_CYCLES": {
          "Box": "HA",
          "Category": "HA QPI_IGR_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent.  This can be filtered by the different credit pools and the different links.",
          "Desc": "Cycles without QPI Ingress Credits",
          "EvSel": 34,
     },
     "HA.IGR_NO_CREDIT_CYCLES.AD_QPI1": {
          "Box": "HA",
          "Category": "HA QPI_IGR_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent.  This can be filtered by the different credit pools and the different links.",
          "Desc": "Cycles without QPI Ingress Credits",
          "EvSel": 34,
          "Umask": "bxxxxxx1x",
     },
     "HA.IGR_NO_CREDIT_CYCLES.AD_QPI0": {
          "Box": "HA",
          "Category": "HA QPI_IGR_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent.  This can be filtered by the different credit pools and the different links.",
          "Desc": "Cycles without QPI Ingress Credits",
          "EvSel": 34,
          "Umask": "bxxxxxxx1",
     },
     "HA.IGR_NO_CREDIT_CYCLES.BL_QPI1": {
          "Box": "HA",
          "Category": "HA QPI_IGR_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent.  This can be filtered by the different credit pools and the different links.",
          "Desc": "Cycles without QPI Ingress Credits",
          "EvSel": 34,
          "Umask": "bxxxx1xxx",
     },
     "HA.IGR_NO_CREDIT_CYCLES.BL_QPI0": {
          "Box": "HA",
          "Category": "HA QPI_IGR_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent.  This can be filtered by the different credit pools and the different links.",
          "Desc": "Cycles without QPI Ingress Credits",
          "EvSel": 34,
          "Umask": "bxxxxx1xx",
     },
     "HA.IMC_RETRY": {
          "Box": "HA",
          "Category": "HA IMC_MISC Events",
          "Counters": "0-3",
          "Desc": "Retry Events",
          "EvSel": 30,
     },
     "HA.IMC_WRITES": {
          "Box": "HA",
          "Category": "HA IMC_WRITES Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of full line writes issued from the HA into the memory controller.  This counts for all four channels.  It can be filtered by full/partial and ISOCH/non-ISOCH.",
          "Desc": "HA to iMC Full Line Writes Issued",
          "EvSel": 26,
     },
     "HA.IMC_WRITES.PARTIAL_ISOCH": {
          "Box": "HA",
          "Category": "HA IMC_WRITES Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of full line writes issued from the HA into the memory controller.  This counts for all four channels.  It can be filtered by full/partial and ISOCH/non-ISOCH.",
          "Desc": "HA to iMC Full Line Writes Issued",
          "EvSel": 26,
          "Umask": "bxxxx1xxx",
     },
     "HA.IMC_WRITES.ALL": {
          "Box": "HA",
          "Category": "HA IMC_WRITES Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of full line writes issued from the HA into the memory controller.  This counts for all four channels.  It can be filtered by full/partial and ISOCH/non-ISOCH.",
          "Desc": "HA to iMC Full Line Writes Issued",
          "EvSel": 26,
          "Umask": "b00001111",
     },
     "HA.IMC_WRITES.PARTIAL": {
          "Box": "HA",
          "Category": "HA IMC_WRITES Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of full line writes issued from the HA into the memory controller.  This counts for all four channels.  It can be filtered by full/partial and ISOCH/non-ISOCH.",
          "Desc": "HA to iMC Full Line Writes Issued",
          "EvSel": 26,
          "Umask": "bxxxxxx1x",
     },
     "HA.IMC_WRITES.FULL": {
          "Box": "HA",
          "Category": "HA IMC_WRITES Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of full line writes issued from the HA into the memory controller.  This counts for all four channels.  It can be filtered by full/partial and ISOCH/non-ISOCH.",
          "Desc": "HA to iMC Full Line Writes Issued",
          "EvSel": 26,
          "Umask": "bxxxxxxx1",
     },
     "HA.IMC_WRITES.FULL_ISOCH": {
          "Box": "HA",
          "Category": "HA IMC_WRITES Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of full line writes issued from the HA into the memory controller.  This counts for all four channels.  It can be filtered by full/partial and ISOCH/non-ISOCH.",
          "Desc": "HA to iMC Full Line Writes Issued",
          "EvSel": 26,
          "Umask": "bxxxxx1xx",
     },
     "HA.REQUESTS": {
          "Box": "HA",
          "Category": "HA REQUESTS Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO).  Writes include all writes (streaming, evictions, HitM, etc).",
          "Desc": "Read and Write Requests",
          "EvSel": 1,
     },
     "HA.REQUESTS.READS": {
          "Box": "HA",
          "Category": "HA REQUESTS Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO).  Writes include all writes (streaming, evictions, HitM, etc).",
          "Desc": "Read and Write Requests",
          "EvSel": 1,
          "Umask": "b00000011",
     },
     "HA.REQUESTS.WRITES": {
          "Box": "HA",
          "Category": "HA REQUESTS Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO).  Writes include all writes (streaming, evictions, HitM, etc).",
          "Desc": "Read and Write Requests",
          "EvSel": 1,
          "Umask": "b00001100",
     },
     "HA.RPQ_CYCLES_NO_REG_CREDITS": {
          "Box": "HA",
          "Category": "HA RPQ_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC.  In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue).  This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads.  This count only tracks the regular credits  Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time.  One can filter based on the memory controller channel.  One or more channels can be tracked at a given time.",
          "Desc": "iMC RPQ Credits Empty - Regular",
          "EvSel": 21,
          "MaxIncCyc": 4,
     },
     "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN1": {
          "Box": "HA",
          "Category": "HA RPQ_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC.  In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue).  This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads.  This count only tracks the regular credits  Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time.  One can filter based on the memory controller channel.  One or more channels can be tracked at a given time.",
          "Desc": "iMC RPQ Credits Empty - Regular",
          "EvSel": 21,
          "MaxIncCyc": 4,
          "Umask": "bxxxxxx1x",
     },
     "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN2": {
          "Box": "HA",
          "Category": "HA RPQ_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC.  In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue).  This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads.  This count only tracks the regular credits  Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time.  One can filter based on the memory controller channel.  One or more channels can be tracked at a given time.",
          "Desc": "iMC RPQ Credits Empty - Regular",
          "EvSel": 21,
          "MaxIncCyc": 4,
          "Umask": "bxxxxx1xx",
     },
     "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN3": {
          "Box": "HA",
          "Category": "HA RPQ_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC.  In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue).  This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads.  This count only tracks the regular credits  Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time.  One can filter based on the memory controller channel.  One or more channels can be tracked at a given time.",
          "Desc": "iMC RPQ Credits Empty - Regular",
          "EvSel": 21,
          "MaxIncCyc": 4,
          "Umask": "bxxxx1xxx",
     },
     "HA.RPQ_CYCLES_NO_REG_CREDITS.CHN0": {
          "Box": "HA",
          "Category": "HA RPQ_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting reads from the HA into the iMC.  In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue).  This queue is broken into regular credits/buffers that are used by general reads, and \"special\" requests such as ISOCH reads.  This count only tracks the regular credits  Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time.  One can filter based on the memory controller channel.  One or more channels can be tracked at a given time.",
          "Desc": "iMC RPQ Credits Empty - Regular",
          "EvSel": 21,
          "MaxIncCyc": 4,
          "Umask": "bxxxxxxx1",
     },
     "HA.TAD_REQUESTS_G0": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 0 to 7.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 0",
          "EvSel": 27,
          "MaxIncCyc": 2,
     },
     "HA.TAD_REQUESTS_G0.REGION0": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 0 to 7.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 0",
          "EvSel": 27,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxxx1",
     },
     "HA.TAD_REQUESTS_G0.REGION7": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 0 to 7.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 0",
          "EvSel": 27,
          "MaxIncCyc": 2,
          "Umask": "b1xxxxxxx",
     },
     "HA.TAD_REQUESTS_G0.REGION3": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 0 to 7.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 0",
          "EvSel": 27,
          "MaxIncCyc": 2,
          "Umask": "bxxxx1xxx",
     },
     "HA.TAD_REQUESTS_G0.REGION4": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 0 to 7.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 0",
          "EvSel": 27,
          "MaxIncCyc": 2,
          "Umask": "bxxx1xxxx",
     },
     "HA.TAD_REQUESTS_G0.REGION2": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 0 to 7.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 0",
          "EvSel": 27,
          "MaxIncCyc": 2,
          "Umask": "bxxxxx1xx",
     },
     "HA.TAD_REQUESTS_G0.REGION1": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 0 to 7.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 0",
          "EvSel": 27,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxx1x",
     },
     "HA.TAD_REQUESTS_G0.REGION5": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 0 to 7.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 0",
          "EvSel": 27,
          "MaxIncCyc": 2,
          "Umask": "bxx1xxxxx",
     },
     "HA.TAD_REQUESTS_G0.REGION6": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 0 to 7.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 0",
          "EvSel": 27,
          "MaxIncCyc": 2,
          "Umask": "bx1xxxxxx",
     },
     "HA.TAD_REQUESTS_G1": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 8 to 10.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 1",
          "EvSel": 28,
          "MaxIncCyc": 2,
     },
     "HA.TAD_REQUESTS_G1.REGION9": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 8 to 10.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 1",
          "EvSel": 28,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxx1x",
     },
     "HA.TAD_REQUESTS_G1.REGION10": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 8 to 10.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 1",
          "EvSel": 28,
          "MaxIncCyc": 2,
          "Umask": "bxxxxx1xx",
     },
     "HA.TAD_REQUESTS_G1.REGION11": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 8 to 10.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 1",
          "EvSel": 28,
          "MaxIncCyc": 2,
          "Umask": "bxxxx1xxx",
     },
     "HA.TAD_REQUESTS_G1.REGION8": {
          "Box": "HA",
          "Category": "HA TAD Events",
          "Counters": "0-3",
          "Defn": "Counts the number of HA requests to a given TAD region.  There are up to 11 TAD (target address decode) regions in each home agent.  All requests destined for the memory controller must first be decoded to determine which TAD region they are in.  This event is filtered based on the TAD region ID, and covers regions 8 to 10.  This event is useful for understanding how applications are using the memory that is spread across the different memory regions.  It is particularly useful for \"Monroe\" systems that use the TAD to enable individual channels to enter self-refresh to save power.",
          "Desc": "HA Requests to a TAD Region - Group 1",
          "EvSel": 28,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxxx1",
     },
     "HA.TRACKER_INSERTS": {
          "Box": "HA",
          "Category": "HA TRACKER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of allocations into the local HA tracker pool.  This can be used in conjunction with the occupancy accumulation event in order to calculate average latency.  One cannot filter between reads and writes.  HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
          "Desc": "Tracker Allocations",
          "EvSel": 6,
     },
     "HA.TRACKER_INSERTS.ALL": {
          "Box": "HA",
          "Category": "HA TRACKER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of allocations into the local HA tracker pool.  This can be used in conjunction with the occupancy accumulation event in order to calculate average latency.  One cannot filter between reads and writes.  HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
          "Desc": "Tracker Allocations",
          "EvSel": 6,
          "Umask": "b00000011",
     },
     "HA.TxR_AD": {
          "Box": "HA",
          "Category": "HA OUTBOUND_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of outbound transactions on the AD ring.  This can be filtered by the NDR and SNP message classes.  See the filter descriptions for more details.",
          "Desc": "Outbound NDR Ring Transactions",
          "EvSel": 15,
     },
     "HA.TxR_AD.SNP": {
          "Box": "HA",
          "Category": "HA OUTBOUND_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of outbound transactions on the AD ring.  This can be filtered by the NDR and SNP message classes.  See the filter descriptions for more details.",
          "Desc": "Outbound NDR Ring Transactions",
          "EvSel": 15,
          "Umask": "bxxxxxx1x",
     },
     "HA.TxR_AD.NDR": {
          "Box": "HA",
          "Category": "HA OUTBOUND_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of outbound transactions on the AD ring.  This can be filtered by the NDR and SNP message classes.  See the filter descriptions for more details.",
          "Desc": "Outbound NDR Ring Transactions",
          "EvSel": 15,
          "Umask": "bxxxxxxx1",
     },
     "HA.TxR_AD_CYCLES_FULL": {
          "Box": "HA",
          "Category": "HA AD_EGRESS Events",
          "Counters": "0-3",
          "Defn": "AD Egress Full",
          "Desc": "AD Egress Full",
          "EvSel": 42,
     },
     "HA.TxR_AD_CYCLES_FULL.SCHED1": {
          "Box": "HA",
          "Category": "HA AD_EGRESS Events",
          "Counters": "0-3",
          "Defn": "AD Egress Full",
          "Desc": "AD Egress Full",
          "EvSel": 42,
          "Umask": "bxxxxxx1x",
     },
     "HA.TxR_AD_CYCLES_FULL.ALL": {
          "Box": "HA",
          "Category": "HA AD_EGRESS Events",
          "Counters": "0-3",
          "Defn": "AD Egress Full",
          "Desc": "AD Egress Full",
          "EvSel": 42,
          "Umask": "bxxxxxx11",
     },
     "HA.TxR_AD_CYCLES_FULL.SCHED0": {
          "Box": "HA",
          "Category": "HA AD_EGRESS Events",
          "Counters": "0-3",
          "Defn": "AD Egress Full",
          "Desc": "AD Egress Full",
          "EvSel": 42,
          "Umask": "bxxxxxxx1",
     },
     "HA.TxR_AK_CYCLES_FULL": {
          "Box": "HA",
          "Category": "HA AK_EGRESS Events",
          "Counters": "0-3",
          "Defn": "AK Egress Full",
          "Desc": "AK Egress Full",
          "EvSel": 50,
     },
     "HA.TxR_AK_CYCLES_FULL.SCHED1": {
          "Box": "HA",
          "Category": "HA AK_EGRESS Events",
          "Counters": "0-3",
          "Defn": "AK Egress Full",
          "Desc": "AK Egress Full",
          "EvSel": 50,
          "Umask": "bxxxxxx1x",
     },
     "HA.TxR_AK_CYCLES_FULL.ALL": {
          "Box": "HA",
          "Category": "HA AK_EGRESS Events",
          "Counters": "0-3",
          "Defn": "AK Egress Full",
          "Desc": "AK Egress Full",
          "EvSel": 50,
          "Umask": "bxxxxxx11",
     },
     "HA.TxR_AK_CYCLES_FULL.SCHED0": {
          "Box": "HA",
          "Category": "HA AK_EGRESS Events",
          "Counters": "0-3",
          "Defn": "AK Egress Full",
          "Desc": "AK Egress Full",
          "EvSel": 50,
          "Umask": "bxxxxxxx1",
     },
     "HA.TxR_AK_NDR": {
          "Box": "HA",
          "Category": "HA OUTBOUND_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of outbound NDR transactions sent on the AK ring.  NDR stands for \"non-data response\" and is generally used for completions that do not include data.  AK NDR is used for messages to the local socket.",
          "Desc": "Outbound NDR Ring Transactions",
          "EvSel": 14,
     },
     "HA.TxR_BL": {
          "Box": "HA",
          "Category": "HA OUTBOUND_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRS messages sent out on the BL ring.   This can be filtered by the destination.",
          "Desc": "Outbound DRS Ring Transactions to Cache",
          "EvSel": 16,
     },
     "HA.TxR_BL.DRS_QPI": {
          "Box": "HA",
          "Category": "HA OUTBOUND_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRS messages sent out on the BL ring.   This can be filtered by the destination.",
          "Desc": "Outbound DRS Ring Transactions to Cache",
          "EvSel": 16,
          "Umask": "bxxxxx1xx",
     },
     "HA.TxR_BL.DRS_CACHE": {
          "Box": "HA",
          "Category": "HA OUTBOUND_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRS messages sent out on the BL ring.   This can be filtered by the destination.",
          "Desc": "Outbound DRS Ring Transactions to Cache",
          "EvSel": 16,
          "Umask": "bxxxxxxx1",
     },
     "HA.TxR_BL.DRS_CORE": {
          "Box": "HA",
          "Category": "HA OUTBOUND_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRS messages sent out on the BL ring.   This can be filtered by the destination.",
          "Desc": "Outbound DRS Ring Transactions to Cache",
          "EvSel": 16,
          "Umask": "bxxxxxx1x",
     },
     "HA.TxR_BL_CYCLES_FULL": {
          "Box": "HA",
          "Category": "HA BL_EGRESS Events",
          "Counters": "0-3",
          "Defn": "BL Egress Full",
          "Desc": "BL Egress Full",
          "EvSel": 54,
     },
     "HA.TxR_BL_CYCLES_FULL.SCHED1": {
          "Box": "HA",
          "Category": "HA BL_EGRESS Events",
          "Counters": "0-3",
          "Defn": "BL Egress Full",
          "Desc": "BL Egress Full",
          "EvSel": 54,
          "Umask": "bxxxxxx1x",
     },
     "HA.TxR_BL_CYCLES_FULL.ALL": {
          "Box": "HA",
          "Category": "HA BL_EGRESS Events",
          "Counters": "0-3",
          "Defn": "BL Egress Full",
          "Desc": "BL Egress Full",
          "EvSel": 54,
          "Umask": "bxxxxxx11",
     },
     "HA.TxR_BL_CYCLES_FULL.SCHED0": {
          "Box": "HA",
          "Category": "HA BL_EGRESS Events",
          "Counters": "0-3",
          "Defn": "BL Egress Full",
          "Desc": "BL Egress Full",
          "EvSel": 54,
          "Umask": "bxxxxxxx1",
     },
     "HA.WPQ_CYCLES_NO_REG_CREDITS": {
          "Box": "HA",
          "Category": "HA WPQ_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC.  In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue).  This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes.  This count only tracks the regular credits  Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time.  One can filter based on the memory controller channel.  One or more channels can be tracked at a given time.",
          "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular",
          "EvSel": 24,
          "MaxIncCyc": 4,
     },
     "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN1": {
          "Box": "HA",
          "Category": "HA WPQ_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC.  In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue).  This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes.  This count only tracks the regular credits  Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time.  One can filter based on the memory controller channel.  One or more channels can be tracked at a given time.",
          "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular",
          "EvSel": 24,
          "MaxIncCyc": 4,
          "Umask": "bxxxxxx1x",
     },
     "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN2": {
          "Box": "HA",
          "Category": "HA WPQ_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC.  In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue).  This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes.  This count only tracks the regular credits  Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time.  One can filter based on the memory controller channel.  One or more channels can be tracked at a given time.",
          "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular",
          "EvSel": 24,
          "MaxIncCyc": 4,
          "Umask": "bxxxxx1xx",
     },
     "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN3": {
          "Box": "HA",
          "Category": "HA WPQ_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC.  In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue).  This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes.  This count only tracks the regular credits  Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time.  One can filter based on the memory controller channel.  One or more channels can be tracked at a given time.",
          "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular",
          "EvSel": 24,
          "MaxIncCyc": 4,
          "Umask": "bxxxx1xxx",
     },
     "HA.WPQ_CYCLES_NO_REG_CREDITS.CHN0": {
          "Box": "HA",
          "Category": "HA WPQ_CREDITS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when there are no \"regular\" credits available for posting writes from the HA into the iMC.  In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue).  This queue is broken into regular credits/buffers that are used by general writes, and \"special\" requests such as ISOCH writes.  This count only tracks the regular credits  Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time.  One can filter based on the memory controller channel.  One or more channels can be tracked at a given time.",
          "Desc": "HA iMC CHN0 WPQ Credits Empty - Regular",
          "EvSel": 24,
          "MaxIncCyc": 4,
          "Umask": "bxxxxxxx1",
     },

# iMC:
     "iMC.ACT_COUNT": {
          "Box": "iMC",
          "Category": "iMC ACT Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRAM Activate commands sent on this channel.  Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS.  One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
          "Desc": "DRAM Activate Count",
          "EvSel": 1,
     },
     "iMC.CAS_COUNT": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Counters": "0-3",
          "Defn": "DRAM RD_CAS and WR_CAS Commands",
          "Desc": "DRAM RD_CAS and WR_CAS Commands.",
          "EvSel": 4,
     },
     "iMC.CAS_COUNT.WR_RMM": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Counters": "0-3",
          "Defn": "DRAM RD_CAS and WR_CAS Commands",
          "Desc": "DRAM RD_CAS and WR_CAS Commands.",
          "EvSel": 4,
          "Umask": "bxxxx1xxx",
     },
     "iMC.CAS_COUNT.RD_UNDERFILL": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Counters": "0-3",
          "Defn": "DRAM RD_CAS and WR_CAS Commands",
          "Desc": "DRAM RD_CAS and WR_CAS Commands.",
          "EvSel": 4,
          "Umask": "bxxxxxx1x",
     },
     "iMC.CAS_COUNT.ALL": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Counters": "0-3",
          "Defn": "DRAM RD_CAS and WR_CAS Commands",
          "Desc": "DRAM RD_CAS and WR_CAS Commands.",
          "EvSel": 4,
          "Umask": "b00001111",
     },
     "iMC.CAS_COUNT.RD": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Counters": "0-3",
          "Defn": "DRAM RD_CAS and WR_CAS Commands",
          "Desc": "DRAM RD_CAS and WR_CAS Commands.",
          "EvSel": 4,
          "Umask": "b00000011",
     },
     "iMC.CAS_COUNT.RD_REG": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Counters": "0-3",
          "Defn": "DRAM RD_CAS and WR_CAS Commands",
          "Desc": "DRAM RD_CAS and WR_CAS Commands.",
          "EvSel": 4,
          "Umask": "bxxxxxxx1",
     },
     "iMC.CAS_COUNT.WR_WMM": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Counters": "0-3",
          "Defn": "DRAM RD_CAS and WR_CAS Commands",
          "Desc": "DRAM RD_CAS and WR_CAS Commands.",
          "EvSel": 4,
          "Umask": "bxxxxx1xx",
     },
     "iMC.CAS_COUNT.WR": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Counters": "0-3",
          "Defn": "DRAM RD_CAS and WR_CAS Commands",
          "Desc": "DRAM RD_CAS and WR_CAS Commands.",
          "EvSel": 4,
          "Umask": "b00001100",
     },
     "iMC.DRAM_PRE_ALL": {
          "Box": "iMC",
          "Category": "iMC DRAM_PRE_ALL Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times that the precharge all command was sent.",
          "Desc": "DRAM Precharge All Commands",
          "EvSel": 6,
     },
     "iMC.DRAM_REFRESH": {
          "Box": "iMC",
          "Category": "iMC DRAM_REFRESH Events",
          "Counters": "0-3",
          "Defn": "Counts the number of refreshes issued.",
          "Desc": "Number of DRAM Refreshes Issued",
          "EvSel": 5,
     },
     "iMC.DRAM_REFRESH.PANIC": {
          "Box": "iMC",
          "Category": "iMC DRAM_REFRESH Events",
          "Counters": "0-3",
          "Defn": "Counts the number of refreshes issued.",
          "Desc": "Number of DRAM Refreshes Issued",
          "EvSel": 5,
          "Umask": "bxxxxxx1x",
     },
     "iMC.DRAM_REFRESH.HIGH": {
          "Box": "iMC",
          "Category": "iMC DRAM_REFRESH Events",
          "Counters": "0-3",
          "Defn": "Counts the number of refreshes issued.",
          "Desc": "Number of DRAM Refreshes Issued",
          "EvSel": 5,
          "Umask": "bxxxxx1xx",
     },
     "iMC.ECC_CORRECTABLE_ERRORS": {
          "Box": "iMC",
          "Category": "iMC ECC Events",
          "Counters": "0-3",
          "Defn": "Counts the number of ECC errors detected and corrected by the iMC on this channel.  This counter is only useful with ECC DRAM devices.  This count will increment one time for each correction regardless of the number of bits corrected.  The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
          "Desc": "ECC Correctable Errors",
          "EvSel": 9,
     },
     "iMC.MAJOR_MODES": {
          "Box": "iMC",
          "Category": "iMC MAJOR_MODES Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel.   Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
          "Desc": "Cycles in a Major Mode",
          "EvSel": 7,
     },
     "iMC.MAJOR_MODES.ISOCH": {
          "Box": "iMC",
          "Category": "iMC MAJOR_MODES Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel.   Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
          "Desc": "Cycles in a Major Mode",
          "EvSel": 7,
          "Umask": "bxxxx1xxx",
     },
     "iMC.MAJOR_MODES.READ": {
          "Box": "iMC",
          "Category": "iMC MAJOR_MODES Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel.   Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
          "Desc": "Cycles in a Major Mode",
          "EvSel": 7,
          "Umask": "bxxxxxxx1",
     },
     "iMC.MAJOR_MODES.PARTIAL": {
          "Box": "iMC",
          "Category": "iMC MAJOR_MODES Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel.   Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
          "Desc": "Cycles in a Major Mode",
          "EvSel": 7,
          "Umask": "bxxxxx1xx",
     },
     "iMC.MAJOR_MODES.WRITE": {
          "Box": "iMC",
          "Category": "iMC MAJOR_MODES Events",
          "Counters": "0-3",
          "Defn": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel.   Major modes are channel-wide, and not a per-rank (or dimm or bank) mode.",
          "Desc": "Cycles in a Major Mode",
          "EvSel": 7,
          "Umask": "bxxxxxx1x",
     },
     "iMC.POWER_CHANNEL_DLLOFF": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
          "Desc": "Channel DLLOFF Cycles",
          "EvSel": 132,
          "Notes": "IBT = Input Buffer Termination = Off",
     },
     "iMC.POWER_CHANNEL_PPD": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Number of cycles when all the ranks in the channel are in PPD mode.  If IBT=off is enabled, then this can be used to count those cycles.  If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
          "Desc": "Channel PPD Cycles",
          "EvSel": 133,
          "MaxIncCyc": 4,
          "Notes": "IBT = Input Buffer Termination = On",
     },
     "iMC.POWER_CKE_CYCLES": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent in CKE ON mode.  The filter allows you to select a rank to monitor.  If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation.  Multiple counters will need to be used to track multiple ranks simultaneously.  There is no distinction between the different CKE modes (APD, PPDS, PPDF).  This can be determined based on the system programming.  These events should commonly be used with Invert to get the number of cycles in power saving mode.  Edge Detect is also useful here.  Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
          "Desc": "CKE_ON_CYCLES by Rank",
          "EvSel": 131,
          "MaxIncCyc": 16,
     },
     "iMC.POWER_CKE_CYCLES.RANK5": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent in CKE ON mode.  The filter allows you to select a rank to monitor.  If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation.  Multiple counters will need to be used to track multiple ranks simultaneously.  There is no distinction between the different CKE modes (APD, PPDS, PPDF).  This can be determined based on the system programming.  These events should commonly be used with Invert to get the number of cycles in power saving mode.  Edge Detect is also useful here.  Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
          "Desc": "CKE_ON_CYCLES by Rank",
          "EvSel": 131,
          "MaxIncCyc": 16,
          "Umask": "bxx1xxxxx",
     },
     "iMC.POWER_CKE_CYCLES.RANK6": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent in CKE ON mode.  The filter allows you to select a rank to monitor.  If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation.  Multiple counters will need to be used to track multiple ranks simultaneously.  There is no distinction between the different CKE modes (APD, PPDS, PPDF).  This can be determined based on the system programming.  These events should commonly be used with Invert to get the number of cycles in power saving mode.  Edge Detect is also useful here.  Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
          "Desc": "CKE_ON_CYCLES by Rank",
          "EvSel": 131,
          "MaxIncCyc": 16,
          "Umask": "bx1xxxxxx",
     },
     "iMC.POWER_CKE_CYCLES.RANK3": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent in CKE ON mode.  The filter allows you to select a rank to monitor.  If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation.  Multiple counters will need to be used to track multiple ranks simultaneously.  There is no distinction between the different CKE modes (APD, PPDS, PPDF).  This can be determined based on the system programming.  These events should commonly be used with Invert to get the number of cycles in power saving mode.  Edge Detect is also useful here.  Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
          "Desc": "CKE_ON_CYCLES by Rank",
          "EvSel": 131,
          "MaxIncCyc": 16,
          "Umask": "bxxxx1xxx",
     },
     "iMC.POWER_CKE_CYCLES.RANK4": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent in CKE ON mode.  The filter allows you to select a rank to monitor.  If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation.  Multiple counters will need to be used to track multiple ranks simultaneously.  There is no distinction between the different CKE modes (APD, PPDS, PPDF).  This can be determined based on the system programming.  These events should commonly be used with Invert to get the number of cycles in power saving mode.  Edge Detect is also useful here.  Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
          "Desc": "CKE_ON_CYCLES by Rank",
          "EvSel": 131,
          "MaxIncCyc": 16,
          "Umask": "bxxx1xxxx",
     },
     "iMC.POWER_CKE_CYCLES.RANK1": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent in CKE ON mode.  The filter allows you to select a rank to monitor.  If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation.  Multiple counters will need to be used to track multiple ranks simultaneously.  There is no distinction between the different CKE modes (APD, PPDS, PPDF).  This can be determined based on the system programming.  These events should commonly be used with Invert to get the number of cycles in power saving mode.  Edge Detect is also useful here.  Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
          "Desc": "CKE_ON_CYCLES by Rank",
          "EvSel": 131,
          "MaxIncCyc": 16,
          "Umask": "bxxxxxx1x",
     },
     "iMC.POWER_CKE_CYCLES.RANK0": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent in CKE ON mode.  The filter allows you to select a rank to monitor.  If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation.  Multiple counters will need to be used to track multiple ranks simultaneously.  There is no distinction between the different CKE modes (APD, PPDS, PPDF).  This can be determined based on the system programming.  These events should commonly be used with Invert to get the number of cycles in power saving mode.  Edge Detect is also useful here.  Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
          "Desc": "CKE_ON_CYCLES by Rank",
          "EvSel": 131,
          "MaxIncCyc": 16,
          "Umask": "bxxxxxxx1",
     },
     "iMC.POWER_CKE_CYCLES.RANK2": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent in CKE ON mode.  The filter allows you to select a rank to monitor.  If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation.  Multiple counters will need to be used to track multiple ranks simultaneously.  There is no distinction between the different CKE modes (APD, PPDS, PPDF).  This can be determined based on the system programming.  These events should commonly be used with Invert to get the number of cycles in power saving mode.  Edge Detect is also useful here.  Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
          "Desc": "CKE_ON_CYCLES by Rank",
          "EvSel": 131,
          "MaxIncCyc": 16,
          "Umask": "bxxxxx1xx",
     },
     "iMC.POWER_CKE_CYCLES.RANK7": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent in CKE ON mode.  The filter allows you to select a rank to monitor.  If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation.  Multiple counters will need to be used to track multiple ranks simultaneously.  There is no distinction between the different CKE modes (APD, PPDS, PPDF).  This can be determined based on the system programming.  These events should commonly be used with Invert to get the number of cycles in power saving mode.  Edge Detect is also useful here.  Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
          "Desc": "CKE_ON_CYCLES by Rank",
          "EvSel": 131,
          "MaxIncCyc": 16,
          "Umask": "b1xxxxxxx",
     },
     "iMC.POWER_CRITICAL_THROTTLE_CYCLES": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the iMC is in critical thermal throttling.  When this happens, all traffic is blocked.  This should be rare unless something bad is going on in the platform.  There is no filtering by rank for this event.",
          "Desc": "Critical Throttle Cycles",
          "EvSel": 134,
     },
     "iMC.POWER_SELF_REFRESH": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock.  This happens in some package C-states.  For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing.  One use of this is for Monroe technology.  Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
          "Desc": "Clock-Enabled Self-Refresh",
          "EvSel": 67,
     },
     "iMC.POWER_THROTTLE_CYCLES": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling.  It is not possible to distinguish between the two.  This can be filtered by rank.  If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
          "Desc": "Throttle Cycles for any Rank",
          "EvSel": 65,
     },
     "iMC.POWER_THROTTLE_CYCLES.RANK5": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling.  It is not possible to distinguish between the two.  This can be filtered by rank.  If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
          "Desc": "Throttle Cycles for Rank 5",
          "EvSel": 65,
          "Umask": "bxx1xxxxx",
     },
     "iMC.POWER_THROTTLE_CYCLES.RANK6": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling.  It is not possible to distinguish between the two.  This can be filtered by rank.  If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
          "Desc": "Throttle Cycles for Rank 6",
          "EvSel": 65,
          "Umask": "bx1xxxxxx",
     },
     "iMC.POWER_THROTTLE_CYCLES.RANK3": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling.  It is not possible to distinguish between the two.  This can be filtered by rank.  If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
          "Desc": "Throttle Cycles for Rank 3",
          "EvSel": 65,
          "Umask": "bxxxx1xxx",
     },
     "iMC.POWER_THROTTLE_CYCLES.RANK4": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling.  It is not possible to distinguish between the two.  This can be filtered by rank.  If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
          "Desc": "Throttle Cycles for Rank 4",
          "EvSel": 65,
          "Umask": "bxxx1xxxx",
     },
     "iMC.POWER_THROTTLE_CYCLES.RANK1": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling.  It is not possible to distinguish between the two.  This can be filtered by rank.  If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
          "Desc": "Throttle Cycles for Rank 1",
          "EvSel": 65,
          "Umask": "bxxxxxx1x",
     },
     "iMC.POWER_THROTTLE_CYCLES.RANK0": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling.  It is not possible to distinguish between the two.  This can be filtered by rank.  If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
          "Desc": "Throttle Cycles for Rank 0",
          "EvSel": 65,
          "Umask": "bxxxxxxx1",
     },
     "iMC.POWER_THROTTLE_CYCLES.RANK2": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling.  It is not possible to distinguish between the two.  This can be filtered by rank.  If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
          "Desc": "Throttle Cycles for Rank 2",
          "EvSel": 65,
          "Umask": "bxxxxx1xx",
     },
     "iMC.POWER_THROTTLE_CYCLES.RANK7": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling.  It is not possible to distinguish between the two.  This can be filtered by rank.  If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
          "Desc": "Throttle Cycles for Rank 7",
          "EvSel": 65,
          "Umask": "b1xxxxxxx",
     },
     "iMC.PREEMPTION": {
          "Box": "iMC",
          "Category": "iMC PREEMPTION Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times a read in the iMC preempts another read or write.  Generally reads to an open page are issued ahead of requests to closed pages.  This improves the page hit rate of the system.  However, high priority requests can cause pages of active requests to be closed in order to get them out.  This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.",
          "Desc": "Read Preemption Count",
          "EvSel": 8,
     },
     "iMC.PREEMPTION.RD_PREEMPT_WR": {
          "Box": "iMC",
          "Category": "iMC PREEMPTION Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times a read in the iMC preempts another read or write.  Generally reads to an open page are issued ahead of requests to closed pages.  This improves the page hit rate of the system.  However, high priority requests can cause pages of active requests to be closed in order to get them out.  This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.",
          "Desc": "Read Preemption Count",
          "EvSel": 8,
          "Umask": "bxxxxxx1x",
     },
     "iMC.PREEMPTION.RD_PREEMPT_RD": {
          "Box": "iMC",
          "Category": "iMC PREEMPTION Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times a read in the iMC preempts another read or write.  Generally reads to an open page are issued ahead of requests to closed pages.  This improves the page hit rate of the system.  However, high priority requests can cause pages of active requests to be closed in order to get them out.  This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.",
          "Desc": "Read Preemption Count",
          "EvSel": 8,
          "Umask": "bxxxxxxx1",
     },
     "iMC.PRE_COUNT": {
          "Box": "iMC",
          "Category": "iMC PRE Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRAM Precharge commands sent on this channel.",
          "Desc": "DRAM Precharge commands.",
          "EvSel": 2,
     },
     "iMC.PRE_COUNT.PAGE_CLOSE": {
          "Box": "iMC",
          "Category": "iMC PRE Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRAM Precharge commands sent on this channel.",
          "Desc": "DRAM Precharge commands.",
          "EvSel": 2,
          "Umask": "bxxxxxx1x",
     },
     "iMC.PRE_COUNT.PAGE_MISS": {
          "Box": "iMC",
          "Category": "iMC PRE Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRAM Precharge commands sent on this channel.",
          "Desc": "DRAM Precharge commands.",
          "EvSel": 2,
          "Umask": "bxxxxxxx1",
     },
     "iMC.RPQ_CYCLES_FULL": {
          "Box": "iMC",
          "Category": "iMC RPQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the Read Pending Queue is full.  When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC.  This count should be similar to the count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead.  We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM.  This event only tracks non-ISOC queue entries.",
          "Desc": "Read Pending Queue Full Cycles",
          "EvSel": 18,
     },
     "iMC.RPQ_CYCLES_NE": {
          "Box": "iMC",
          "Category": "iMC RPQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the Read Pending Queue is not empty.  This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count).  The RPQ is used to schedule reads out to the memory controller and to track the requests.  Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC.  They deallocate after the CAS command has been issued to memory.  This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
          "Desc": "Read Pending Queue Not Empty",
          "EvSel": 17,
     },
     "iMC.RPQ_INSERTS": {
          "Box": "iMC",
          "Category": "iMC RPQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of allocations into the Read Pending Queue.  This queue is used to schedule reads out to the memory controller and to track the requests.  Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC.  They deallocate after the CAS command has been issued to memory.  This includes both ISOCH and non-ISOCH requests.",
          "Desc": "Read Pending Queue Allocations",
          "EvSel": 16,
     },
     "iMC.RPQ_OCCUPANCY": {
          "Box": "iMC",
          "Category": "iMC RPQ Events",
          "Counters": "0-3",
          "Defn": "Accumulates the occupancies of the Read Pending Queue each cycle.  This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations).  The RPQ is used to schedule reads out to the memory controller and to track the requests.  Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
          "Desc": "Read Pending Queue Occupancy",
          "EvSel": 128,
          "MaxIncCyc": 22,
          "SubCtr": 1,
     },
     "iMC.WPQ_CYCLES_FULL": {
          "Box": "iMC",
          "Category": "iMC WPQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the Write Pending Queue is full.  When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC.  This count should be similar to the count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
          "Desc": "Write Pending Queue Full Cycles",
          "EvSel": 34,
     },
     "iMC.WPQ_CYCLES_NE": {
          "Box": "iMC",
          "Category": "iMC WPQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the Write Pending Queue is not empty.  This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count).  The WPQ is used to schedule write out to the memory controller and to track the writes.  Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC.  They deallocate after being issued to DRAM.  Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have \"posted\" to the iMC.  This is not to be confused with actually performing the write to DRAM.  Therefore, the average latency for this queue is actually not useful for deconstructing intermediate write latencies.",
          "Desc": "Write Pending Queue Not Empty",
          "EvSel": 33,
     },
     "iMC.WPQ_INSERTS": {
          "Box": "iMC",
          "Category": "iMC WPQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of allocations into the Write Pending Queue.  This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count).  The WPQ is used to schedule write out to the memory controller and to track the writes.  Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC.  They deallocate after being issued to DRAM.  Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have \"posted\" to the iMC.",
          "Desc": "Write Pending Queue Allocations",
          "EvSel": 32,
     },
     "iMC.WPQ_OCCUPANCY": {
          "Box": "iMC",
          "Category": "iMC WPQ Events",
          "Counters": "0-3",
          "Defn": "Accumulates the occupancies of the Write Pending Queue each cycle.  This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations).  The WPQ is used to schedule write out to the memory controller and to track the writes.  Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC.  They deallocate after being issued to DRAM.  Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have \"posted\" to the iMC.  This is not to be confused with actually performing the write to DRAM.  Therefore, the average latency for this queue is actually not useful for deconstructing intermediate write latencies.  So, we provide filtering based on if the request has posted or not.  By using the \"not posted\" filter, we can track how long writes spent in the iMC before completions were sent to the HA.  The \"posted\" filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory.  High average occupancies will generally coincide with high write major mode counts.",
          "Desc": "Write Pending Queue Occupancy",
          "EvSel": 129,
          "MaxIncCyc": 32,
          "SubCtr": 1,
     },
     "iMC.WPQ_READ_HIT": {
          "Box": "iMC",
          "Category": "iMC WPQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue).  The iMC allows writes and reads to pass up other writes to different addresses.  Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address.  When reads hit, they are able to directly pull their data from the WPQ instead of going to memory.  Writes that hit will overwrite the existing data.  Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
          "Desc": "Write Pending Queue CAM Match",
          "EvSel": 35,
     },
     "iMC.WPQ_WRITE_HIT": {
          "Box": "iMC",
          "Category": "iMC WPQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times a request hits in the WPQ (write-pending queue).  The iMC allows writes and reads to pass up other writes to different addresses.  Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address.  When reads hit, they are able to directly pull their data from the WPQ instead of going to memory.  Writes that hit will overwrite the existing data.  Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
          "Desc": "Write Pending Queue CAM Match",
          "EvSel": 36,
     },

# R2PCIe:
     "R2PCIe.CLOCKTICKS": {
          "Box": "R2PCIe",
          "Category": "R2PCIe UCLK Events",
          "Counters": "0-3",
          "Defn": "Counts the number of uclks in the R2PCIe uclk domain.  This could be slightly different than the count in the Ubox because of enable/freeze delays.  However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
          "Desc": "Number of uclks in domain",
          "EvSel": 1,
     },
     "R2PCIe.RING_AD_USED": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 AD Ring in Use",
          "EvSel": 7,
     },
     "R2PCIe.RING_AD_USED.CW_EVEN": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 AD Ring in Use",
          "EvSel": 7,
          "Umask": "bxxxxxxx1",
     },
     "R2PCIe.RING_AD_USED.CCW_EVEN": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 AD Ring in Use",
          "EvSel": 7,
          "Umask": "bxxxxx1xx",
     },
     "R2PCIe.RING_AD_USED.CW_ODD": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 AD Ring in Use",
          "EvSel": 7,
          "Umask": "bxxxxxx1x",
     },
     "R2PCIe.RING_AD_USED.CCW_ODD": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the AD ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 AD Ring in Use",
          "EvSel": 7,
          "Umask": "bxxxx1xxx",
     },
     "R2PCIe.RING_AK_USED": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 AK Ring in Use",
          "EvSel": 8,
     },
     "R2PCIe.RING_AK_USED.CW_EVEN": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 AK Ring in Use",
          "EvSel": 8,
          "Umask": "bxxxxxxx1",
     },
     "R2PCIe.RING_AK_USED.CCW_EVEN": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 AK Ring in Use",
          "EvSel": 8,
          "Umask": "bxxxxx1xx",
     },
     "R2PCIe.RING_AK_USED.CW_ODD": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 AK Ring in Use",
          "EvSel": 8,
          "Umask": "bxxxxxx1x",
     },
     "R2PCIe.RING_AK_USED.CCW_ODD": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the AK ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 AK Ring in Use",
          "EvSel": 8,
          "Umask": "bxxxx1xxx",
     },
     "R2PCIe.RING_BL_USED": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 BL Ring in Use",
          "EvSel": 9,
     },
     "R2PCIe.RING_BL_USED.CW_EVEN": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 BL Ring in Use",
          "EvSel": 9,
          "Umask": "bxxxxxxx1",
     },
     "R2PCIe.RING_BL_USED.CCW_EVEN": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 BL Ring in Use",
          "EvSel": 9,
          "Umask": "bxxxxx1xx",
     },
     "R2PCIe.RING_BL_USED.CW_ODD": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 BL Ring in Use",
          "EvSel": 9,
          "Umask": "bxxxxxx1x",
     },
     "R2PCIe.RING_BL_USED.CCW_ODD": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the BL ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
          "Desc": "R2 BL Ring in Use",
          "EvSel": 9,
          "Umask": "bxxxx1xxx",
     },
     "R2PCIe.RING_IV_USED": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sunk into the ring stop.  The IV ring is unidirectional.  Whether UP or DN is used is dependent on the system programming.  Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.",
          "Desc": "R2 IV Ring in Use",
          "EvSel": 10,
     },
     "R2PCIe.RING_IV_USED.ANY": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the IV ring is being used at this ring stop.  This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sunk into the ring stop.  The IV ring is unidirectional.  Whether UP or DN is used is dependent on the system programming.  Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.",
          "Desc": "R2 IV Ring in Use",
          "EvSel": 10,
          "Umask": "b00001111",
     },
     "R2PCIe.RxR_AK_BOUNCES": {
          "Box": "R2PCIe",
          "Category": "R2PCIe INGRESS Events",
          "Counters": 0,
          "Defn": "Counts the number of times when a request destined for the AK ingress bounced.",
          "Desc": "AK Ingress Bounced",
          "EvSel": 18,
     },
     "R2PCIe.RxR_CYCLES_NE": {
          "Box": "R2PCIe",
          "Category": "R2PCIe INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty.  This tracks one of the three rings that are used by the R2PCIe agent.  This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Cycles Not Empty",
          "EvSel": 16,
     },
     "R2PCIe.RxR_CYCLES_NE.NCS": {
          "Box": "R2PCIe",
          "Category": "R2PCIe INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty.  This tracks one of the three rings that are used by the R2PCIe agent.  This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Cycles Not Empty",
          "EvSel": 16,
          "Umask": "bxx1xxxxx",
     },
     "R2PCIe.RxR_CYCLES_NE.NCB": {
          "Box": "R2PCIe",
          "Category": "R2PCIe INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty.  This tracks one of the three rings that are used by the R2PCIe agent.  This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Cycles Not Empty",
          "EvSel": 16,
          "Umask": "bxxx1xxxx",
     },
     "R2PCIe.RxR_CYCLES_NE.DRS": {
          "Box": "R2PCIe",
          "Category": "R2PCIe INGRESS Events",
          "Counters": "0-1",
          "Defn": "Counts the number of cycles when the R2PCIe Ingress is not empty.  This tracks one of the three rings that are used by the R2PCIe agent.  This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.  Multiple ingress buffers can be tracked at a given time using multiple counters.",
          "Desc": "Ingress Cycles Not Empty",
          "EvSel": 16,
          "Umask": "bxxxx1xxx",
     },
     "R2PCIe.TxR_CYCLES_FULL": {
          "Box": "R2PCIe",
          "Category": "R2PCIe EGRESS Events",
          "Counters": 0,
          "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
          "Desc": "Egress Cycles Full",
          "EvSel": 37,
     },
     "R2PCIe.TxR_CYCLES_FULL.AK": {
          "Box": "R2PCIe",
          "Category": "R2PCIe EGRESS Events",
          "Counters": 0,
          "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
          "Desc": "Egress Cycles Full",
          "EvSel": 37,
          "Umask": "bxxxxxx1x",
     },
     "R2PCIe.TxR_CYCLES_FULL.BL": {
          "Box": "R2PCIe",
          "Category": "R2PCIe EGRESS Events",
          "Counters": 0,
          "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
          "Desc": "Egress Cycles Full",
          "EvSel": 37,
          "Umask": "bxxxxx1xx",
     },
     "R2PCIe.TxR_CYCLES_FULL.AD": {
          "Box": "R2PCIe",
          "Category": "R2PCIe EGRESS Events",
          "Counters": 0,
          "Defn": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
          "Desc": "Egress Cycles Full",
          "EvSel": 37,
          "Umask": "bxxxxxxx1",
     },
     "R2PCIe.TxR_CYCLES_NE": {
          "Box": "R2PCIe",
          "Category": "R2PCIe EGRESS Events",
          "Counters": 0,
          "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty.  This tracks one of the three rings that are used by the R2PCIe agent.  This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy.  Only a single Egress queue can be tracked at any given time.  It is not possible to filter based on direction or polarity.",
          "Desc": "Egress Cycles Not Empty",
          "EvSel": 35,
     },
     "R2PCIe.TxR_CYCLES_NE.AK": {
          "Box": "R2PCIe",
          "Category": "R2PCIe EGRESS Events",
          "Counters": 0,
          "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty.  This tracks one of the three rings that are used by the R2PCIe agent.  This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy.  Only a single Egress queue can be tracked at any given time.  It is not possible to filter based on direction or polarity.",
          "Desc": "Egress Cycles Not Empty",
          "EvSel": 35,
          "Umask": "bxxxxxx1x",
     },
     "R2PCIe.TxR_CYCLES_NE.BL": {
          "Box": "R2PCIe",
          "Category": "R2PCIe EGRESS Events",
          "Counters": 0,
          "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty.  This tracks one of the three rings that are used by the R2PCIe agent.  This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy.  Only a single Egress queue can be tracked at any given time.  It is not possible to filter based on direction or polarity.",
          "Desc": "Egress Cycles Not Empty",
          "EvSel": 35,
          "Umask": "bxxxxx1xx",
     },
     "R2PCIe.TxR_CYCLES_NE.AD": {
          "Box": "R2PCIe",
          "Category": "R2PCIe EGRESS Events",
          "Counters": 0,
          "Defn": "Counts the number of cycles when the R2PCIe Egress is not empty.  This tracks one of the three rings that are used by the R2PCIe agent.  This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy.  Only a single Egress queue can be tracked at any given time.  It is not possible to filter based on direction or polarity.",
          "Desc": "Egress Cycles Not Empty",
          "EvSel": 35,
          "Umask": "bxxxxxxx1",
     },
     "R2PCIe.TxR_INSERTS": {
          "Box": "R2PCIe",
          "Category": "R2PCIe EGRESS Events",
          "Counters": 0,
          "Defn": "Counts the number of allocations into the R2PCIe Egress.  This tracks one of the three rings that are used by the R2PCIe agent.  This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue latency.  Only a single Egress queue can be tracked at any given time.  It is not possible to filter based on direction or polarity.",
          "Desc": "Egress Allocations",
          "EvSel": 36,
     },

# PCU:
     "PCU.CLOCKTICKS": {
          "Box": "PCU",
          "Category": "PCU PCLK Events",
          "Counters": "0-3",
          "Defn": "The PCU runs off a fixed 800 MHz clock.  This event counts the number of pclk cycles measured while the counter was enabled.  The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
          "Desc": "pclk Cycles",
          "EvSel": 0,
     },
     "PCU.CORE0_TRANSITION_CYCLES": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent performing core C state transitions.  There is one event per core.",
          "Desc": "Core C State Transition Cycles",
          "EvSel": 3,
          "ExtSel": 1,
          "Notes": "This only tracks the hardware portion in the RCFSM (CFCFSM).  This portion is just doing the core C state transition.  It does not include any necessary frequency/voltage transitions.",
     },
     "PCU.CORE1_TRANSITION_CYCLES": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent performing core C state transitions.  There is one event per core.",
          "Desc": "Core C State Transition Cycles",
          "ExtSel": 1,
          "EvSel": 4,
     },
     "PCU.CORE2_TRANSITION_CYCLES": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent performing core C state transitions.  There is one event per core.",
          "Desc": "Core C State Transition Cycles",
          "ExtSel": 1,
          "EvSel": 5,
     },
     "PCU.CORE3_TRANSITION_CYCLES": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent performing core C state transitions.  There is one event per core.",
          "Desc": "Core C State Transition Cycles",
          "ExtSel": 1,
          "EvSel": 6,
     },
     "PCU.CORE4_TRANSITION_CYCLES": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent performing core C state transitions.  There is one event per core.",
          "Desc": "Core C State Transition Cycles",
          "ExtSel": 1,
          "EvSel": 7,
     },
     "PCU.CORE5_TRANSITION_CYCLES": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent performing core C state transitions.  There is one event per core.",
          "Desc": "Core C State Transition Cycles",
          "ExtSel": 1,
          "EvSel": 8,
     },
     "PCU.CORE6_TRANSITION_CYCLES": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent performing core C state transitions.  There is one event per core.",
          "Desc": "Core C State Transition Cycles",
          "ExtSel": 1,
          "EvSel": 9,
     },
     "PCU.CORE7_TRANSITION_CYCLES": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent performing core C state transitions.  There is one event per core.",
          "Desc": "Core C State Transition Cycles",
          "ExtSel": 1,
          "EvSel": 10,
     },
     "PCU.DEMOTIONS_CORE0": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times when a configurable cores had a C-state demotion",
          "Desc": "Core C State Demotions",
          "ExtSel": 1,
          "EvSel": 30,
          "Filter": "PCUFilter[7:0]",
     },
     "PCU.DEMOTIONS_CORE1": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times when a configurable cores had a C-state demotion",
          "Desc": "Core C State Demotions",
          "EvSel": 31,
          "Filter": "PCUFilter[7:0]",
     },
     "PCU.DEMOTIONS_CORE2": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times when a configurable cores had a C-state demotion",
          "Desc": "Core C State Demotions",
          "EvSel": 32,
     },
     "PCU.DEMOTIONS_CORE3": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times when a configurable cores had a C-state demotion",
          "Desc": "Core C State Demotions",
          "EvSel": 33,
          "Filter": "PCUFilter[7:0]",
     },
     "PCU.DEMOTIONS_CORE4": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times when a configurable cores had a C-state demotion",
          "Desc": "Core C State Demotions",
          "EvSel": 34,
          "Filter": "PCUFilter[7:0]",
     },
     "PCU.DEMOTIONS_CORE5": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times when a configurable cores had a C-state demotion",
          "Desc": "Core C State Demotions",
          "EvSel": 35,
          "Filter": "PCUFilter[7:0]",
     },
     "PCU.DEMOTIONS_CORE6": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times when a configurable cores had a C-state demotion",
          "Desc": "Core C State Demotions",
          "EvSel": 36,
          "Filter": "PCUFilter[7:0]",
     },
     "PCU.DEMOTIONS_CORE7": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times when a configurable cores had a C-state demotion",
          "Desc": "Core C State Demotions",
          "EvSel": 37,
          "Filter": "PCUFilter[7:0]",
     },
     "PCU.FREQ_BAND0_CYCLES": {
          "Box": "PCU",
          "Category": "PCU FREQ_RESIDENCY Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  One can use all four counters with this event, so it is possible to track up to 4 configurable bands.  One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
          "Desc": "Frequency Residency",
          "EvSel": 11,
          "Filter": "PCUFilter[7:0]",
          "Notes": "The PMON control registers in the PCU only update on a frequency transition.   Changing the measuring threshold during a sample interval may introduce errors in the counts.   This is especially true when running at a constant frequency for an extended period of time.  There is a corner case here: we set this code on the GV transition.  So, if we never GV we will never call this code.  This event does not include transition times.  It is handled on fast path.",
     },
     "PCU.FREQ_BAND1_CYCLES": {
          "Box": "PCU",
          "Category": "PCU FREQ_RESIDENCY Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  One can use all four counters with this event, so it is possible to track up to 4 configurable bands.  One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
          "Desc": "Frequency Residency",
          "EvSel": 12,
          "Filter": "PCUFilter[15:8]",
          "Notes": "The PMON control registers in the PCU only update on a frequency transition.   Changing the measuring threshold during a sample interval may introduce errors in the counts.   This is especially true when running at a constant frequency for an extended period of time.  There is a corner case here: we set this code on the GV transition.  So, if we never GV we will never call this code.  This event does not include transition times.  It is handled on fast path.",
     },
     "PCU.FREQ_BAND2_CYCLES": {
          "Box": "PCU",
          "Category": "PCU FREQ_RESIDENCY Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  One can use all four counters with this event, so it is possible to track up to 4 configurable bands.  One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
          "Desc": "Frequency Residency",
          "EvSel": 13,
          "Filter": "PCUFilter[23:16]",
          "Notes": "The PMON control registers in the PCU only update on a frequency transition.   Changing the measuring threshold during a sample interval may introduce errors in the counts.   This is especially true when running at a constant frequency for an extended period of time.  There is a corner case here: we set this code on the GV transition.  So, if we never GV we will never call this code.  This event does not include transition times.  It is handled on fast path.",
     },
     "PCU.FREQ_BAND3_CYCLES": {
          "Box": "PCU",
          "Category": "PCU FREQ_RESIDENCY Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  One can use all four counters with this event, so it is possible to track up to 4 configurable bands.  One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
          "Desc": "Frequency Residency",
          "EvSel": 14,
          "Filter": "PCUFilter[31:24]",
          "Notes": "The PMON control registers in the PCU only update on a frequency transition.   Changing the measuring threshold during a sample interval may introduce errors in the counts.   This is especially true when running at a constant frequency for an extended period of time.  There is a corner case here: we set this code on the GV transition.  So, if we never GV we will never call this code.  This event does not include transition times.  It is handled on fast path.",
     },
     "PCU.FREQ_MAX_CURRENT_CYCLES": {
          "Box": "PCU",
          "Category": "PCU FREQ_MAX_LIMIT Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when current is the upper limit on frequency.",
          "Desc": "Current Strongest Upper Limit Cycles",
          "EvSel": 7,
          "Notes": "This is fast path, will clear our other limits when it happens.  The slow loop portion, which covers the other limits, can double count EDP.  Clearing should fix this up in the next fast path event, but this will happen.  Add up all the cycles and it won't make sense, but the general distribution is true.",
     },
     "PCU.FREQ_MAX_LIMIT_THERMAL_CYCLES": {
          "Box": "PCU",
          "Category": "PCU FREQ_MAX_LIMIT Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when thermal conditions are the upper limit on frequency.  This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature.  This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
          "Desc": "Thermal Strongest Upper Limit Cycles",
          "EvSel": 4,
     },
     "PCU.FREQ_MAX_OS_CYCLES": {
          "Box": "PCU",
          "Category": "PCU FREQ_MAX_LIMIT Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the OS is the upper limit on frequency.",
          "Desc": "OS Strongest Upper Limit Cycles",
          "EvSel": 6,
          "Notes": "Essentially, this event says the OS is getting the frequency it requested.",
     },
     "PCU.FREQ_MAX_POWER_CYCLES": {
          "Box": "PCU",
          "Category": "PCU FREQ_MAX_LIMIT Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when power is the upper limit on frequency.",
          "Desc": "Power Strongest Upper Limit Cycles",
          "EvSel": 5,
     },
     "PCU.FREQ_MIN_IO_P_CYCLES": {
          "Box": "PCU",
          "Category": "PCU FREQ_MIN_LIMIT Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower.  This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW.  This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
          "Desc": "IO P Limit Strongest Lower Limit Cycles",
          "ExtSel": 1,
          "EvSel": 1,
     },
     "PCU.FREQ_MIN_PERF_P_CYCLES": {
          "Box": "PCU",
          "Category": "PCU FREQ_MIN_LIMIT Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when Perf P Limit is preventing us from dropping the frequency lower.  Perf P Limit is an algorithm that takes input from remote sockets when determining if a socket should drop it's frequency down.  This is largely to minimize increases in snoop and remote read latencies.",
          "Desc": "Perf P Limit Strongest Lower Limit Cycles",
          "ExtSel": 1,
          "EvSel": 2,
     },
     "PCU.FREQ_TRANS_CYCLES": {
          "Box": "PCU",
          "Category": "PCU FREQ_TRANS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the system is changing frequency.  This can not be filtered by thread ID.  One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
          "Desc": "Cycles spent changing Frequency",
          "ExtSel": 1,
          "EvSel": 0,
     },
     "PCU.MEMORY_PHASE_SHEDDING_CYCLES": {
          "Box": "PCU",
          "Category": "PCU MEMORY_PHASE_SHEDDING Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the PCU has triggered memory phase shedding.  This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
          "Desc": "Memory Phase Shedding Cycles",
          "EvSel": 47,
          "Notes": "Is this the package C one?  Yes",
     },
     "PCU.POWER_STATE_OCCUPANCY": {
          "Box": "PCU",
          "Category": "PCU POWER_STATE_OCC Events",
          "Counters": "0-3",
          "Defn": "This is an occupancy event that tracks the number of cores that are in C0.  It can be used by itself to get the average number of cores in C0, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
          "Desc": "Number of cores in C0",
          "EvSel": 128,
          "MaxIncCyc": 8,
          "SubCtr": 1,
     },
     "PCU.POWER_STATE_OCCUPANCY.CORES_C3": {
          "Box": "PCU",
          "Category": "PCU POWER_STATE_OCC Events",
          "Counters": "0-3",
          "Defn": "This is an occupancy event that tracks the number of cores that are in C3.  It can be used by itself to get the average number of cores in C3, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
          "Desc": "Number of cores in C3",
          "EvSel": 128,
          "MaxIncCyc": 8,
          "SubCtr": 1,
          "Umask": "b10000000",
     },
     "PCU.POWER_STATE_OCCUPANCY.CORES_C0": {
          "Box": "PCU",
          "Category": "PCU POWER_STATE_OCC Events",
          "Counters": "0-3",
          "Defn": "This is an occupancy event that tracks the number of cores that are in C0.  It can be used by itself to get the average number of cores in C0, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
          "Desc": "Number of cores in C0",
          "EvSel": 128,
          "MaxIncCyc": 8,
          "SubCtr": 1,
          "Umask": "b01000000",
     },
     "PCU.POWER_STATE_OCCUPANCY.CORES_C6": {
          "Box": "PCU",
          "Category": "PCU POWER_STATE_OCC Events",
          "Counters": "0-3",
          "Defn": "This is an occupancy event that tracks the number of cores that are in C6.  It can be used by itself to get the average number of cores in C6, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
          "Desc": "Number of cores in C6",
          "EvSel": 128,
          "MaxIncCyc": 8,
          "SubCtr": 1,
          "Umask": "b11000000",
     },
     "PCU.PROCHOT_EXTERNAL_CYCLES": {
          "Box": "PCU",
          "Category": "PCU PROCHOT Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that we are in external PROCHOT mode.  This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
          "Desc": "External Prochot",
          "EvSel": 10,
     },
     "PCU.PROCHOT_INTERNAL_CYCLES": {
          "Box": "PCU",
          "Category": "PCU PROCHOT Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that we are in Internal PROCHOT mode.  This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
          "Desc": "Internal Prochot",
          "EvSel": 9,
     },
     "PCU.TOTAL_TRANSITION_CYCLES": {
          "Box": "PCU",
          "Category": "PCU CORE_C_STATE_TRANSITION Events",
          "Counters": "0-3",
          "Defn": "Number of cycles spent performing core C state transitions across all cores.",
          "Desc": "Total Core C State Transition Cycles",
          "ExtSel": 1,
          "EvSel": 11,
     },
     "PCU.VOLT_TRANS_CYCLES_CHANGE": {
          "Box": "PCU",
          "Category": "PCU VOLT_TRANS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the system is changing voltage.  There is no filtering supported with this event.  One can use it as a simple event, or use it in conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.  This event is calculated by or'ing together the increasing and decreasing events.",
          "Desc": "Cycles Changing Voltage",
          "EvSel": 3,
     },
     "PCU.VOLT_TRANS_CYCLES_DECREASE": {
          "Box": "PCU",
          "Category": "PCU VOLT_TRANS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the system is decreasing voltage.  There is no filtering supported with this event.  One can use it as a simple event, or use it in conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
          "Desc": "Cycles Decreasing Voltage",
          "EvSel": 2,
     },
     "PCU.VOLT_TRANS_CYCLES_INCREASE": {
          "Box": "PCU",
          "Category": "PCU VOLT_TRANS Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the system is increasing voltage.  There is no filtering supported with this event.  One can use it as a simple event, or use it in conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
          "Desc": "Cycles Increasing Voltage",
          "EvSel": 1,
     },
     "PCU.VR_HOT_CYCLES": {
          "Box": "PCU",
          "Category": "PCU VR_HOT Events",
          "Counters": "0-3",
          "Desc": "VR Hot",
          "EvSel": 50,
     },

# QPI_LL:
     "QPI_LL.CLOCKTICKS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CFCLK Events",
          "Counters": "0-3",
          "Defn": "Counts the number of clocks in the QPI LL.  This clock runs at 1/8th the \"GT/s\" speed of the QPI link.  For example, a 8GT/s link will have qfclk of 1GHz.  JKT does not support dynamic link speeds, so this frequency is fixed.",
          "Desc": "Number of qfclks",
          "EvSel": 20,
     },
     "QPI_LL.CTO_COUNT": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Counters": "0-3",
          "Defn": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots.  If both slots trigger in a given cycle, the event will increment by 2.  You can use edge detect to count the number of cases when both events triggered.",
          "Desc": "Count of CTO Events",
          "ExtSel": 1,
          "EvSel": 56,
          "MaxIncCyc": 2,
          "SubCtr": 1,
     },
     "QPI_LL.DIRECT2CORE": {
          "Box": "QPI_LL",
          "Category": "QPI_LL DIRECT2CORE Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRS packets that we attempted to do direct2core on.  There are 4 mutually exclusive filters.  Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases.  Note that this does not count packets that are not candidates for Direct2Core.  The only candidates for Direct2Core are DRS packets destined for Cbos.",
          "Desc": "Direct 2 Core Spawning",
          "EvSel": 19,
     },
     "QPI_LL.DIRECT2CORE.FAILURE_RBT": {
          "Box": "QPI_LL",
          "Category": "QPI_LL DIRECT2CORE Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRS packets that we attempted to do direct2core on.  There are 4 mutually exclusive filters.  Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases.  Note that this does not count packets that are not candidates for Direct2Core.  The only candidates for Direct2Core are DRS packets destined for Cbos.",
          "Desc": "Direct 2 Core Spawning",
          "EvSel": 19,
          "Umask": "bxxxxx1xx",
     },
     "QPI_LL.DIRECT2CORE.SUCCESS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL DIRECT2CORE Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRS packets that we attempted to do direct2core on.  There are 4 mutually exclusive filters.  Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases.  Note that this does not count packets that are not candidates for Direct2Core.  The only candidates for Direct2Core are DRS packets destined for Cbos.",
          "Desc": "Direct 2 Core Spawning",
          "EvSel": 19,
          "Umask": "bxxxxxxx1",
     },
     "QPI_LL.DIRECT2CORE.FAILURE_CREDITS_RBT": {
          "Box": "QPI_LL",
          "Category": "QPI_LL DIRECT2CORE Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRS packets that we attempted to do direct2core on.  There are 4 mutually exclusive filters.  Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases.  Note that this does not count packets that are not candidates for Direct2Core.  The only candidates for Direct2Core are DRS packets destined for Cbos.",
          "Desc": "Direct 2 Core Spawning",
          "EvSel": 19,
          "Umask": "bxxxx1xxx",
     },
     "QPI_LL.DIRECT2CORE.FAILURE_CREDITS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL DIRECT2CORE Events",
          "Counters": "0-3",
          "Defn": "Counts the number of DRS packets that we attempted to do direct2core on.  There are 4 mutually exclusive filters.  Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases.  Note that this does not count packets that are not candidates for Direct2Core.  The only candidates for Direct2Core are DRS packets destined for Cbos.",
          "Desc": "Direct 2 Core Spawning",
          "EvSel": 19,
          "Umask": "bxxxxxx1x",
     },
     "QPI_LL.L1_POWER_CYCLES": {
          "Box": "QPI_LL",
          "Category": "QPI_LL POWER Events",
          "Counters": "0-3",
          "Defn": "Number of QPI qfclk cycles spent in L1 power mode.  L1 is a mode that totally shuts down a QPI link.  Use edge detect to count the number of instances when the QPI link entered L1.  Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
          "Desc": "Cycles in L1",
          "EvSel": 18,
     },
     "QPI_LL.RxL0P_POWER_CYCLES": {
          "Box": "QPI_LL",
          "Category": "QPI_LL POWER_RX Events",
          "Counters": "0-3",
          "Defn": "Number of QPI qfclk cycles spent in L0p power mode.  L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power.  It increases snoop and data transfer latencies and decreases overall bandwidth.  This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses.  Use edge detect to count the number of instances when the QPI link entered L0p.  Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
          "Desc": "Cycles in L0p",
          "EvSel": 16,
          "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.",
     },
     "QPI_LL.RxL0_POWER_CYCLES": {
          "Box": "QPI_LL",
          "Category": "QPI_LL POWER_RX Events",
          "Counters": "0-3",
          "Defn": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer.  L0 is the default mode which provides the highest performance with the most power.  Use edge detect to count the number of instances that the link entered L0.  Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.  The phy layer  sometimes leaves L0 for training, which will not be captured by this event.",
          "Desc": "Cycles in L0",
          "EvSel": 15,
     },
     "QPI_LL.RxL_BYPASSED": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress.  This is a latency optimization, and should generally be the common case.  If this value is less than the number of flits transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
          "Desc": "Rx Flit Buffer Bypassed",
          "EvSel": 9,
     },
     "QPI_LL.RxL_CREDITS_CONSUMED_VN0": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RX_CREDITS_CONSUMED Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer).  This includes packets that went through the RxQ and those that were bypassed.",
          "Desc": "VN0 Credit Consumed",
          "EvSel": 30,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NCS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RX_CREDITS_CONSUMED Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer).  This includes packets that went through the RxQ and those that were bypassed.",
          "Desc": "VN0 Credit Consumed",
          "EvSel": 30,
          "Umask": "bxxxxx1xx",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NCB": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RX_CREDITS_CONSUMED Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer).  This includes packets that went through the RxQ and those that were bypassed.",
          "Desc": "VN0 Credit Consumed",
          "EvSel": 30,
          "Umask": "bxxxxxx1x",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_CREDITS_CONSUMED_VN0.SNP": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RX_CREDITS_CONSUMED Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer).  This includes packets that went through the RxQ and those that were bypassed.",
          "Desc": "VN0 Credit Consumed",
          "EvSel": 30,
          "Umask": "bxxx1xxxx",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_CREDITS_CONSUMED_VN0.HOM": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RX_CREDITS_CONSUMED Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer).  This includes packets that went through the RxQ and those that were bypassed.",
          "Desc": "VN0 Credit Consumed",
          "EvSel": 30,
          "Umask": "bxxxx1xxx",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_CREDITS_CONSUMED_VN0.DRS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RX_CREDITS_CONSUMED Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer).  This includes packets that went through the RxQ and those that were bypassed.",
          "Desc": "VN0 Credit Consumed",
          "EvSel": 30,
          "Umask": "bxxxxxxx1",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_CREDITS_CONSUMED_VN0.NDR": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RX_CREDITS_CONSUMED Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer).  This includes packets that went through the RxQ and those that were bypassed.",
          "Desc": "VN0 Credit Consumed",
          "EvSel": 30,
          "Umask": "bxx1xxxxx",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_CREDITS_CONSUMED_VNA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RX_CREDITS_CONSUMED Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer).  This includes packets that went through the RxQ and those that were bypassed.",
          "Desc": "VNA Credit Consumed",
          "EvSel": 29,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_CYCLES_NE": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles that the QPI RxQ was not empty.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.",
          "Desc": "RxQ Cycles Not Empty",
          "EvSel": 10,
     },
     "QPI_LL.RxL_FLITS_G0": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
          "Desc": "Flits Received - Group 0",
          "EvSel": 1,
          "MaxIncCyc": 2,
     },
     "QPI_LL.RxL_FLITS_G0.NON_DATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
          "Desc": "Flits Received - Group 0",
          "EvSel": 1,
          "MaxIncCyc": 2,
          "Umask": "bxxxxx1xx",
     },
     "QPI_LL.RxL_FLITS_G0.DATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
          "Desc": "Flits Received - Group 0",
          "EvSel": 1,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxx1x",
     },
     "QPI_LL.RxL_FLITS_G0.IDLE": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
          "Desc": "Flits Received - Group 0",
          "EvSel": 1,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxxx1",
     },
     "QPI_LL.RxL_FLITS_G1": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 1",
          "EvSel": 2,
          "MaxIncCyc": 2,
     },
     "QPI_LL.RxL_FLITS_G1.DRS_DATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 1",
          "EvSel": 2,
          "MaxIncCyc": 2,
          "Umask": "bxxxx1xxx",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G1.HOM_NONREQ": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 1",
          "EvSel": 2,
          "MaxIncCyc": 2,
          "Umask": "bxxxxx1xx",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G1.HOM_REQ": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 1",
          "EvSel": 2,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxx1x",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G1.DRS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 1",
          "EvSel": 2,
          "MaxIncCyc": 2,
          "Umask": "b00011000",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G1.HOM": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 1",
          "EvSel": 2,
          "MaxIncCyc": 2,
          "Umask": "b00000110",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G1.SNP": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 1",
          "EvSel": 2,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxxx1",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G1.DRS_NONDATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 1",
          "EvSel": 2,
          "MaxIncCyc": 2,
          "Umask": "bxxx1xxxx",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G2": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 2",
          "EvSel": 3,
          "MaxIncCyc": 2,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G2.NCS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 2",
          "EvSel": 3,
          "MaxIncCyc": 2,
          "Umask": "bxxx1xxxx",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G2.NCB": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 2",
          "EvSel": 3,
          "MaxIncCyc": 2,
          "Umask": "b00001100",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G2.NDR_AD": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 2",
          "EvSel": 3,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxxx1",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G2.NCB_NONDATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 2",
          "EvSel": 3,
          "MaxIncCyc": 2,
          "Umask": "bxxxx1xxx",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G2.NDR_AK": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 2",
          "EvSel": 3,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxx1x",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_FLITS_G2.NCB_DATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits received from the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Received - Group 2",
          "EvSel": 3,
          "MaxIncCyc": 2,
          "Umask": "bxxxxx1xx",
          "ExtSel": 1,
     },
     "QPI_LL.RxL_INSERTS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Number of allocations into the QPI Rx Flit Buffer.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
          "Desc": "Rx Flit Buffer Allocations",
          "EvSel": 8,
     },
     "QPI_LL.RxL_INSERTS_DRS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Number of allocations into the QPI Rx Flit Buffer.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.  This monitors only DRS flits.",
          "Desc": "Rx Flit Buffer Allocations - DRS",
          "EvSel": 9,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_INSERTS_HOM": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Number of allocations into the QPI Rx Flit Buffer.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.  This monitors only HOM flits.",
          "Desc": "Rx Flit Buffer Allocations - HOM",
          "EvSel": 12,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_INSERTS_NCB": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Number of allocations into the QPI Rx Flit Buffer.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.  This monitors only NCB flits.",
          "Desc": "Rx Flit Buffer Allocations - NCB",
          "EvSel": 10,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_INSERTS_NCS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Number of allocations into the QPI Rx Flit Buffer.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.  This monitors only NCS flits.",
          "Desc": "Rx Flit Buffer Allocations - NCS",
          "EvSel": 11,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_INSERTS_NDR": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Number of allocations into the QPI Rx Flit Buffer.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.  This monitors only NDR flits.",
          "Desc": "Rx Flit Buffer Allocations - NDR",
          "EvSel": 14,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_INSERTS_SNP": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Number of allocations into the QPI Rx Flit Buffer.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.  This monitors only SNP flits.",
          "Desc": "Rx Flit Buffer Allocations - SNP",
          "EvSel": 13,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_OCCUPANCY": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
          "Desc": "RxQ Occupancy - All Packets",
          "EvSel": 11,
          "MaxIncCyc": 128,
          "SubCtr": 1,
     },
     "QPI_LL.RxL_OCCUPANCY_DRS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.  This monitors DRS flits only.",
          "Desc": "RxQ Occupancy - DRS",
          "EvSel": 21,
          "MaxIncCyc": 128,
          "SubCtr": 1,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_OCCUPANCY_HOM": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.  This monitors HOM flits only.",
          "Desc": "RxQ Occupancy - HOM",
          "EvSel": 24,
          "MaxIncCyc": 128,
          "SubCtr": 1,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_OCCUPANCY_NCB": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.  This monitors NCB flits only.",
          "Desc": "RxQ Occupancy - NCB",
          "EvSel": 22,
          "MaxIncCyc": 128,
          "SubCtr": 1,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_OCCUPANCY_NCS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.  This monitors NCS flits only.",
          "Desc": "RxQ Occupancy - NCS",
          "EvSel": 23,
          "MaxIncCyc": 128,
          "SubCtr": 1,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_OCCUPANCY_NDR": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.  This monitors NDR flits only.",
          "Desc": "RxQ Occupancy - NDR",
          "EvSel": 26,
          "MaxIncCyc": 128,
          "SubCtr": 1,
          "ExtSel": 1,
     },
     "QPI_LL.RxL_OCCUPANCY_SNP": {
          "Box": "QPI_LL",
          "Category": "QPI_LL RXQ Events",
          "Counters": "0-3",
          "Defn": "Accumulates the number of elements in the QPI RxQ in each cycle.  Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface.  If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency.  This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.  This monitors SNP flits only.",
          "Desc": "RxQ Occupancy - SNP",
          "EvSel": 25,
          "MaxIncCyc": 128,
          "SubCtr": 1,
          "ExtSel": 1,
     },
     "QPI_LL.TxL0P_POWER_CYCLES": {
          "Box": "QPI_LL",
          "Category": "QPI_LL POWER_TX Events",
          "Counters": "0-3",
          "Defn": "Number of QPI qfclk cycles spent in L0p power mode.  L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power.  It increases snoop and data transfer latencies and decreases overall bandwidth.  This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses.  Use edge detect to count the number of instances when the QPI link entered L0p.  Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
          "Desc": "Cycles in L0p",
          "EvSel": 13,
          "Notes": "Using .edge_det to count transitions does not function if L1_POWER_CYCLES > 0.",
     },
     "QPI_LL.TxL0_POWER_CYCLES": {
          "Box": "QPI_LL",
          "Category": "QPI_LL POWER_TX Events",
          "Counters": "0-3",
          "Defn": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer.  L0 is the default mode which provides the highest performance with the most power.  Use edge detect to count the number of instances that the link entered L0.  Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.  The phy layer  sometimes leaves L0 for training, which will not be captured by this event.",
          "Desc": "Cycles in L0",
          "EvSel": 12,
     },
     "QPI_LL.TxL_BYPASSED": {
          "Box": "QPI_LL",
          "Category": "QPI_LL TXQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link.  However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
          "Desc": "Tx Flit Buffer Bypassed",
          "EvSel": 5,
     },
     "QPI_LL.TxL_CYCLES_NE": {
          "Box": "QPI_LL",
          "Category": "QPI_LL TXQ Events",
          "Counters": "0-3",
          "Defn": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link.  However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
          "Desc": "Tx Flit Buffer Cycles not Empty",
          "EvSel": 6,
     },
     "QPI_LL.TxL_FLITS_G0": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits transmitted across the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
          "Desc": "Flits Transferred - Group 0",
          "EvSel": 0,
          "MaxIncCyc": 2,
     },
     "QPI_LL.TxL_FLITS_G0.NON_DATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits transmitted across the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
          "Desc": "Flits Transferred - Group 0",
          "EvSel": 0,
          "MaxIncCyc": 2,
          "Umask": "bxxxxx1xx",
     },
     "QPI_LL.TxL_FLITS_G0.DATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits transmitted across the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
          "Desc": "Flits Transferred - Group 0",
          "EvSel": 0,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxx1x",
     },
     "QPI_LL.TxL_FLITS_G0.IDLE": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits transmitted across the QPI Link.  It includes filters for Idle, protocol, and Data Flits.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
          "Desc": "Flits Transferred - Group 0",
          "EvSel": 0,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxxx1",
     },
     "QPI_LL.TxL_FLITS_G1": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 1",
          "EvSel": 0,
          "MaxIncCyc": 2,
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G1.DRS_DATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 1",
          "EvSel": 0,
          "MaxIncCyc": 2,
          "Umask": "bxxxx1xxx",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G1.HOM_NONREQ": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 1",
          "EvSel": 0,
          "MaxIncCyc": 2,
          "Umask": "bxxxxx1xx",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G1.HOM_REQ": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 1",
          "EvSel": 0,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxx1x",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G1.DRS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 1",
          "EvSel": 0,
          "MaxIncCyc": 2,
          "Umask": "b00011000",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G1.HOM": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 1",
          "EvSel": 0,
          "MaxIncCyc": 2,
          "Umask": "b00000110",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G1.SNP": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 1",
          "EvSel": 0,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxxx1",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G1.DRS_NONDATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for SNP, HOM, and DRS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 1",
          "EvSel": 0,
          "MaxIncCyc": 2,
          "Umask": "bxxx1xxxx",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G2": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 2",
          "EvSel": 1,
          "MaxIncCyc": 2,
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G2.NCS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 2",
          "EvSel": 1,
          "MaxIncCyc": 2,
          "Umask": "bxxx1xxxx",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G2.NCB": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 2",
          "EvSel": 1,
          "MaxIncCyc": 2,
          "Umask": "b00001100",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G2.NDR_AD": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 2",
          "EvSel": 1,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxxx1",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G2.NCB_NONDATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 2",
          "EvSel": 1,
          "MaxIncCyc": 2,
          "Umask": "bxxxx1xxx",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G2.NDR_AK": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 2",
          "EvSel": 1,
          "MaxIncCyc": 2,
          "Umask": "bxxxxxx1x",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_FLITS_G2.NCB_DATA": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_TX Events",
          "Counters": "0-3",
          "Defn": "Counts the number of flits trasmitted across the QPI Link.  This is one of three \"groups\" that allow us to track flits.  It includes filters for NDR, NCB, and NCS message classes.  Each \"flit\" is made up of 80 bits of information (in addition to some ECC data).  In full-width (L0) mode, flits are made up of four \"fits\", each of which contains 20 bits of data (along with some additional ECC data).   In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit.  When one talks about QPI \"speed\" (for example, 8.0 GT/s), the \"transfers\" here refer to \"fits\".  Therefore, in L0, the system will transfer 1 \"flit\" at the rate of 1/4th the QPI speed.  One can calculate the bandwidth of the link by taking: flits*80b/time.  Note that this is not the same as \"data\" bandwidth.  For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual \"data\" and an additional 16 bits of other information.  To calculate \"data\" bandwidth, one should therefore do: data flits * 8B / time.",
          "Desc": "Flits Transferred - Group 2",
          "EvSel": 1,
          "MaxIncCyc": 2,
          "Umask": "bxxxxx1xx",
          "ExtSel": 1,
     },
     "QPI_LL.TxL_INSERTS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL TXQ Events",
          "Counters": "0-3",
          "Defn": "Number of allocations into the QPI Tx Flit Buffer.  Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link.  However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.  This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
          "Desc": "Tx Flit Buffer Allocations",
          "EvSel": 4,
     },
     "QPI_LL.TxL_OCCUPANCY": {
          "Box": "QPI_LL",
          "Category": "QPI_LL TXQ Events",
          "Counters": "0-3",
          "Defn": "Accumulates the number of flits in the TxQ.  Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link.  However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
          "Desc": "Tx Flit Buffer Occupancy",
          "EvSel": 7,
     },
     "QPI_LL.VNA_CREDIT_RETURNS": {
          "Box": "QPI_LL",
          "Category": "QPI_LL VNA_CREDIT_RETURN Events",
          "Counters": "0-3",
          "Defn": "Number of VNA credits returned.",
          "Desc": "VNA Credits Returned",
          "EvSel": 28,
          "ExtSel": 1,
     },
     "QPI_LL.VNA_CREDIT_RETURN_OCCUPANCY": {
          "Box": "QPI_LL",
          "Category": "QPI_LL VNA_CREDIT_RETURN Events",
          "Counters": "0-3",
          "Defn": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
          "Desc": "VNA Credits Pending Return - Occupancy",
          "EvSel": 27,
          "MaxIncCyc": 128,
          "SubCtr": 1,
          "ExtSel": 1,
     },

# UBOX:
     "UBOX.EVENT_MSG": {
          "Box": "UBOX",
          "Category": "UBOX EVENT_MSG Events",
          "Counters": "0-1",
          "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.   Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
          "Desc": "VLW Received",
          "EvSel": 66,
     },
     "UBOX.EVENT_MSG.DOORBELL_RCVD": {
          "Box": "UBOX",
          "Category": "UBOX EVENT_MSG Events",
          "Counters": "0-1",
          "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.   Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
          "Desc": "VLW Received",
          "EvSel": 66,
          "Umask": "bxxxx1xxx",
     },
     "UBOX.EVENT_MSG.IPI_RCVD": {
          "Box": "UBOX",
          "Category": "UBOX EVENT_MSG Events",
          "Counters": "0-1",
          "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.   Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
          "Desc": "VLW Received",
          "EvSel": 66,
          "Umask": "bxxxxx1xx",
     },
     "UBOX.EVENT_MSG.INT_PRIO": {
          "Box": "UBOX",
          "Category": "UBOX EVENT_MSG Events",
          "Counters": "0-1",
          "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.   Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
          "Desc": "VLW Received",
          "EvSel": 66,
          "Umask": "bxxx1xxxx",
     },
     "UBOX.EVENT_MSG.VLW_RCVD": {
          "Box": "UBOX",
          "Category": "UBOX EVENT_MSG Events",
          "Counters": "0-1",
          "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.   Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
          "Desc": "VLW Received",
          "EvSel": 66,
          "Umask": "bxxxxxxx1",
     },
     "UBOX.EVENT_MSG.MSI_RCVD": {
          "Box": "UBOX",
          "Category": "UBOX EVENT_MSG Events",
          "Counters": "0-1",
          "Defn": "Virtual Logical Wire (legacy) message were received from Uncore.   Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
          "Desc": "VLW Received",
          "EvSel": 66,
          "Umask": "bxxxxxx1x",
     },
     "UBOX.FILTER_MATCH": {
          "Box": "UBOX",
          "Category": "UBOX FILTER_MATCH Events",
          "Counters": "0-1",
          "Defn": "Filter match per thread (w/ or w/o Filter Enable).  Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
          "Desc": "Filter Match",
          "EvSel": 65,
     },
     "UBOX.FILTER_MATCH.U2C_ENABLE": {
          "Box": "UBOX",
          "Category": "UBOX FILTER_MATCH Events",
          "Counters": "0-1",
          "Defn": "Filter match per thread (w/ or w/o Filter Enable).  Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
          "Desc": "Filter Match",
          "EvSel": 65,
          "Umask": "bxxxxx1xx",
     },
     "UBOX.FILTER_MATCH.U2C_DISABLE": {
          "Box": "UBOX",
          "Category": "UBOX FILTER_MATCH Events",
          "Counters": "0-1",
          "Defn": "Filter match per thread (w/ or w/o Filter Enable).  Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
          "Desc": "Filter Match",
          "EvSel": 65,
          "Umask": "bxxxx1xxx",
     },
     "UBOX.FILTER_MATCH.DISABLE": {
          "Box": "UBOX",
          "Category": "UBOX FILTER_MATCH Events",
          "Counters": "0-1",
          "Defn": "Filter match per thread (w/ or w/o Filter Enable).  Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
          "Desc": "Filter Match",
          "EvSel": 65,
          "Umask": "bxxxxxx1x",
     },
     "UBOX.FILTER_MATCH.ENABLE": {
          "Box": "UBOX",
          "Category": "UBOX FILTER_MATCH Events",
          "Counters": "0-1",
          "Defn": "Filter match per thread (w/ or w/o Filter Enable).  Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
          "Desc": "Filter Match",
          "EvSel": 65,
          "Umask": "bxxxxxxx1",
     },
     "UBOX.LOCK_CYCLES": {
          "Box": "UBOX",
          "Category": "UBOX LOCK Events",
          "Counters": "0-1",
          "Defn": "Number of times an IDI Lock/SplitLock sequence was started",
          "Desc": "IDI Lock/SplitLock Cycles",
          "EvSel": 68,
     },
}
derived = {

# HA:
     "HA.PCT_CYCLES_BL_FULL": {
          "Box": "HA",
          "Category": "HA BL_EGRESS Events",
          "Defn": "Percentage of time the BL Egress Queue is full",
          "Desc": "Percent BL Egress Full",
          "Equation": "TxR_BL_CYCLES_FULL.ALL / SAMPLE_INTERVAL",
          "Obscure": 1,
     },
     "HA.PCT_CYCLES_CONFLICT": {
          "Box": "HA",
          "Category": "HA CONFLICTS Events",
          "Defn": "Percentage of time in Conflict Resolution",
          "Desc": "Percent Conflict",
          "Equation": "CONFLICT_CYCLES.CONFLICT / SAMPLE_INTERVAL",
          "Broken": 1,
     },
     "HA.PCT_CYCLES_D2C_DISABLED": {
          "Box": "HA",
          "Category": "HA DIRECT2CORE Events",
          "Defn": "Percentage of time that Direct2Core was disabled.",
          "Desc": "Percent D2C Disabled",
          "Equation": "DIRECT2CORE_CYCLES_DISABLED / SAMPLE_INTERVAL",
          "Obscure": 1,
     },
     "HA.PCT_RD_REQUESTS": {
          "Box": "HA",
          "Category": "HA REQUESTS Events",
          "Defn": "Percentage of HA traffic that is from Read Requests",
          "Desc": "Percent Read Requests",
          "Equation": "REQUESTS.READS / (REQUESTS.READS + REQUESTS.WRITES)",
     },
     "HA.PCT_WR_REQUESTS": {
          "Box": "HA",
          "Category": "HA REQUESTS Events",
          "Defn": "Percentage of HA traffic that is from Write Requests",
          "Desc": "Percent Write Requests",
          "Equation": "REQUESTS.WRITES / (REQUESTS.READS + REQUESTS.WRITES)",
     },

# iMC:
     "iMC.MEM_BW_READS": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Defn": "Memory bandwidth consumed by reads.  Expressed in bytes.",
          "Desc": "Read Memory Bandwidth",
          "Equation": "(CAS_COUNT.RD * 64)",
     },
     "iMC.MEM_BW_TOTAL": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Defn": "Total memory bandwidth.  Expressed in bytes.",
          "Desc": "Total Memory Bandwidth",
          "Equation": "MEM_BW_READS + MEM_BW_WRITES",
     },
     "iMC.MEM_BW_WRITES": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Defn": "Memory bandwidth consumed by writes  Expressed in bytes.",
          "Desc": "Write Memory Bandwidth",
          "Equation": "(CAS_COUNT.WR * 64)",
     },
     "iMC.PCT_CYCLES_CRITICAL_THROTTLE": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Defn": "The percentage of cycles all DRAM ranks in critical thermal throttling",
          "Desc": "Percent Cycles Critical Throttle",
          "Equation": "POWER_CRITICAL_THROTTLE_CYCLES / MC_Chy_PCI_PMON_CTR_FIXED",
     },
     "iMC.PCT_CYCLES_DLOFF": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Defn": "The percentage of cycles all DRAM ranks in CKE slow (DLOFF) mode",
          "Desc": "Percent Cycles DLOFF",
          "Equation": "POWER_CHANNEL_DLLOFF / MC_Chy_PCI_PMON_CTR_FIXED",
     },
     "iMC.PCT_CYCLES_DRAM_RANKx_IN_CKE": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Defn": "The percentage of cycles DRAM rank (x) spent in CKE ON mode.",
          "Desc": "Percent Cycles DRAM Rank x in CKE",
          "Equation": "POWER_CKE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED",
     },
     "iMC.PCT_CYCLES_DRAM_RANKx_IN_THR": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Defn": "The percentage of cycles DRAM rank (x) spent in thermal throttling.",
          "Desc": "Percent Cycles DRAM Rank x in CKE",
          "Equation": "POWER_THROTTLE_CYCLES.RANKx / MC_Chy_PCI_PMON_CTR_FIXED",
     },
     "iMC.PCT_CYCLES_PPD": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Defn": "The percentage of cycles all DRAM ranks in PPD mode",
          "Desc": "Percent Cycles PPD",
          "Equation": "POWER_CHANNEL_PPD / MC_Chy_PCI_PMON_CTR_FIXED",
     },
     "iMC.PCT_CYCLES_SELF_REFRESH": {
          "Box": "iMC",
          "Category": "iMC POWER Events",
          "Defn": "The percentage of cycles Memory is in self refresh power mode",
          "Desc": "Percent Cycles Self Refresh",
          "Equation": "POWER_SELF_REFRESH / MC_Chy_PCI_PMON_CTR_FIXED",
     },
     "iMC.PCT_RD_REQUESTS": {
          "Box": "iMC",
          "Category": "iMC RPQ Events",
          "Defn": "Percentage of read requests from total requests.",
          "Desc": "Percent Read Requests",
          "Equation": "RPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)",
     },
     "iMC.PCT_REQUESTS_PAGE_EMPTY": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Defn": "Percentage of memory requests that resulted in Page Empty",
          "Desc": "Percent Requests Page Empty",
          "Equation": "(ACT_COUNT - PRE_COUNT.PAGE_MISS)/ (CAS_COUNT.RD + CAS_COUNT.WR)",
     },
     "iMC.PCT_REQUESTS_PAGE_HIT": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Defn": "Percentage of memory requests that resulted in Page Hits",
          "Desc": "Percent Requests Page Hit",
          "Equation": "1 - (PCT_REQUESTS_PAGE_EMPTY + PCT_REQUESTS_PAGE_MISS)",
     },
     "iMC.PCT_REQUESTS_PAGE_MISS": {
          "Box": "iMC",
          "Category": "iMC CAS Events",
          "Defn": "Percentage of memory requests that resulted in Page Misses",
          "Desc": "Percent Requests Page Miss",
          "Equation": "PRE_COUNT.PAGE_MISS / (CAS_COUNT.RD + CAS_COUNT.WR)",
     },
     "iMC.PCT_WR_REQUESTS": {
          "Box": "iMC",
          "Category": "iMC WPQ Events",
          "Defn": "Percentage of write requests from total requests.",
          "Desc": "Percent Write Requests",
          "Equation": "WPQ_INSERTS / (RPQ_INSERTS + WPQ_INSERTS)",
     },

# R2PCIe:
     "R2PCIe.CYC_USED_DNEVEN": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Defn": "Cycles Used in the Down direction, Even polarity",
          "Desc": "Cycles Used Down and Even",
          "Equation": "RING_BL_USED.CCW_EVEN / SAMPLE_INTERVAL",
          "Obscure": 1,
     },
     "R2PCIe.CYC_USED_DNODD": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Defn": "Cycles Used in the Down direction, Odd polarity",
          "Desc": "Cycles Used Down and Odd",
          "Equation": "RING_BL_USED.CCW_ODD / SAMPLE_INTERVAL",
          "Obscure": 1,
     },
     "R2PCIe.CYC_USED_UPEVEN": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Defn": "Cycles Used in the Up direction, Even polarity",
          "Desc": "Cycles Used Up and Even",
          "Equation": "RING_BL_USED.CW_EVEN / SAMPLE_INTERVAL",
          "Obscure": 1,
     },
     "R2PCIe.CYC_USED_UPODD": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Defn": "Cycles Used in the Up direction, Odd polarity",
          "Desc": "Cycles Used Up and Odd",
          "Equation": "RING_BL_USED.CW_ODD / SAMPLE_INTERVAL",
          "Obscure": 1,
     },
     "R2PCIe.RING_THRU_DNEVEN_BYTES": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Defn": "Ring throughput in the Down direction, Even polarity in Bytes",
          "Desc": "Ring Throughput Down and Even",
          "Equation": "RING_BL_USED.CCW_EVEN * 32",
          "Obscure": 1,
     },
     "R2PCIe.RING_THRU_DNODD_BYTES": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Defn": "Ring throughput in the Down direction, Odd polarity in Bytes",
          "Desc": "Ring Throughput Down and Odd",
          "Equation": "RING_BL_USED.CCW_ODD * 32",
          "Obscure": 1,
     },
     "R2PCIe.RING_THRU_UPEVEN_BYTES": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Defn": "Ring throughput in the Up direction, Even polarity in Bytes",
          "Desc": "Ring Throughput Up and Even",
          "Equation": "RING_BL_USED.CW_EVEN * 32",
          "Obscure": 1,
     },
     "R2PCIe.RING_THRU_UPODD_BYTES": {
          "Box": "R2PCIe",
          "Category": "R2PCIe RING Events",
          "Defn": "Ring throughput in the Up direction, Odd polarity in Bytes",
          "Desc": "Ring Throughput Up and Odd",
          "Equation": "RING_BL_USED.CW_ODD * 32",
          "Obscure": 1,
     },

# QPI_LL:
     "QPI_LL.DATA_FROM_QPI": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "Data received from QPI in bytes  ( = DRS + NCB Data messages received from QPI)",
          "Desc": "Data From QPI",
          "Equation": "DRS_DATA_MSGS_FROM_QPI + NCB_DATA_MSGS_FROM_QPI",
     },
     "QPI_LL.DATA_FROM_QPI_TO_HA_OR_IIO": {
          "Box": "QPI_LL",
          "Category": "QPI_LL DIRECT2CORE Events",
          "Defn": "Data received from QPI forwarded to HA or IIO.  Expressed in Bytes",
          "Desc": "Data From QPI To HA or IIO",
          "Equation": "DATA_FROM_QPI - DATA_FROM_QPI_TO_LLC",
          "Broken": 1,
     },
     "QPI_LL.DATA_FROM_QPI_TO_LLC": {
          "Box": "QPI_LL",
          "Category": "QPI_LL DIRECT2CORE Events",
          "Defn": "Data received from QPI forwarded to LLC.  Expressed in Bytes",
          "Desc": "Data From QPI To LLC",
          "Equation": "DIRECT2CORE.SUCCESS * 64",
          "Broken": 1,
     },
     "QPI_LL.DATA_FROM_QPI_TO_NODEx": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "Data packets received from QPI sent to Node ID 'x'.  Expressed in bytes",
          "Desc": "Data From QPI To Node x",
          "Equation": "DRS_DataC_FROM_QPI_TO_NODEx + DRS_WRITE_FROM_QPI_TO_NODEx + NCB_DATA_FROM_QPI_TO_NODEx",
     },
     "QPI_LL.DRS_DATA_MSGS_FROM_QPI": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Defn": "DRS Data Messages From QPI in bytes",
          "Desc": "DRS Data Messages From QPI",
          "Equation": "(RxL_FLITS_G1.DRS_DATA * 8)",
          "Obscure": 1,
     },
     "QPI_LL.DRS_DataC_FROM_QPI_TO_NODEx": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "DRS DataC packets received from QPI sent to Node ID 'x'.  Expressed in bytes",
          "Desc": "DRS DataC From QPI To Node x",
          "Equation": "(CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1C00,x},Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FF80}) * 64",
          "Filter": "QPIMask0[17:0],QPIMatch0[17:0]",
          "Obscure": 1,
     },
     "QPI_LL.DRS_FULL_CACHELINE_MSGS_FROM_QPI": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "DRS Full Cacheline Data Messages From QPI in bytes",
          "Desc": "DRS Full Cacheline Data Messages From QPI",
          "Equation": "(CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00,Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1F00}) * 64",
          "Filter": "QPIMask0[12:0],QPIMatch0[12:0]",
          "Obscure": 1,
     },
     "QPI_LL.DRS_F_OR_E_FROM_QPI": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "DRS response in F or E states received from QPI in bytes.  To calculate the total data response for each cache line state, it's necessary to add the contribution from three flavors {DataC, DataC_FrcAckCnflt, DataC_Cmp} of data response packets for each cache line state.",
          "Desc": "DRS Data in F or E From QPI",
          "Equation": "((CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C40, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C40, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF })  + (CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C20, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x4, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) + (CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C20, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x1, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF })) * 64",
          "Filter": "QPIMask0[12:0],QPIMatch0[12:0],QPIMask1[19:16],QPIMatch1[19:16]",
          "Obscure": 1,
     },
     "QPI_LL.DRS_M_FROM_QPI": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "DRS response in M state received from QPI in bytes",
          "Desc": "DRS Data in M From QPI",
          "Equation": "(CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0, Q_Py_PCI_PMON_PKT_MATCH1[19:16]=0x8, Q_Py_PCI_PMON_PKT_MASK1[19:16]=0xF }) * 64",
          "Filter": "QPIMask0[12:0],QPIMatch0[12:0],QPIMask1[19:16],QPIMatch1[19:16]",
          "Obscure": 1,
     },
     "QPI_LL.DRS_PTL_CACHELINE_MSGS_FROM_QPI": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "DRS Partial Cacheline Data Messages From QPI in bytes",
          "Desc": "DRS Partial Cacheline Data Messages From QPI",
          "Equation": "(CTO_COUNT with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1D00, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1F00}) * 64",
          "Filter": "QPIMask0[12:0],QPIMatch0[12:0]",
          "Obscure": 1,
     },
     "QPI_LL.DRS_WB_FROM_QPI": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "DRS writeback packets received from QPI in bytes.  This is the sum of Wb{I,S,E} DRS packets",
          "Desc": "DRS Writeback From QPI",
          "Equation": "DRS_WbI_FROM_QPI + DRS_WbS_FROM_QPI + DRS_WbE_FROM_QPI",
          "Obscure": 1,
     },
     "QPI_LL.DRS_WRITE_FROM_QPI_TO_NODEx": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "DRS Data packets (Any  - DataC) received from QPI sent to Node ID 'x'.  Expressed in bytes",
          "Desc": "DRS Data From QPI To Node x",
          "Equation": "(CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1C00,x},Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FE00} - CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1C00,x},Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FF80}) * 64",
          "Filter": "QPIMask0[17:0],QPIMatch0[17:0]",
          "Obscure": 1,
     },
     "QPI_LL.DRS_WbE_FROM_QPI": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "DRS writeback 'change to E state' packets received from QPI in bytes",
          "Desc": "DRS WbE From QPI",
          "Equation": "(CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1CC0, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0}) * 64",
          "Filter": "QPIMask0[12:0],QPIMatch0[12:0]",
          "Obscure": 1,
     },
     "QPI_LL.DRS_WbI_FROM_QPI": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "DRS writeback 'change to I state' packets received from QPI in bytes",
          "Desc": "DRS WbI From QPI",
          "Equation": "(CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1C80, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0}) * 64",
          "Filter": "QPIMask0[12:0],QPIMatch0[12:0]",
          "Obscure": 1,
     },
     "QPI_LL.DRS_WbS_FROM_QPI": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "DRS writeback 'change to S state' packets received from QPI in bytes",
          "Desc": "DRS WbSFrom QPI",
          "Equation": "(CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0[12:0]=0x1CA0, Q_Py_PCI_PMON_PKT_MASK0[12:0]=0x1FE0}) * 64",
          "Filter": "QPIMask0[12:0],QPIMatch0[12:0]",
          "Obscure": 1,
     },
     "QPI_LL.NCB_DATA_FROM_QPI_TO_NODEx": {
          "Box": "QPI_LL",
          "Category": "QPI_LL CTO Events",
          "Defn": "NCB Data packets (Any - Interrupts) received from QPI sent to Node ID 'x'.  Expressed in bytes",
          "Desc": "NCB Data From QPI To Node x",
          "Equation": "((CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1800,x},Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FE00}) - (CTO_COUNT  with:{Q_Py_PCI_PMON_PKT_MATCH0{[12:0],dnid}={0x1900,x},Q_Py_PCI_PMON_PKT_MASK0[17:0]=0x3FF80})) * 64",
          "Filter": "QPIMask0[17:0],QPIMatch0[17:0]",
          "Obscure": 1,
     },
     "QPI_LL.NCB_DATA_MSGS_FROM_QPI": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Defn": "NCB Data Messages From QPI in bytes",
          "Desc": "NCB Data Messages From QPI",
          "Equation": "(RxL_FLITS_G2.NCB_DATA * 8)",
          "Obscure": 1,
     },
     "QPI_LL.PCT_LINK_FULL_POWER_CYCLES": {
          "Box": "QPI_LL",
          "Category": "QPI_LL POWER_RX Events",
          "Defn": "Percent of Cycles the QPI link is at Full Power",
          "Desc": "Percent Link Full Power Cycles",
          "Equation": "RxL0_POWER_CYCLES / CLOCKTICKS",
          "Obscure": 1,
     },
     "QPI_LL.PCT_LINK_HALF_DISABLED_CYCLES": {
          "Box": "QPI_LL",
          "Category": "QPI_LL POWER_RX Events",
          "Defn": "Percent of Cycles the QPI link in power mode where half of the lanes are disabled.",
          "Desc": "Percent Link Half Disabled Cycles",
          "Equation": "RxL0P_POWER_CYCLES / CLOCKTICKS",
          "Obscure": 1,
     },
     "QPI_LL.PCT_LINK_SHUTDOWN_CYCLES": {
          "Box": "QPI_LL",
          "Category": "QPI_LL POWER Events",
          "Defn": "Percent of Cycles the QPI link is Shutdown",
          "Desc": "Percent Link Shutdown Cycles",
          "Equation": "L1_POWER_CYCLES / CLOCKTICKS",
          "Obscure": 1,
     },
     "QPI_LL.QPI_LINK_UTIL": {
          "Box": "QPI_LL",
          "Category": "QPI_LL FLITS_RX Events",
          "Defn": "Percentage of cycles that QPI Link was utilized.  Calculated from 1 - Number of idle flits - time the link was 'off'",
          "Desc": "QPI Link Utilization",
          "Equation": "(RxL_FLITS_G0.DATA + RxL_FLITS_G0.NON_DATA) / (2 * CLOCKTICKS)",
     },

# PCU:
     "PCU.PCT_FREQ_BAND0": {
          "Box": "PCU",
          "Category": "PCU FREQ_RESIDENCY Events",
          "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  One can use all four counters with this event, so it is possible to track up to 4 configurable bands.  One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
          "Desc": "Frequency Residency",
          "Notes": "The PMON control registers in the PCU only update on a frequency transition.   Changing the measuring threshold during a sample interval may introduce errors in the counts.   This is especially true when running at a constant frequency for an extended period of time.  There is a corner case here: we set this code on the GV transition.  So, if we never GV we will never call this code.  This event does not include transition times.  It is handled on fast path.",
          "Equation": "FREQ_BAND0_CYCLES / CLOCKTICKS"
     },
     "PCU.PCT_FREQ_BAND1": {
          "Box": "PCU",
          "Category": "PCU FREQ_RESIDENCY Events",
          "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  One can use all four counters with this event, so it is possible to track up to 4 configurable bands.  One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
          "Desc": "Frequency Residency",
          "Notes": "The PMON control registers in the PCU only update on a frequency transition.   Changing the measuring threshold during a sample interval may introduce errors in the counts.   This is especially true when running at a constant frequency for an extended period of time.  There is a corner case here: we set this code on the GV transition.  So, if we never GV we will never call this code.  This event does not include transition times.  It is handled on fast path.",
          "Equation": "FREQ_BAND1_CYCLES / CLOCKTICKS"
     },
     "PCU.PCT_FREQ_BAND2": {
          "Box": "PCU",
          "Category": "PCU FREQ_RESIDENCY Events",
          "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  One can use all four counters with this event, so it is possible to track up to 4 configurable bands.  One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
          "Desc": "Frequency Residency",
          "Notes": "The PMON control registers in the PCU only update on a frequency transition.   Changing the measuring threshold during a sample interval may introduce errors in the counts.   This is especially true when running at a constant frequency for an extended period of time.  There is a corner case here: we set this code on the GV transition.  So, if we never GV we will never call this code.  This event does not include transition times.  It is handled on fast path.",
          "Equation": "FREQ_BAND2_CYCLES / CLOCKTICKS"
     },
     "PCU.PCT_FREQ_BAND3": {
          "Box": "PCU",
          "Category": "PCU FREQ_RESIDENCY Events",
          "Defn": "Counts the percent that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter.  One can use all four counters with this event, so it is possible to track up to 4 configurable bands.  One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
          "Desc": "Frequency Residency",
          "Notes": "The PMON control registers in the PCU only update on a frequency transition.   Changing the measuring threshold during a sample interval may introduce errors in the counts.   This is especially true when running at a constant frequency for an extended period of time.  There is a corner case here: we set this code on the GV transition.  So, if we never GV we will never call this code.  This event does not include transition times.  It is handled on fast path.",
          "Equation": "FREQ_BAND3_CYCLES / CLOCKTICKS"
     },
     "PCU.PCT_FREQ_CURRENT_LTD": {
          "Box": "PCU",
          "Category": "PCU FREQ_MAX_LIMIT Events",
          "Defn": "Percent of Cycles the Max Frequency is limited by current",
          "Desc": "Percent of Cycles Frequency Current Limited",
          "Equation": "FREQ_MAX_CURRENT_CYCLES / CLOCKTICKS",
     },
     "PCU.PCT_FREQ_OS_LTD": {
          "Box": "PCU",
          "Category": "PCU FREQ_MAX_LIMIT Events",
          "Defn": "Percent of Cycles the Max Frequency is limited by the OS",
          "Desc": "Percent of Cycles Frequency OS Limited",
          "Equation": "FREQ_MAX_OS_CYCLES / CLOCKTICKS",
     },
     "PCU.PCT_FREQ_POWER_LTD": {
          "Box": "PCU",
          "Category": "PCU FREQ_MAX_LIMIT Events",
          "Defn": "Percent of Cycles the Max Frequency is limited by power",
          "Desc": "Percent of Cycles Frequency Power Limited",
          "Equation": "FREQ_MAX_POWER_CYCLES / CLOCKTICKS",
     },
     "PCU.PCT_FREQ_THERMAL_LTD": {
          "Box": "PCU",
          "Category": "PCU FREQ_MAX_LIMIT Events",
          "Defn": "Percent of Cycles the Max Frequency is limited by thermal issues",
          "Desc": "Percent of Cycles Frequency Thermal Limited",
          "Equation": "FREQ_MAX_CURRENT_CYCLES / CLOCKTICKS",
     },

# CBO:
     "CBO.AVG_INGRESS_DEPTH": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Defn": "Average Depth of the Ingress Queue through the sample interval",
          "Desc": "Average Ingress Depth",
          "Equation": "RxR_OCCUPANCY.IRQ  / SAMPLE_INTERVAL",
          "Obscure": 1,
     },
     "CBO.AVG_INGRESS_LATENCY": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks",
          "Desc": "Average Ingress Latency",
          "Equation": "RxR_OCCUPANCY.IRQ / RxR_INSERTS.IRQ",
          "Obscure": 1,
     },
     "CBO.AVG_INGRESS_LATENCY_WHEN_NE": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Defn": "Average Latency of Requests through the Ingress Queue in Uncore Clocks when Ingress Queue has at least one entry",
          "Desc": "Average Latency in Non-Empty Ingress",
          "Equation": "RxR_OCCUPANCY.IRQ / COUNTER0_OCCUPANCY with:{edge_det=1,thresh=0x1}",
          "Obscure": 1,
     },
     "CBO.AVG_TOR_DRDS_MISS_WHEN_NE": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Defn": "Average Number of Data Read Entries that Miss the LLC when the TOR is not empty.",
          "Desc": "Average Data Read Misses in Non-Empty TOR",
          "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / COUNTER0_OCCUPANCY with:{edge_det=1,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182",
          "Filter": "CBoFilter[31:23]",
          "Obscure": 1,
     },
     "CBO.AVG_TOR_DRDS_WHEN_NE": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Defn": "Average Number of Data Read Entries when the TOR is not empty.",
          "Desc": "Average Data Reads in Non-Empty TOR",
          "Equation": "(TOR_OCCUPANCY.OPCODE / COUNTER0_OCCUPANCY with:{edge_det=1,thresh=0x1}) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182",
          "Filter": "CBoFilter[31:23]",
          "Obscure": 1,
     },
     "CBO.AVG_TOR_DRD_HIT_LATENCY": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Defn": "Average Latency of Data Reads through the TOR that hit the LLC",
          "Desc": "Data Read Hit Latency through TOR",
          "Equation": "((TOR_OCCUPANCY.OPCODE  - TOR_OCCUPANCY.MISS_OPCODE) / (TOR_INSERTS.OPCODE - TOR_INSERTS.MISS_OPCODE)) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182",
          "Filter": "CBoFilter[31:23]",
          "Obscure": 1,
     },
     "CBO.AVG_TOR_DRD_LATENCY": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Defn": "Average Latency of Data Read Entries making their way through the TOR",
          "Desc": "Data Read Latency through TOR",
          "Equation": "(TOR_OCCUPANCY.OPCODE / TOR_INSERTS.OPCODE) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182",
          "Filter": "CBoFilter[31:23]",
          "Obscure": 1,
     },
     "CBO.AVG_TOR_DRD_MISS_LATENCY": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Defn": "Average Latency of Data Reads through the TOR that miss the LLC",
          "Desc": "Data Read Miss Latency through TOR",
          "Equation": "(TOR_OCCUPANCY.MISS_OPCODE / TOR_INSERTS.MISS_OPCODE) with:Cn_MSR_PMON_BOX_FILTER.opc=0x182",
          "Filter": "CBoFilter[31:23]",
          "Obscure": 1,
     },
     "CBO.CYC_INGRESS_BLOCKED": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Defn": "Cycles the Ingress Request Queue arbiter was Blocked",
          "Desc": "Cycles Ingress Blocked",
          "Equation": "RxR_EXT_STARVED.IRQ  / SAMPLE_INTERVAL",
          "Obscure": 1,
     },
     "CBO.CYC_USED_DNEVEN": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Defn": "Cycles Used in the Down direction, Even polarity",
          "Desc": "Cycles Used Down and Even",
          "Equation": "RING_BL_USED.DOWN_EVEN / SAMPLE_INTERVAL",
          "Obscure": 1
     },
     "CBO.CYC_USED_DNODD": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Defn": "Cycles Used in the Down direction, Odd polarity",
          "Desc": "Cycles Used Down and Odd",
          "Equation": "RING_BL_USED.DOWN_ODD / SAMPLE_INTERVAL",
          "Obscure": 1
     },
     "CBO.CYC_USED_UPEVEN": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Defn": "Cycles Used in the Up direction, Even polarity",
          "Desc": "Cycles Used Up and Even",
          "Equation": "RING_BL_USED.UP_EVEN / SAMPLE_INTERVAL",
          "Obscure": 1
     },
     "CBO.CYC_USED_UPODD": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Defn": "Cycles Used in the Up direction, Odd polarity",
          "Desc": "Cycles Used Up and Odd",
          "Equation": "RING_BL_USED.UP_ODD / SAMPLE_INTERVAL",
          "Obscure": 1
     },
     "CBO.INGRESS_REJ_V_INS": {
          "Box": "CBO",
          "Category": "CBO INGRESS Events",
          "Defn": "Ratio of Ingress Request Entries that were rejected vs. inserted",
          "Desc": "Ingress Rejects vs. Inserts",
          "Equation": "RxR_INSERTS.IRQ_REJECTED  / RxR_INSERTS.IRQ",
          "Obscure": 1
     },
     "CBO.LLC_DRD_MISS_PCT": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Defn": "LLC Data Read miss ratio",
          "Desc": "LLC DRD Miss Ratio",
          "Equation": "LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER.state=0x1 / LLC_LOOKUP.DATA_READ with:Cn_MSR_PMON_BOX_FILTER.state=0x1F",
          "Filter": "CBoFilter[22:18]",
          "Obscure": 1, # too much multiplexing error
     },
     "CBO.LLC_PCIE_DATA_BYTES": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Defn": "LLC Miss Data from PCIe in Number of Bytes",
          "Desc": "LLC Miss Data from PCIe",
          "Equation": "TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19C * 64",
          "Filter": "CBoFilter[31:23]",
          "Broken": 1,
     },
     "CBO.LLC_RFO_MISS_PCT": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Defn": "LLC RFO Miss Ratio",
          "Desc": "LLC RFO Miss Ratio",
          "Equation": "(TOR_INSERTS.MISS_OPCODE / TOR_INSERTS.OPCODE) with:Cn_MSR_PMON_BOX_FILTER.opc=0x180",
          "Filter": "CBoFilter[31:23]",
     },
     "CBO.MEM_WB_BYTES": {
          "Box": "CBO",
          "Category": "CBO CACHE Events",
          "Defn": "Data written back to memory in Number of Bytes",
          "Desc": "Memory Writebacks",
          "Equation": "LLC_VICTIMS.M_STATE * 64",
     },
     "CBO.PCIE_DATA_BYTES": {
          "Box": "CBO",
          "Category": "CBO TOR Events",
          "Defn": "Data from PCIe in Number of Bytes",
          "Desc": "PCIe Data Traffic",
          "Equation": "(TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x194 + TOR_INSERTS.OPCODE with:Cn_MSR_PMON_BOX_FILTER.opc=0x19c) * 64",
          "Filter": "CBoFilter[31:23]",
          "Broken": 1,
     },
     "CBO.RING_THRU_DNEVEN_BYTES": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Defn": "Ring throughput in the Down direction, Even polarity in Bytes",
          "Desc": "Ring Throughput Down and Even",
          "Equation": "RING_BL_USED.DOWN_EVEN * 32",
          "Obscure": 1,
     },
     "CBO.RING_THRU_DNODD_BYTES": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Defn": "Ring throughput in the Down direction, Odd polarity in Bytes",
          "Desc": "Ring Throughput Down and Odd",
          "Equation": "RING_BL_USED.DOWN_ODD * 32",
          "Obscure": 1,
     },
     "CBO.RING_THRU_UPEVEN_BYTES": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Defn": "Ring throughput in the Up direction, Even polarity in Bytes",
          "Desc": "Ring Throughput Up and Even",
          "Equation": "RING_BL_USED.UP_EVEN * 32",
          "Obscure": 1,
     },
     "CBO.RING_THRU_UPODD_BYTES": {
          "Box": "CBO",
          "Category": "CBO RING Events",
          "Defn": "Ring throughput in the Up direction, Odd polarity in Bytes",
          "Desc": "Ring Throughput Up and Odd",
          "Equation": "RING_BL_USED.UP_ODD * 32",
          "Obscure": 1,
     },
}
# Master list of category names referenced by the "Category" field of the
# entries in the events dict above.  Entries appear to be kept in ASCII sort
# order (grouped by box: CBO, HA, PCU, QPI_LL, R2PCIe, R3QPI, UBOX, iMC);
# preserve that ordering when adding new categories.
categories = (
     "CBO CACHE Events",
     "CBO EGRESS Events",
     "CBO INGRESS Events",
     "CBO INGRESS_RETRY Events",
     "CBO ISMQ Events",
     "CBO MISC Events",
     "CBO OCCUPANCY Events",
     "CBO RING Events",
     "CBO TOR Events",
     "CBO UCLK Events",
     "HA ADDR_OPCODE_MATCH Events",
     "HA AD_EGRESS Events",
     "HA AK_EGRESS Events",
     "HA BL_EGRESS Events",
     "HA CONFLICTS Events",
     "HA DIRECT2CORE Events",
     "HA DIRECTORY Events",
     "HA IMC_MISC Events",
     "HA IMC_WRITES Events",
     "HA OUTBOUND_TX Events",
     "HA QPI_IGR_CREDITS Events",
     "HA REQUESTS Events",
     "HA RPQ_CREDITS Events",
     "HA TAD Events",
     "HA TRACKER Events",
     "HA UCLK Events",
     "HA WPQ_CREDITS Events",
     "PCU CORE_C_STATE_TRANSITION Events",
     "PCU FREQ_MAX_LIMIT Events",
     "PCU FREQ_MIN_LIMIT Events",
     "PCU FREQ_RESIDENCY Events",
     "PCU FREQ_TRANS Events",
     "PCU MEMORY_PHASE_SHEDDING Events",
     "PCU PCLK Events",
     "PCU POWER_STATE_OCC Events",
     "PCU PROCHOT Events",
     "PCU VOLT_TRANS Events",
     "PCU VR_HOT Events",
     "QPI_LL CFCLK Events",
     "QPI_LL CRC_ERRORS_RX Events",
     "QPI_LL CTO Events",
     "QPI_LL DIRECT2CORE Events",
     "QPI_LL FLITS_RX Events",
     "QPI_LL FLITS_TX Events",
     "QPI_LL POWER Events",
     "QPI_LL POWER_RX Events",
     "QPI_LL POWER_TX Events",
     "QPI_LL RXQ Events",
     "QPI_LL RX_CREDITS_CONSUMED Events",
     "QPI_LL TXQ Events",
     "QPI_LL VNA_CREDIT_RETURN Events",
     "R2PCIe EGRESS Events",
     "R2PCIe INGRESS Events",
     "R2PCIe RING Events",
     "R2PCIe UCLK Events",
     "R3QPI EGRESS Events",
     "R3QPI IIO_CREDITS Events",
     "R3QPI INGRESS Events",
     "R3QPI LINK_VN0_CREDITS Events",
     "R3QPI LINK_VNA_CREDITS Events",
     "R3QPI RING Events",
     "R3QPI UCLK Events",
     "UBOX EVENT_MSG Events",
     "UBOX FILTER_MATCH Events",
     "UBOX LOCK Events",
     "iMC ACT Events",
     "iMC CAS Events",
     "iMC DRAM_PRE_ALL Events",
     "iMC DRAM_REFRESH Events",
     "iMC ECC Events",
     "iMC MAJOR_MODES Events",
     "iMC POWER Events",
     "iMC PRE Events",
     "iMC PREEMPTION Events",
     "iMC RPQ Events",
     "iMC WPQ Events",
)
