@misc{PV:altos-da,
  title        = {{Altos Design Automation, Inc.}},
  howpublished = {\url{http://www.altos-da.com}},
  note         = {Company website},
}



@misc{PV:blaze-dfm,
  title        = {{Blaze DFM, Inc.}},
  howpublished = {\url{http://www.blaze-dfm.com}},
  note         = {Company website},
}



@misc{PV:extreme-da,
  title        = {{Extreme DA, Inc.}},
  howpublished = {\url{http://www.extreme-da.com}},
  note         = {Company website},
}



@book{PV:MIT-book01,
  author    = {Chandrakasan, A. and Bowhill, W. and Fox, F.},
  title     = {Design of High-Performance Microprocessor Circuits},
  publisher = {John Wiley \& Sons},
  year      = {2001},
}



@book{PV:APA65,
  author    = {Papoulis, A.},
  title     = {Probability, Random Variables and Stochastic Processes},
  publisher = {McGraw-Hill},
  address   = {New York, NY},
  year      = {1965},
}



@inproceedings{PV:ANY+02,
  author    = {Acar, E. and Nassif, S. and Liu, Ying and Pileggi, L. T.},
  title     = {Time-domain simulation of variational interconnect models},
  booktitle = {International Symposium on Quality Electronic Design},
  pages     = {419--424},
  abstract  = {Interconnect parameter variations are more significant in the nanometer regime due to the increase in relative tolerances for upcoming integration technologies. As several variability studies indicate the significant role of the interconnect on system performance, the analysis of linear models is extremely crucial. Contrary to devices, the extreme case scenarios do not apply for context-dependent interconnect, necessitating a statistical analysis framework. A previously proposed approach to represent interconnect models in terms of global interconnect parameters is necessary in such frameworks. In this paper we present efficient ways of simulating these variational interconnect models in the presence of nonlinear devices. We demonstrate our methodology by incorporating variational interconnect models into transistor-level simulation with accurate nonlinear device models.},
  keywords  = {circuit simulation; integrated circuit interconnections; integrated circuit modelling; statistical analysis; time-domain analysis; variational techniques; context-dependent interconnect; global interconnect parameters; interconnect parameter variations; linear model analysis; nonlinear device models; process tolerances; statistical analysis framework; system performance variability; time-domain simulation; transistor-level simulation; variational interconnect models},
  year      = {2002},
}



@inproceedings{PV:ABZ03b,
  author    = {Agarwal, A. and Blaauw, D. and Zolotov, V.},
  title     = {Statistical clock skew analysis considering intra-die process variations},
  booktitle = {International Conference on Computer Aided Design},
  pages     = {914--921},
  abstract  = {With shrinking cycle times, clock skew has become an increasingly difficult and important problem for high performance designs. Traditionally, clock skew has been analyzed using case-files which cannot model intra-die process variations and hence result in a very optimistic skew analysis. In this paper, we present a statistical skew analysis method to model intra-die process variations. We first present a formal model of the statistical clock skew problem and then propose an algorithm which is based on propagation of joint probability distribution functions in a bottom up fashion in a clock tree. The analysis accounts for topological correlations between path delays and has linear run time with the size of the clock tree. The proposed method was tested on several large clock tree circuits, including a clock tree from a large industrial high-performance microprocessor. The results are compared with Monte Carlo simulation for accuracy comparison and demonstrate the need for statistical analysis of clock skew.},
  keywords  = {Monte Carlo methods; clocks; statistical analysis; statistical distributions; JPDFs; Monte Carlo simulation; case files; clock tree circuits; high performance microprocessor; intra-die process variations; joint probability distribution functions; linear run time; path delays; performance designs; shrinking cycle times; statistical clock skew analysis; topological correlations},
  year      = {2003},
}



@inproceedings{PV:ABZ03a,
  author    = {Agarwal, A. and Blaauw, D. and Zolotov, V.},
  title     = {Statistical timing analysis for intra-die process variations with spatial correlations},
  booktitle = {International Conference on Computer Aided Design},
  pages     = {900--907},
  abstract  = {Process variations have become a critical issue in performance verification of high-performance designs. We present a new, statistical timing analysis method that accounts for inter- and intra-die process variations and their spatial correlations. Since statistical timing analysis has an exponential run time complexity, we propose a method whereby a statistical bound on the probability distribution function of the exact circuit delay is computed with linear run time. First, we develop a model for representing inter- and intra-die variations and their spatial correlations. Using this model, we then show how gate delays and arrival times can be represented as a sum of components, such that the correlation information between arrival times and gate delays is preserved. We then show how arrival times are propagated and merged in the circuit to obtain an arrival time distribution that is an upper bound on the distribution of the exact circuit delay. We prove the correctness of the bound and also show how the bound can be improved by propagating multiple arrival times. The proposed algorithms were implemented and tested on a set of benchmark circuits under several process variation scenarios. The results were compared with Monte Carlo simulation and show an accuracy of 3.32\% on average over all test cases.},
  keywords  = {Monte Carlo methods; benchmark testing; computational complexity; delay circuits; statistical analysis; statistical distributions; Monte Carlo simulation; benchmark circuits; circuit delay; exponential run time complexity; gate delays; inter-die process variations; intra-die process variations; probability distribution function; spatial correlations; statistical timing analysis},
  year      = {2003},
}



@inproceedings{PV:ABZ+03a,
  author    = {Agarwal, A. and Blaauw, D. and Zolotov, V. and Sundareswaran, S. and Zhao, Min and Gala, K. and Panda, R.},
  title     = {Statistical delay computation considering spatial correlations},
  booktitle = {Asia and South Pacific Design Automation Conference},
  pages     = {271--276},
  abstract  = {Process variation has become a significant concern for static timing analysis. In this paper, we present a new method for path-based statistical timing analysis. We first propose a method for modeling inter- and intra-die device length variations. Based on this model, we then present an efficient method for computing the total path delay probability distribution using a combination of device length enumeration for inter-die variation and an analytical approach for intra-die variation. We also propose a simple and effective model of spatial correlation of intra-die device length variation. The analysis is then extended to include spatial correlation. We test the proposed methods on paths from an industrial high-performance microprocessor and present comparisons with traditional path analysis which does not distinguish between inter- and intra-die variations. The characteristics of the device length distributions were obtained from measured data of 8 test chips with a total of 17688 device length measurements. Spatial correlation data was also obtained from these measurements. We demonstrate the accuracy of the proposed approach by comparing our results with Monte-Carlo simulation.},
  keywords  = {Monte Carlo methods; correlation methods; delay estimation; integrated circuit testing; microprocessor chips; timing; Monte-Carlo simulation; device length enumeration; inter-die device length variations; intra-die device length variations; microprocessor; path-based statistical timing analysis; process variation; spatial correlation; spatial correlations; static timing analysis; statistical delay computation; test chips; total path delay probability distribution},
  year      = {2003},
}



@inproceedings{PV:ABZ+03b,
  author    = {Agarwal, A. and Blaauw, D. and Zolotov, V. and Vrudhula, S.},
  title     = {Statistical timing analysis using bounds},
  booktitle = {Design, Automation and Test in Europe Conference and Exhibition},
  pages     = {62--67},
  abstract  = {The growing impact of within-die process variation has created the need for statistical timing analysis, where gate delays are modeled as random variables. Statistical timing analysis has traditionally suffered from exponential run time complexity with circuit size, due to the dependencies created by reconverging paths in the circuit. In this paper, we propose a new approach to statistical timing analysis which uses statistical bounds. First, we provide a formal definition of the statistical delay of a circuit and derive a statistical timing analysis method from this definition. Since this method for finding the exact statistical delay has exponential run time complexity with circuit size, we also propose a new method for computing statistical bounds which has linear run time complexity. We prove the correctness of the proposed bounds. Since we provide both a lower and upper bound on the true statistical delay, we can determine the quality of the bounds. The proposed methods were implemented and tested on benchmark circuits. The results demonstrate that the proposed bounds have only a small error.},
  keywords  = {boundary-value problems; formal verification; integrated circuit design; logic design; logic simulation; statistical analysis; timing; IC verification; circuit reconverging paths; circuit size dependent run time complexity; circuit statistical delay definition; gate delay modeling; statistical bounds; statistical timing analysis; within-die process variation},
  year      = {2003},
}



@inproceedings{PV:ABZ+05,
  author    = {Agarwal, A. and Chopra, K. and Blaauw, D. and Zolotov, V.},
  title     = {Circuit optimization using statistical static timing analysis},
  booktitle = {Design Automation Conference},
  pages     = {321--324},
  abstract  = {In this paper, we propose a new sensitivity based, statistical gate sizing method. Since circuit optimization effects the entire shape of the circuit delay distribution, it is difficult to capture the quality of a distribution with a single metric. Hence, we first introduce a new objective function that provides an effective measure for the quality of a delay distribution for both ASIC and high performance designs. We then propose an efficient and exact sensitivity based pruning algorithm based on a newly proposed theory of perturbation bounds. A heuristic approach for sensitivity computation which relies on efficient computation of statistical slack is then introduced. Finally, we show how the pruning and statistical slack based approaches can be combined to obtain nearly identical results compared with the brute-force approach but with an average run-time improvement of up to 89$\times$. We also compare the optimization results against that of a deterministic optimizer and show an improvement up to 16\% in the 99-percentile circuit delay and up to 31\% in the standard deviation for the same circuit area.},
  keywords  = {application specific integrated circuits; circuit optimisation; integrated circuit design; perturbation theory; sensitivity analysis; statistical analysis; ASIC designs; circuit delay distribution; circuit optimization; deterministic optimizer; high performance designs; perturbation bounds; sensitivity based pruning algorithm; statistical gate sizing method; statistical slack; statistical static timing analysis},
  year      = {2005},
}



@inproceedings{PV:ADB04,
  author    = {Agarwal, A. and Dartu, F. and Blaauw, D.},
  title     = {Statistical gate delay model considering multiple input switching},
  booktitle = {Design Automation Conference},
  pages     = {658--663},
  year      = {2004},
}



@article{PV:APM+05,
  author    = {Agarwal, A. and Paul, B. C. and Mukhopadhyay, S. and Roy, K.},
  title     = {Process variation in embedded memories: failure analysis and variation aware architecture},
  journal   = {IEEE Journal of Solid-State Circuits},
  volume    = {40},
  number    = {9},
  pages     = {1804--1814},
  issn      = {0018-9200},
  abstract  = {With scaling of device dimensions, microscopic variations in number and location of dopant atoms in the channel region of the device induce increasingly limiting electrical deviations in device characteristics such as threshold voltage. These atomic-level intrinsic fluctuations cannot be eliminated by external control of the manufacturing process and are most pronounced in minimum-geometry transistors commonly used in area-constrained circuits such as SRAM cells. Consequently, a large number of cells in a memory are expected to be faulty due to process variations in sub-50-nm technologies. This paper analyzes SRAM cell failures under process variation and proposes new variation-aware cache architecture suitable for high performance applications. The proposed architecture adaptively resizes the cache to avoid faulty cells, thereby improving yield. This scheme is transparent to processor architecture and has negligible energy and area overhead. Experimental results on a 32 K direct map L1 cache show that the proposed architecture can achieve 93\% yield compared to its original 33\%. The Simplescalar simulation shows that designing the data and instruction cache using the proposed architecture results in 1.5\% and 5.7\% average CPU performance loss (over SPEC 2000 benchmarks), respectively, for the chips with maximum number of faulty cells which can be tolerated by our proposed scheme.},
  keywords  = {SRAM chips; cache storage; computer architecture; failure analysis; integrated circuit reliability; network analysis; SRAM cell failures; cache resizing; embedded memories; integrated circuit; process variation; processor architecture; variation-aware cache architecture; resizing; SRAM failures; variation aware cache; yield},
  year      = {2005},
}



@article{PV:ABZ03c,
  author    = {Agarwal, A. and Zolotov, V. and Blaauw, D. T.},
  title     = {Statistical timing analysis using bounds and selective enumeration},
  journal   = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
  volume    = {22},
  number    = {9},
  pages     = {1243--1260},
  issn      = {0278-0070},
  abstract  = {The growing impact of within-die process variation has created the need for statistical timing analysis, where gate delays are modeled as random variables. Statistical timing analysis has traditionally suffered from exponential run time complexity with circuit size, due to arrival time dependencies created by reconverging paths in the circuit. In this paper, we propose a new approach to statistical timing analysis which uses statistical bounds and selective enumeration to refine these bounds. First, we provide a formal definition of the statistical delay of a circuit and derive a statistical timing analysis method from this definition. Since this method for finding the exact statistical delay has exponential run time complexity with circuit size, we also propose a new method for computing statistical bounds which has linear run time complexity. We prove the correctness of the proposed bounds. Since we provide both a lower and upper bound on the true statistical delay, we can determine the quality of the bounds. If the computed bounds are not sufficiently close to each other, we propose a heuristic to iteratively improve the bounds using selective enumeration of the sample space with additional run time. The proposed methods were implemented and tested on benchmark circuits. The results demonstrate that the proposed bounds have only a small error, which can be further reduced using selective enumeration with modest additional run time.},
  keywords  = {integrated circuit modelling; statistical analysis; timing; circuit delay; selective enumeration; statistical bounds; statistical timing analysis; within-die process variation},
  year      = {2003},
}



@inproceedings{PV:ASB03,
  author    = {Agarwal, K. and Sylvester, D. and Blaauw, D.},
  title     = {Simple metrics for slew rate of {RC} circuits based on two circuit moments},
  booktitle = {Design Automation Conference},
  pages     = {950--953},
  abstract  = {In this paper, we introduce simple metrics for the slew rate of an RC circuit based on the first two circuit moments. We develop two new slew metrics, S2M (slew with 2 moments) and scaled S2M, that provide high accuracy with the advantage of simple closed form expressions. S2M is very accurate for middle and far end nodes but it does not perform as well for near end nodes. Scaled S2M is developed to improve upon S2M for near end nodes and is shown to be highly accurate for near as well as far end nodes. For a large set of nets from an industrial 0.18~$\mu$m microprocessor, S2M matches SPICE within 2\% on average with 78\% of the sinks having less than 1\% error. For the same test cases, the average error for scaled S2M is less than 3\% with 99.4\% of the nodes showing less than 5\% error.},
  keywords  = {RC circuits; delay estimation; integrated circuit interconnections; integrated circuit layout; integrated circuit modelling; RC circuit; S2M; circuit moment; end node; scaled S2M; slew metrics; slew rate; slew with 2 moment},
  year      = {2003},
}



@inproceedings{PV:ASB+04,
  author    = {Agarwal, K. and Sylvester, D. and Blaauw, D. and Liu, F. and Nassif, S. and Vrudhula, S.},
  title     = {Variational delay metrics for interconnect timing analysis},
  booktitle = {Design Automation Conference},
  pages     = {381--384},
  year      = {2004},
}



@inproceedings{PV:BLP+06,
  author    = {Akgul, B. and Chakrapani, Lakshmi N. and Korkmaz, Pinar and Palem, Krishna V.},
  title     = {Probabilistic {CMOS} Technology: A Survey and Future Directions},
  booktitle = {IFIP International Conference on Very Large Scale Integration (VLSI-SoC)},
  year      = {2006},
}



@inproceedings{PV:ANR+04,
  author    = {{Arvind} and Nikhil, R. S. and Rosenband, D. L. and Dave, N.},
  title     = {High-level synthesis: an essential ingredient for designing complex {ASICs}},
  booktitle = {IEEE/ACM International Conference on Computer Aided Design},
  pages     = {775--782},
  abstract  = {It is common wisdom that synthesizing hardware from higher-level descriptions than Verilog incurs a performance penalty. The case study here shows that this need not be the case. If the higher-level language has suitable semantics, it is possible to synthesize hardware that is competitive with hand-written Verilog RTL. Differences in the hardware quality are dominated by architecture differences and, therefore, it is more important to explore multiple hardware architectures. This exploration is not practical without quality synthesis from higher-level languages.},
  keywords  = {application specific integrated circuits; hardware description languages; high level languages; high level synthesis; integrated circuit design; ASIC; Verilog RTL; hardware architectures; hardware quality; hardware synthesis; high-level synthesis; quality synthesis},
  year      = {2004},
}



@article{PV:ASE98,
  author    = {Asenov, A.},
  title     = {Random dopant induced threshold voltage lowering and fluctuations in sub-0.1~{$\mu$m} {MOSFET}'s: A {3-D} ``atomistic'' simulation study},
  journal   = {IEEE Transactions on Electron Devices},
  volume    = {45},
  number    = {12},
  pages     = {2505--2513},
  issn      = {0018-9383},
  abstract  = {A three-dimensional (3-D) ``atomistic'' simulation study of random dopant induced threshold voltage lowering and fluctuations in sub-0.1~$\mu$m MOSFETs is presented. For the first time a systematic analysis of random dopant effects down to an individual dopant level was carried out in 3-D on a scale sufficient to provide quantitative statistical predictions. Efficient algorithms based on a single multigrid solution of the Poisson equation followed by the solution of a simplified current continuity equation are used in the simulations. The effects of various MOSFET design parameters, including the channel length and width, oxide thickness and channel doping, on the threshold voltage lowering and fluctuations are studied using typical samples of 200 atomistically different MOSFETs. The atomistic results for the threshold voltage fluctuations were compared with two analytical models based on dopant number fluctuations. Although the analytical models predict the general trends in the threshold voltage fluctuations, they fail to describe quantitatively the magnitude of the fluctuations. The distribution of the atomistically calculated threshold voltage and its correlation with the number of dopants in the channel of the MOSFETs was analyzed based on a sample of 2500 microscopically different devices. The detailed analysis shows that the threshold voltage fluctuations are determined not only by the fluctuation in the dopant number, but also in the dopant position},
  keywords  = {MOSFET; Poisson equation; doping profiles; semiconductor device models; 0.1 micron; current continuity equation; random dopant; three-dimensional atomistic simulation; threshold voltage fluctuations; threshold voltage lowering},
  year      = {1998},
}



@article{PV:razor,
  author    = {Austin, T. and Blaauw, D. and Mudge, T. and Flautner, K.},
  title     = {Making typical silicon matter with {Razor}},
  journal   = {IEEE Computer},
  volume    = {37},
  number    = {3},
  pages     = {57--65},
  issn      = {0018-9162},
  abstract  = {Voltage scaling has emerged as a powerful technology for addressing the power challenges that current on-chip densities pose. Razor is a voltage-scaling technology based on dynamic, in-situ detection and correction of circuit-timing errors. Razor permits design optimizations that tune the energy in a microprocessor pipeline to typical circuit-operational levels. This eliminates the voltage margins that traditional worst-case design methodologies require and lets digital systems run correctly and robustly at the edge of minimum power consumption. Occasional heavyweight computations may fail and require additional time and energy for recovery, but the optimized pipeline requires significantly less energy overall than traditional designs.},
  keywords  = {circuit optimisation; error correction; error detection; hardware-software codesign; logic CAD; microprocessor chips; pipeline processing; power consumption; timing; Razor; circuit-timing error correction; circuit-timing error detection; codesign methodology; design optimizations; digital systems; microprocessor pipeline; minimum power consumption; on-chip densities; timing speculation; voltage scaling},
  year      = {2004},
}



@inproceedings{PV:AN05,
  author    = {Azizi, N. and Najm, F. N.},
  title     = {Compensation for within-die variations in dynamic logic by using body-bias},
  booktitle = {IEEE-NEWCAS Conference},
  pages     = {167--170},
  abstract  = {We propose a fine-grained scheme to compensate for within-die variations in dynamic logic to reduce the variation in leakage, delay and noise margin through body-biasing. We first show that the amount of body-bias compensation needed depends on the correlation that exists between gates, and then analytically show the possible reduction in the variance of the leakage of both a single and multiple dynamic logic gates. We then design a circuit to implement the system which provides the reduction in the variance of the leakage, delay and noise margin of dynamic logic gates and show that it produces a close match to the analytical results. In our design, the variance of a typical test circuit is reduced by 27\% and the variance of the path delay is reduced by 33\%.},
  keywords  = {compensation; leakage currents; logic design; logic gates; body bias; delay variation; dynamic logic gates; leakage variation; noise margin variation; variance reduction; within-die variation compensation},
  year      = {2005},
}



@inproceedings{PV:XVS+02,
  author    = {Bai, Xiaoliang and Visweswariah, C. and Strenski, P. N. and Hathaway, D. J.},
  title     = {Uncertainty-aware circuit optimization},
  booktitle = {Design Automation Conference},
  pages     = {58--63},
  abstract  = {Well-tuned digital circuits have a large number of equally critical paths, which form a so-called ``wall'' in the slack histogram. However, by the time the design has been through manufacturing, many uncertainties cause these carefully aligned delays to spread out. Inaccuracies in parasitic predictions, clock slew, model-to-hardware correlation, static timing assumptions and manufacturing variations all cause the performance to vary from prediction. Simple statistical principles tell us that the variation of the limiting slack is larger when the height of the wall is greater. Although the wall may be the optimum solution if the static timing predictions were perfect, in the presence of uncertainty in timing and manufacturing, it may no longer be the best choice. The application of formal mathematical optimization in transistor sizing increases the height of the wall, thus exacerbating the problem. There is also a practical matter that schematic restructuring downstream in the design methodology is easier to conceive when there are fewer equally critical paths. This paper describes a method that gives formal mathematical optimizers the incentive to avoid the wall of equally critical paths, while giving up as little as possible in nominal performance.},
  keywords  = {circuit optimisation; circuit simulation; circuit tuning; delays; digital integrated circuits; integrated circuit design; logic simulation; microprocessor chips; timing; clock slew; design methodology; equally critical paths; formal mathematical optimization; high-performance microprocessor macros; manufacturing variations; model-to-hardware correlation; parasitic predictions; schematic restructuring; slack histogram; static timing assumptions; transistor sizing; tuned digital circuits; uncertainty-aware circuit optimization},
  year      = {2002},
}



@inproceedings{PV:BVB03,
  author    = {Bhardwaj, S. and Vrudhula, S. B. K. and Blaauw, D.},
  title     = {{TAU}: Timing analysis under uncertainty},
  booktitle = {International Conference on Computer Aided Design},
  pages     = {615--620},
  abstract  = {Due to excessive reduction in the gate length, dopant concentrations and the oxide thickness, even the slightest of variations in these quantities can result in significant variations in the performance of a device. This has resulted in a need for efficient and accurate techniques for performing Statistical Analysis of circuits. In this paper we propose a methodology based on Bayesian Networks for computing the exact probability distribution of the delay of a circuit. In case of large circuits where it is not possible to compute the exact distribution, we propose methods to reduce the problem size and get a tight lower bound on the exact distribution.},
  keywords  = {belief networks; delay circuits; network analysis; probability; statistical analysis; timing circuits; Bayesian networks; delay circuit; dopant concentrations; gate length; oxide thickness; probability distribution; statistical circuits analysis; timing analysis},
  year      = {2003},
}



@article{PV:borkar05-micro,
  author    = {Borkar, S.},
  title     = {Designing reliable systems from unreliable components: the challenges of transistor variability and degradation},
  journal   = {IEEE Micro},
  volume    = {25},
  number    = {6},
  pages     = {10--16},
  abstract  = {As technology scales, variability in transistor performance continues to increase, making transistors less and less reliable. This creates several challenges in building reliable systems, from the unpredictability of delay to increasing leakage current. Finding solutions to these challenges require a concerted effort on the part of all the players in a system design. This article discusses these effects and proposes microarchitecture, circuit, and testing research that focuses on designing with many unreliable components (transistors) to yield reliable system designs.},
  keywords  = {VLSI; integrated circuit design; integrated circuit reliability; integrated circuit testing; logic design; microprocessor chips; transistors; delay; leakage current; microarchitecture; reliable system design; testing; transistor degradation; transistor variability; unreliable components; Hardware Computer System Organization},
  year      = {2005},
}



@inproceedings{PV:BKN+03,
  author    = {Borkar, S. and Karnik, T. and Narendra, S. and Tschanz, J. and Keshavarzi, A. and De, V.},
  title     = {Parameter variations and impact on circuits and microarchitecture},
  booktitle = {Design Automation Conference},
  pages     = {338--342},
  abstract  = {Parameter variation in scaled technologies beyond 90nm will pose a major challenge for design of future high performance microprocessors. In this paper, we discuss process, voltage and temperature variations; and their impact on circuit and microarchitecture. Possible solutions to reduce the impact of parameter variations and to achieve higher frequency bins are also presented.},
  keywords  = {VLSI; design for manufacture; microprocessor chips; parameter estimation; body bias; frequency bins; high performance design; microarchitecture; parameter variation; process variations; temperature variations; voltage variations},
  year      = {2003},
}



@inproceedings{PV:tutorial-isqed06,
  author    = {Bowman, K. and Orshansky, M. and Sapatnekar, S.},
  title     = {Tutorial: Variability and its impact on design --- emerging technologies for {VLSI} design},
  booktitle = {International Symposium on Quality Electronic Design},
  year      = {2006},
}



@article{
PV:BDM02,
   Author = {Bowman, K. A. and Duvall, S. G. and Meindl, J. D.},
   Title = {Impact of die-to-die and within-die parameter fluctuations on the maximum clock frequency distribution for gigascale integration},
   Journal = {IEEE Journal of Solid-State Circuits},
   Volume = {37},
   Number = {2},
   Pages = {183--190},
   ISSN = {0018-9200},
   Abstract = {A model describing the maximum clock frequency (FMAX) distribution of a microprocessor is derived and compared with wafer sort data for a recent 0.25-$\mu$m microprocessor. The model agrees closely with measured data in mean, variance, and shape. Results demonstrate that within-die fluctuations primarily impact the FMAX mean and die-to-die fluctuations determine the majority of the FMAX variance. Employing rigorously derived device and circuit models, the impact of die-to-die and within-die parameter fluctuations on future FMAX distributions is forecast for the 180, 130, 100, 70, and 50-nm technology generations. Model predictions reveal that systematic within-die fluctuations impose the largest performance degradation resulting from parameter fluctuations. Assuming a 3$\sigma$ channel length deviation of 20%, projections for the 50-nm technology generation indicate that essentially a generation of performance gain can be lost due to systematic within-die fluctuations. Key insights from this work elucidate the recommendations that manufacturing process controls be targeted specifically toward sources of systematic within-die fluctuations, and the development of new circuit design methodologies be aimed at suppressing the effect of within-die parameter fluctuations},
   Keywords = {CMOS digital integrated circuits
SPICE
critical path analysis
integrated circuit manufacture
integrated circuit modelling
microprocessor chips
semiconductor process modelling
SPICE-equivalent circuit simulator
circuit models
critical path delay variations
device models
die-to-die fluctuations
generic critical path model
gigascale integration
manufacturing process controls
manufacturing tolerances
maximum clock frequency distribution
microprocessor
parameter fluctuations
performance degradation
technology projections
within-die fluctuations},
   Year = {2002} }



@inproceedings{
PV:BBB01,
   Author = {Bruni, D. and Bogliolo, A. and Benini, L.},
   Title = {Statistical design space exploration for application-specific unit synthesis},
   BookTitle = {Design Automation Conference},
   Pages = {641--646},
   Abstract = {The capability of performing semi-automated design space exploration is the main advantage of high-level synthesis with respect to RTL design. However, design space exploration performed during high-level synthesis is limited in scope, since it provides promising solutions that represent good starting points for subsequent optimizations, but it provides no insight about the overall structure of the design space. In this work we propose unsupervised Monte-Carlo design exploration and statistical characterization to capture the key features of the design space. Our analysis provides insight on how various solutions are distributed over the entire design space. In addition, we apply extreme value theory (1997) to extrapolate achievable bounds from the sampling points.},
   Keywords = {Monte Carlo methods
application specific integrated circuits
circuit optimisation
high level synthesis
integrated circuit design
RTL design
application-specific unit synthesis
design space exploration
extreme value theory
high-level synthesis
semi-automated design
statistical design
unsupervised Monte-Carlo design},
   Year = {2001} }



@inproceedings{
PV:CLL+00,
   Author = {Cao, K. M. and Lee, W. C. and Liu, W. and Jin, X. and Su, P. and Fung, S. K. H. and An, J. X. and Yu, B. and Hu, C.},
   Title = {{BSIM4} gate leakage model including source-drain partition},
   BookTitle = {International Electron Devices Meeting},
   Pages = {815--818},
   Abstract = {Gate dielectric leakage current becomes a serious concern as sub-20 {\AA} gate oxide prevails in advanced CMOS processes. Oxide this thin can conduct significant leakage current by various direct-tunneling mechanisms and degrade circuit performance. While the gate leakage current of MOS capacitors has been much studied, little has been reported on compact MOSFET modeling with gate leakage. In this work, an analytical intrinsic gate leakage model for MOSFET with physical source/drain current partition is developed. This model has been implemented in BSIM4},
   Keywords = {MOSFET
leakage currents
semiconductor device models
tunnelling
BSIM4
advanced CMOS processes
compact MOSFET modeling
dielectric leakage current
direct-tunneling mechanisms
gate leakage model
intrinsic gate leakage model
physical source/drain current partition},
   Year = {2000} }



@inproceedings{
PV:CC05,
   Author = {Cao, Yu and Clark, L. T.},
   Title = {Mapping statistical process variations toward circuit performance variability: an analytical modeling approach},
   BookTitle = {Design Automation Conference},
   Pages = {658--663},
   Abstract = {A physical yet compact gate delay model is developed integrating short-channel effects and the Alpha-power law based timing model. This analytical approach accurately predicts both nominal delay and delay variability over a wide range of bias conditions, including sub-threshold. Excellent model scalability enables efficient mapping between process variations and delay variability at the circuit level. Based on this model, relative importance of physical effects on delay variability has been identified. While effective channel length variation is the leading source for variability at current 90nm node, performance variability is actually more sensitive to threshold variation at the sub-threshold region. Furthermore, this model is applied to investigate the limitation of low power design techniques in the presence of process variations, particularly dual $V_{th}$ and L biasing. Due to excessive variability under low $V_{DD}$, these techniques become ineffective.},
   Keywords = {delay circuits
integrated circuit modelling
low-power electronics
network synthesis
statistical analysis
timing
alpha-power law
analytical modeling approach
channel length variation
circuit performance variability
delay variability
gate delay model
low power design techniques
model scalability
nominal delay
short-channel effects
statistical process variations
threshold variation
timing model},
   Year = {2005} }



@inproceedings{
PV:CGK+02,
   Author = {Cao, Y. and Gupta, P. and Kahng, A. B. and Sylvester, D. and Yang, J.},
   Title = {Design sensitivities to variability: extrapolations and assessments in nanometer {VLSI}},
   BookTitle = {IEEE International ASIC/SOC Conference},
   Pages = {411--415},
   Abstract = {We propose a new framework for assessing (1) the impact of process variation on circuit performance and product value, and (2) the respective returns on investment for alternative process improvements. Elements of our framework include accurate device models and circuit simulation, along with Monte-Carlo analyses, to estimate parametric yields. We evaluate the merits of taking into account such previously unconsidered phenomena as correlations among process parameters. We also evaluate the impact of process variation with respect to such relevant metrics as parametric yield at selling point, and amount of required design guardbanding. Our experimental results yield insights into the scaling of process variation impacts through the next two ITRS technology nodes.},
   Keywords = {Monte Carlo methods
VLSI
circuit simulation
extrapolation
integrated circuit design
integrated circuit economics
integrated circuit modelling
integrated circuit yield
nanoelectronics
parameter estimation
ITRS technology nodes
Monte-Carlo analysis
circuit performance
design guardbanding
design sensitivity
device models
extrapolations
nanometer VLSI
parametric yields
process improvements
process metrics
process parameter correlations
process variation
process variation scaling
product value
return on investment
selling point},
   Year = {2002} }



@article{
PV:VR05,
   Author = {Visweswariah, Chandu and Ravindran, Kaushik},
   Title = {First-Order Incremental Block-Based Statistical Timing Analysis},
   Journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
   Volume = {99},
   Number = {99},
   Pages = {1},
   Abstract = {Variability in digital integrated circuits makes timing verification an extremely challenging task. In this paper, a canonical first order delay model is proposed that takes into account both correlated and independent randomness. A novel linear-time block-based statistical timing algorithm is employed to propagate timing quantities like arrival times and required arrival times through the timing graph in this canonical form. At the end of the statistical timing, the sensitivity of all timing quantities to each of the sources of variation is available. Excessive sensitivities can then be targeted by manual or automatic optimization methods to improve the robustness of the design. This paper also reports the first incremental statistical timer in the literature which is suitable for use in the inner loop of physical synthesis or other optimization programs. The third novel contribution of this paper is the computation of local and global criticality probabilities. For a very small cost in CPU time, the probability of each edge or node of the timing graph being critical is computed. Numerical results are presented on industrial ASIC chips with over two million logic gates, and statistical timing results are compared to exhaustive corner analysis on a chip design whose hardware showed early-mode timing violations.},
      Year = {2005} }



@article{
PV:CN03,
   Author = {Chen, T. and Naffziger, S.},
   Title = {Comparison of adaptive body bias ({ABB}) and adaptive supply voltage ({ASV}) for improving delay and leakage under the presence of process variation},
   Journal = {IEEE Transactions on Very Large Scale Integration (VLSI) Systems},
   Volume = {11},
   Number = {5},
   Pages = {888--899},
   ISSN = {1063-8210},
   Abstract = {Process variations as a percentage of nominal delay and power consumption are becoming more and more severe with continuing scaling of VLSI technology. The worsening process variation causes increased variability in performance, power, and reliability of VLSI circuits. Thus, performance and power consumption targets obtained during the design phase of VLSI circuits may significantly deviate from that of actual silicon resulting in significant yield losses. Adaptive body bias (ABB) has been shown to be an effective method of postsilicon tuning to reduce variability under the presence of process variation. Post silicon tuning can also be accomplished by using adaptive supply voltage (ASV). This paper compares the effectiveness of ABB and ASV in reducing variability and improving performance and power, and thus, yield.},
   Keywords = {VLSI
delays
integrated circuit yield
leakage currents
VLSI circuit
adaptive body bias
adaptive supply voltage
delay
leakage current
postsilicon tuning
power consumption
process variation
technology scaling
yield},
   Year = {2003} }



@article{
PV:Cla61,
   Author = {Clark, C. E.},
   Title = {The greatest of a finite set of random variables},
   Journal = {Operations Research},
   Volume = {9},
   Number = {2},
   Pages = {145--162},
      Year = {1961} }



@article{
PV:CCH+98,
   Author = {Conn, A. R. and Coulman, P. K. and Haring, R. A. and Morrill, G. L. and Visweswariah, C. and Wu, Chai Wah},
   Title = {{JiffyTune}: circuit optimization using time-domain sensitivities},
   Journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
   Volume = {17},
   Number = {12},
   Pages = {1292--1309},
   ISSN = {0278-0070},
   Abstract = {Automating the transistor and wire-sizing process is an important step toward being able to rapidly design high-performance, custom circuits. This paper presents a circuit optimization tool that automates the tuning task by means of state-of-the-art nonlinear optimization. It makes use of a fast circuit simulator and a general-purpose nonlinear optimization package. It includes minimax and power optimization, simultaneous transistor and wire tuning, general choices of objective functions and constraints, and recovery from nonworking circuits. In addition, the tool makes use of designer-friendly interfaces that automate the specification of the optimization task, the running of the optimizer, and the back-annotation of the results of optimization onto the circuit schematic. Particularly for large circuits, gradient computation is usually the bottleneck in the optimization procedure. In addition to traditional adjoint and direct methods, we use a technique called the adjoint Lagrangian method, which computes all the gradients necessary for one iteration of optimization in a single adjoint analysis. This paper describes the algorithms and the environment in which they are used and presents extensive circuit optimization results. A circuit with 6900 transistors, 4128 tunable transistors, and 60 independent parameters was optimized in about 108 min of CPU time on an IBM RISC/System 6000, model 590},
   Keywords = {circuit layout CAD
circuit optimisation
circuit simulation
integrated circuit layout
minimax techniques
sensitivity analysis
time-domain analysis
JiffyTune
adjoint Lagrangian method
back-annotation
circuit optimization tool
circuit schematic
designer-friendly interfaces
fast circuit simulator
general-purpose nonlinear optimization package
high-performance custom circuit design
minimax
objective functions
power optimization
time-domain sensitivities
transistor sizing process
wire-sizing process},
   Year = {1998} }



@inproceedings{
PV:DKS04,
   Author = {Davoodi, A. and Khandelwal, V. and Srivastava, A.},
   Title = {Variability inspired implementation selection problem},
   BookTitle = {IEEE/ACM International Conference on Computer Aided Design},
   Pages = {423--427},
   Abstract = {Given a directed acyclic graph and different possible implementations for each node, the implementation selection problem (ISP) selects the appropriate implementation for each node such that a given global design objective is optimized. ISP is a generic formulation that is explicitly or implicitly solved in several design automation problems like leakage optimization using dual $V_{th}$, gate sizing, etc. An implementation of a node results in an associated delay and perhaps cost for the node. In the presence of different sources of uncertainty and fabrication variability, fixed estimates of delays and costs of a node are extremely erroneous. We investigate a probabilistic approach to solve ISP by considering probability density functions for delays and costs of a node. We propose a dynamic-programming based approach in a probabilistic sense and introduce effective pruning criteria when dealing with probability distributions for identifying co-optimal solution at each stage. A case study of leakage optimization using dual $V_{th}$ is presented where we show the effectiveness of a probabilistic approach considering $V_{th}$ variability over a traditional deterministic one.},
   Keywords = {circuit optimisation
dynamic programming
electrical faults
network analysis
probability
dynamic-programming
effective pruning criteria
fabrication variability
implementation selection problem
leakage optimization
node costs
node delays
probabilistic approach
probability density functions
probability distributions},
   Year = {2004} }



@inproceedings{
PV:DJK92,
   Author = {Devadas, S. and Jyu, H. F. and Keutzer, K. and Malik, S.},
   Title = {Statistical timing analysis of combinational circuits},
   BookTitle = {International Conference on Computer Design: VLSI in Computers and Processors},
   Pages = {38--43},
   Abstract = {The authors develop efficient methods for computing an exact probability distribution of the delay of a combinational circuit, given probability distributions for the gate and wire delays. The derived distribution can give the probability that a combinational circuit will achieve a certain performance, across the possible range. The techniques target fast analysis as well as reduced memory requirements. The authors define a notion of falsity of paths when dealing with probability distributions on gate and wire delays, and they give methods for identifying and ignoring false paths in their probabilistic analysis, so as to obtain correct and accurate answers to the performance prediction question. Some results and comparisons are given for a number of combinational circuit benchmarks},
   Keywords = {combinatorial circuits
delays
logic testing
benchmarks
combinational circuits
delay
exact probability distribution
gate delays
performance prediction
reduced memory requirements
statistical timing analysis
wire delays},
   Year = {1992} }



@inproceedings{
PV:DK03,
   Author = {Devgan, A. and Kashyap, C.},
   Title = {Block-based static timing analysis with uncertainty},
   BookTitle = {IEEE/ACM International Conference on Computer Aided Design},
   Pages = {607--614},
   Abstract = {Static timing analysis is a critical step in design of any digital integrated circuit. Technology and design trends have led to significant increase in environmental and process variations which need to be incorporated in static timing analysis. This paper presents a new, efficient and accurate block-based static timing analysis technique considering uncertainty. This new method is more efficient as it models arrival times as cumulative density functions (CDFs) and delays as probability functions (PDFs). Computationally simple expressions are presented for basic static timing operations. The techniques are valid for any form of the probability distribution, though the use of piecewise linear modeling of CDFs is highlighted in this paper. Reconvergent fanouts are handled using a new technique that avoids path tracing. Variable accuracy timing analysis can be performed by varying the modeling accuracy of the piecewise linear model. Regular and statistical timing on different parts of the circuit can be incorporated into a single timing analysis run. Accuracy and efficiency of the proposed method is demonstrated for various ISCAS benchmark circuits.},
   Keywords = {delays
integrated circuit design
piecewise linear techniques
probability
timing
CDF
ISCAS benchmark circuits
PDF
arrival times
cumulative density functions
delays
digital integrated circuit design
piecewise linear modeling
probability distribution
probability functions
regular timing
static timing analysis
static timing operations
statistical timing
variable timing analysis},
   Year = {2003} }



@inproceedings{
PV:DN05,
   Author = {Devgan, A. and Nassif, S.},
   Title = {Power variability and its impact on design},
   BookTitle = {18th International Conference on VLSI Design},
   Pages = {679--682},
   Abstract = {Power is emerging as the next grand challenge in integrated circuit design. Impact of increasing power consumption can be seen from handheld device all the way to design of high end server microprocessors. Total power consumption is increasingly governed by exponentially increasing leakage power in newer process technologies. Along with increase in power consumption, the industry is also facing growing issues with inherent environmental and process variations. This growing variability has a tremendous impact on power and power variability. We review various issues with power variability and its impact on design.},
   Keywords = {integrated circuit design
leakage currents
microprocessor chips
power consumption
handheld device
leakage power
power variability
process technologies
process variations
server microprocessors},
   Year = {2005} }



@article{
PV:DFK93,
   Author = {Director, S. W. and Feldmann, P. and Krishna, K.},
   Title = {Statistical integrated circuit design},
   Journal = {IEEE Journal of Solid-State Circuits},
   Volume = {28},
   Number = {3},
   Pages = {193--202},
   ISSN = {0018-9200},
   Abstract = {Several statistical design methods that have been developed to minimize the effects of IC manufacturing process disturbances on circuit performance are reviewed. It is shown that statistical design problems can be expressed as optimization problems in which either the objective function or the constraint functions depend on expectations of random variables. The effectiveness of the most recent such method, the boundary integral method is illustrated with several circuit design examples},
   Keywords = {circuit CAD
design engineering
integrated circuit manufacture
monolithic integrated circuits
optimisation
statistical analysis
IC manufacturing process disturbances
boundary integral method
circuit design examples
constraint functions
integrated circuit design
objective function
optimization problems
statistical design methods
statistical design problems
yield maximisation},
   Year = {1993} }



@inproceedings{
PV:DKB+03,
   Author = {Lee, Dongwoo and Kwong, W. and Blaauw, D. and Sylvester, D.},
   Title = {Simultaneous subthreshold and gate-oxide tunneling leakage current analysis in nanometer {CMOS} design},
   BookTitle = {International Symposium on Quality Electronic Design},
   Pages = {287--292},
   Abstract = {In this paper we develop a fast approach to analyze the total leakage power of a large circuit block, considering both gate leakage, $I_{gate}$, and subthreshold leakage, $I_{sub}$. The interaction between $I_{sub}$ and $I_{gate}$ complicates analysis in arbitrary CMOS topologies. We propose simple and accurate heuristics to quickly estimate the state-dependent total leakage current considering the interaction between $I_{sub}$ and $I_{gate}$. We apply this method to ISCAS benchmark circuits in a projected 100 nm technology and demonstrate excellent accuracy compared to SPICE simulation with a 20,000X speedup on average.},
   Keywords = {CMOS digital integrated circuits
integrated circuit design
integrated circuit modelling
leakage currents
nanoelectronics
tunnelling
ISCAS benchmark circuits
SPICE simulation
arbitrary CMOS topologies
circuit block
gate leakage
gate oxide tunneling leakage current analysis
heuristics
leakage power
nanometer CMOS design
state dependent total leakage current
subthreshold leakage},
   Year = {2003} }



@article{
PV:EB03,
   Author = {Elgamel, M. A. and Bayoumi, M. A.},
   Title = {Interconnect noise analysis and optimization in deep submicron technology},
   Journal = {IEEE Circuits and Systems Magazine},
   Volume = {3},
   Number = {4},
   Pages = {6--17},
   ISSN = {1531-636X},
   Abstract = {The migration to using ultra deep submicron (UDSM) process, 0.25 $\mu$m or below, necessitates new design methodologies and EDA tools to address the new design challenges. One of the main challenges is noise. All different types of deep submicron such as cross talk, leakage, supply noise and process variations are obstacles in the way of achieving the desired level of noise immunity without giving up the improvement achieved in performance and energy efficiency. This article describes research directions and various levels of design abstraction to handle the interconnect challenges. These directions include approaches to adopt new analytical methods for interconnects, physical design levels and finally ways to face these challenges early in a higher level of the design process.},
   Keywords = {VLSI
circuit optimisation
integrated circuit interconnections
integrated circuit modelling
integrated circuit noise
0.25 micron
EDA tools
UDSM
cross talk
deep submicron technology
energy efficiency
interconnect design performance
interconnect noise analysis
interconnect optimization
leakage
noise immunity
physical design levels
process variations
supply noise
ultra deep submicron process},
   Year = {2003} }



@inproceedings{
PV:ELZ96,
   Author = {Elzinga, H.},
   Title = {On the impact of spatial parametric variations on {MOS} transistor mismatch},
   BookTitle = {IEEE International Conference on Microelectronic Test Structures},
   Pages = {173--177},
   Abstract = {Quite often, especially during the development of advanced MOS fabrication processes, it is observed that for matched-pair MOS transistors with large device dimensions the general mismatch law ``$\sigma(\Delta P) = A_{P}/\sqrt{\mathrm{area}}$'' (or its derivatives) does not always hold. This paper demonstrates that an explanation for this effect can be found in the presence of non-random parameter distributions over a wafer (referred to as spatial parametric variations). Furthermore, the use of a dedicated test structure to distinguish between random and non-random mismatch causes is presented},
   Keywords = {MOSFET
semiconductor device testing
MOS transistor mismatch
dedicated test structure
matched-pair MOS transistors
mismatch law
nonrandom mismatch causes
nonrandom parameter distributions
random mismatch causes
spatial parametric variations},
   Year = {1996} }



@inproceedings{
PV:FC05,
   Author = {Su, Fei and Chakrabarty, K.},
   Title = {Unified high-level synthesis and module placement for defect-tolerant microfluidic biochips},
   BookTitle = {Design Automation Conference},
   Pages = {825--830},
   Abstract = {Microfluidic biochips promise to revolutionize biosensing and clinical diagnostics. As more bioassays are executed concurrently on a biochip, system integration and design complexity are expected to increase dramatically. This problem is also identified by the 2003 ITRS document as a major system-level design challenge beyond 2009. We focus here on the automated design of droplet-based microfluidic biochips. We present a synthesis methodology that unifies operation scheduling, resource binding, and module placement for such "digital" biochips. The proposed technique, which is based on parallel recombinative simulated annealing, can also be used after fabrication to bypass defective cells in the microfluidic array. A real-life protein assay is used to evaluate the synthesis methodology.},
   Keywords = {biosensors
high level synthesis
microfluidics
microsensors
biosensing
clinical diagnostics
defect-tolerant microfluidic biochips
digital biochips
droplet-based microfluidic biochips
microfluidic array
module placement
operation scheduling
parallel recombinative simulated annealing
real-life protein assay
resource binding
unified high-level synthesis},
   Year = {2005} }



@inproceedings{PV:tutorial-iccad05,
   Author    = {Feldmann, P. and Schlichtmann, U.},
   Title     = {Tutorial: Yield Maximization Algorithms},
   BookTitle = {International Conference on Computer Aided Design},
   Year      = {2005} }



@inproceedings{
hls:criticality,
   Author = {Wang, F. and Xie, Y. and Ju, H.},
   Title = {A novel criticality computation method in statistical timing analysis},
   BookTitle = {Design, Automation and Test in Europe},
      Year = {2007} }



@inproceedings{
PV:FN03,
   Author = {Ferzli, I. A. and Najm, F. N.},
   Title = {Statistical estimation of leakage-induced power grid voltage drop considering within-die process variations},
   BookTitle = {Design Automation Conference},
   Pages = {856--859},
   Abstract = {Transistor threshold voltages ($V_{th}$) have been reduced as part of on-going technology scaling. The smaller $V_{th}$ values feature increased fluctuations due to process variations, with a strong within-die component. Correspondingly, given the exponential dependence of leakage on $V_{th}$, circuit leakage currents are increasing significantly and have strong within-die statistical variations. With these currents loading the power grid, the grid develops large voltage drops, which is an unavoidable background level of noise on the grid. We develop techniques for estimation of the statistics of the leakage-induced power grid voltage drop based on given statistics of the circuit leakage currents.},
   Keywords = {MOS integrated circuits
integrated circuit design
integrated circuit noise
leakage currents
statistical analysis
circuit leakage current
leakage-induced voltage drop
noise background level
power grid voltage drop
statistical estimation
technology scaling
transistor threshold voltage
within-die component
within-die process variation},
   Year = {2003} }



@inproceedings{
PV:FSR+97,
   Author = {Frank, D. J. and Solomon, P. and Reynolds, S. and Shin, J.},
   Title = {Supply and threshold voltage optimization for low power design},
   BookTitle = {International Symposium on Low Power Electronics and Design},
   Pages = {317--322},
   Abstract = {One of the most effective ways to design low power circuits is to use low power supply voltages. If the threshold voltages are also reduced, it is possible to maintain good performance at these lower voltages. This paper addresses the question of how to choose the optimum supply and threshold voltages for low power design. Other workers have also addressed this question, but have only considered nominal conditions or nominal conditions plus simplified tolerances. These prior works have used simplified models for device switching speed and power dissipation. In the present work we take more detailed account of parameter tolerances, take full account of short channel effects in the devices, and carry out full circuit simulations to obtain accurate speed and power information.},
   Keywords = {circuit optimisation
circuit simulation
low power design
optimization
parameter tolerance
power dissipation
short channel effect
supply voltage
switching speed
threshold voltage},
   Year = {1997} }



@inproceedings{
PV:FTL+99,
   Author = {Frank, D. J. and Taur, Y. and Ieong, M. and Wong, H. S. P.},
   Title = {Monte Carlo modeling of threshold variation due to dopant fluctuations},
   BookTitle = {Symposium on VLSI Circuits},
   Pages = {171--172},
   Abstract = {This paper presents a new, 3-D Monte Carlo approach for modeling random dopant fluctuation effects in MOSFETs. The method takes every silicon atom in the device into account and is generally applicable to arbitrary nonuniform doping profiles. In addition to body dopant fluctuations, the effect of source-drain dopant fluctuations on short-channel threshold voltage is studied for the first time. The result clearly indicates the benefit of retrograde body doping and shallow/abrupt source-drain junctions. It also quantifies the magnitude of threshold voltage variations due to discrete dopant fluctuations in an optimally designed 25 nm MOSFET},
   Keywords = {MOSFET
Monte Carlo methods
doping profiles
semiconductor device models
25 nm
3D Monte Carlo approach
MOSFETs
arbitrary nonuniform doping profiles
body dopant fluctuations
discrete dopant fluctuations
random dopant fluctuation effects
retrograde body doping
shallow/abrupt source-drain junctions
short-channel threshold voltage
source-drain dopant fluctuations
threshold variation},
   Year = {1999} }



@inproceedings{
PV:FW00,
   Author = {Frank, D. J. and Wong, H. S. P.},
   Title = {Simulation of stochastic doping effects in {Si} {MOSFETs}},
   BookTitle = {International Workshop on Computational Electronics},
   Pages = {2--3},
   Abstract = {MOSFET threshold voltage ($V_T$) variation due to random variations in the number and position of dopant atoms is an increasingly important problem as device dimensions shrink and has received increasing attention. This paper describes a recently implemented 3-D Monte Carlo approach for modeling random dopant fluctuation effects in MOSFETs. We also describe the results of simulating dopant fluctuation effects in several different MOSFET structures},
   Keywords = {MOSFET
Monte Carlo methods
doping profiles
elemental semiconductors
semiconductor device models
silicon
stochastic processes
3D Monte Carlo model
Si
Si MOSFET
numerical simulation
random fluctuations
stochastic doping effect
threshold voltage},
   Year = {2000} }



@inproceedings{
PV:GND+01,
   Author = {Gattiker, A. and Nassif, S. and Dinakar, R. and Long, C.},
   Title = {Timing yield estimation from static timing analysis},
   BookTitle = {International Symposium on Quality Electronic Design},
   Pages = {437--442},
   Abstract = {This paper presents a means for estimating parametric timing yield and guiding robust design for-quality in the presence of manufacturing and operating environment variations. Dual emphasis is on computational efficiency and providing meaningful robust-design guidance. Computational efficiency is achieved by basing the proposed methodology on a post-processing step applied to the report generated as a by-product of static timing analysis. Efficiency is also ensured by exploiting the fact that for small processing/environment variations, a linear model is adequate for capturing the resulting delay change. Meaningful design guidance is achieved by analyzing the timing-related influence of variations on a path-by-path basis, allowing designers perform a quality-oriented design pass focused on key paths. A coherent strategy is provided to handle both die-to-die and within-die variations. Examples from a PowerPC microprocessor illustrate the methodology and its capabilities},
   Keywords = {circuit CAD
circuit simulation
delays
design for manufacture
integrated circuit design
integrated circuit yield
timing
PowerPC microprocessor
computational efficiency
delay change
die-to-die variations
linear model
operating environment variations
parametric timing yield
path-by-path basis
post-processing step
processing/environment variations
quality-oriented design pass
robust design for-quality
robust-design guidance
static timing analysis
timing yield estimation
timing-related influence
within-die variations},
   Year = {2001} }



@inproceedings{
PV:GC04,
   Author = {Gregg, J. and Chen, T. W.},
   Title = {Post silicon power/performance optimization in the presence of process variations using individual well adaptive body biasing ({IWABB})},
   BookTitle = {International Symposium on Quality Electronic Design},
   Pages = {453--458},
   Abstract = {Continued scaling of silicon process technologies beyond the 90nm node will face problems due to within die process variations. The increasing relative magnitude of within die process variations will cause power-frequency distributions to widen, thus reducing manufacturing yields. Mitigating the effects of these process variations can be done by using a system of locally-generated body biases. This system allows for highly localized circuit optimizations with very little overhead in silicon area and routing resources. We present two algorithms to find near-optimal configurations of these biases which can be applied during post-fabrication testing. The system can improve an initial yield of 12% to 73%.},
   Keywords = {CMOS digital integrated circuits
circuit layout CAD
circuit optimisation
design for manufacture
evolutionary computation
integrated circuit layout
integrated circuit yield
microprocessor chips
network routing
CMOS process
highly localized circuit optimizations
individual well adaptive body biasing
locally-generated body biases
manufacturing yields
microprocessor design
near-optimal configurations
post silicon power-performance optimization
post-fabrication testing
power-frequency distributions
process technology scaling
process variations
single-objective evolutionary algorithm
within die variations},
   Year = {2004} }



@inproceedings{
PV:ZJD+05,
   Author = {Gu, Zhenyu and Wang, Jia and Dick, R. R. and Zhou, Hai},
   Title = {Incremental exploration of the combined physical and behavioral design space},
   BookTitle = {Design Automation Conference},
   Pages = {208--213},
   Abstract = {Achieving design closure is one of the biggest headaches for modern VLSI designers. This problem is exacerbated by high-level design automation tools that ignore increasingly important factors such as the impact of interconnect on the area and power consumption of integrated circuits. Bringing physical information up into the logic level or even behavioral-level stages of system design is essential to solve this problem. In this paper, we present an incremental floorplanning high-level synthesis system. This system integrates high-level and physical design algorithms to concurrently improve a system's schedule, resource binding, and floor-plan, thereby allowing the incremental exploration of the combined behavioral-level and physical-level design space. Compared with previous approaches that repeatedly call loosely coupled floorplanners for physical estimation, this approach has the benefit of efficiency, stability, and better quality of results. For designs containing functional units with non-unity aspect ratios, the average CPU time improved by 369 %, the area improved by 14.24 %, and power improved by 4 %.},
   Keywords = {VLSI
circuit layout CAD
delays
high level synthesis
integrated circuit design
integrated circuit interconnections
integrated circuit layout
logic design
resource allocation
VLSI design
behavioral-level design space
high-level design automation tools
high-level synthesis system
incremental floorplanning
integrated circuit interconnection
integrated circuits power consumption
logic level stage
physical design algorithms
physical-level design space
resource binding
system design
system schedule},
   Year = {2005} }



@inproceedings{
PV:GVV+05,
   Author = {Guthaus, M. R. and Venkateswaran, N. and Visweswariah, C. and Zolotov, V.},
   Title = {Gate sizing using incremental parameterized statistical timing analysis},
   BookTitle = {IEEE/ACM International Conference on Computer-Aided Design},
   Pages = {1029--1036},
   Abstract = {As technology scales into the sub-90 nm domain, manufacturing variations become an increasingly significant portion of circuit delay. As a result, delays must be modeled as statistical distributions during both analysis and optimization. This paper uses incremental, parametric statistical static timing analysis (SSTA) to perform gate sizing with a required yield target. Both correlated and uncorrelated process parameters are considered by using a first-order linear delay model with fitted process sensitivities. The fitted sensitivities are verified to be accurate with circuit simulations. Statistical information in the form of criticality probabilities are used to actively guide the optimization process which reduces run-time and improves area and performance. The gate sizing results show a significant improvement in worst slack at 99.86% yield over deterministic optimization.},
   Keywords = {delays
integrated circuit design
integrated circuit yield
statistical analysis
timing jitter
IC design
IC yield
circuit simulation
correlated parameter
fitted process sensitivity
gate sizing
linear delay
run-time
statistical static timing analysis},
   Year = {2005} }



@inproceedings{
PV:HN97,
   Author = {Halter, J. P. and Najm, F. N.},
   Title = {A gate-level leakage power reduction method for ultra-low-power {CMOS} circuits},
   BookTitle = {Custom Integrated Circuits Conference},
   Pages = {475--478},
   Abstract = {In order to reduce the power dissipation of CMOS products, semiconductor manufacturers are reducing the power supply voltage. This requires that the transistor threshold voltages be reduced as well to maintain adequate performance and noise margins. However, this increases the subthreshold leakage current of p and n MOSFETs, which starts to offset the power savings obtained from power supply reduction. This problem will worsen in future generations of technology, as threshold voltages are reduced further. In order to overcome this, we propose a design technique that can be used during logic design in order to reduce the leakage current and power. We target designs where parts of the circuit are put in ``standby'' mode when not in use, which is becoming a common approach for low power design. The proposed design changes consist of minimal overhead circuitry that puts the circuit into a ``low leakage standby state'', whenever it goes into standby, and allows it to return to its original state when it is reactivated. We give an efficient algorithm for computing a good low leakage power state. We demonstrate this method on the ISCAS-89 benchmark suite and show leakage power reductions of up to 54% for some circuits},
   Keywords = {CMOS logic circuits
VLSI
circuit CAD
combinational circuits
integrated circuit design
leakage currents
logic CAD
ISCAS-89 benchmark suite
MOSFETs
gate-level leakage power reduction method
logic design
low leakage standby state
minimal overhead circuitry
power dissipation
power supply voltage
transistor threshold voltages
ultra-low-power CMOS circuits},
   Year = {1997} }



@inproceedings{
PV:HOK01,
   Author = {Hamada, M. and Ootaguro, Y. and Kuroda, T.},
   Title = {Utilizing surplus timing for power reduction},
   BookTitle = {IEEE Conference on Custom Integrated Circuits},
   Pages = {89--92},
   Abstract = {Multiple Vdd's, multiple Vth's, and multiple transistor width for utilizing surplus timing in non-critical paths for power reduction is investigated. Theoretical models are developed from which rules of thumb for optimum Vdd's, Vth's, and W's are derived, as well as knowledge for future design},
   Keywords = {integrated circuit design
low-power electronics
timing
multiple power supplies
multiple threshold voltages
multiple transistor width
noncritical paths
power reduction
surplus timing},
   Year = {2001} }



@article{
PV:HL92,
   Author = {Harkness, C. L. and Lopresti, D. P.},
   Title = {Interval methods for modeling uncertainty in {RC} timing analysis},
   Journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
   Volume = {11},
   Number = {11},
   Pages = {1388--1401},
   ISSN = {0278-0070},
   Abstract = {The authors propose representing uncertain parameters as intervals and present a theoretical framework based on interval algebra for manipulating these ranges. To illustrate this methodology, they modify an existing RC analysis algorithm (Crystal's PR-Slope model) to create one which computes worst-case delay bounds when given uncertain input parameters. They provide proofs of correctness for the approach and test its performance. Two alternate interval-based techniques which produce even tighter bounds than the original approach are also presented. When compared to Monte Carlo simulation, the interval methods are more precise and significantly faster},
   Keywords = {MOS integrated circuits
VLSI
circuit analysis computing
delays
NMOS
PR-Slope model
RC analysis algorithm
RC timing analysis
VLSI design
interval algebra
interval methods
interval-based techniques
sequential MOS VLSI
timing verification
uncertain input parameters
worst-case delay bounds},
   Year = {1992} }



@inproceedings{
PV:HSW+02,
   Author = {Hess, C. and Stine, B. E. and Weiland, L. H. and Sawada, K.},
   Title = {Logic characterization vehicle to determine process variation impact on yield and performance of digital circuits},
   BookTitle = {International Conference on Microelectronic Test Structures},
   Pages = {189--196},
   Abstract = {Manufacturing of integrated circuits relies on the sequence of many hundred process steps. Each of these steps will have more or less variation, which has to be within a certain limit to guarantee the chips functionality at a target speed. But, not every chip layout is susceptive to process variation the same way, which requires a link between process capabilities and product design. This paper will present a novel Logic Characterization Vehicle (LCV) to investigate the yield and performance impact of process variation on high volume product chips. The LCV combines and manipulates new or already documented circuits like memory cells and combinatorial logic circuits within a JIG interface that allows fast and easy testability. Beside the functionality of such circuits, also path delay as well as cross talk issues can be determined. A standard digital functional tester can be used, since all timing critical measurements will be performed within the JIG. The described method allows early implementation of existing circuits for future technology nodes (shrinks). A Design Of Experiments (DOE) based implementation of possible layout manipulations will determine their impact on yield and performance of a target design as well as its sensitivity to process variation. The described approach can be used at a much earlier stage of product and process development, which will significantly shorten yield ramp.},
   Keywords = {automatic testing
combinational circuits
crosstalk
delay estimation
design of experiments
digital integrated circuits
integrated circuit layout
integrated circuit testing
integrated circuit yield
logic testing
production testing
DOE based implementation
IC manufacturing
JIG interface
JIG test
combinatorial logic circuits
cross talk standard digital functional tester
digital circuit performance
digital circuit yield
high volume product chips
layout manipulations
logic characterization vehicle
memory cells
path delay
process variation impact determination
testability
timing critical measurements},
   Year = {2002} }



@article{
PV:HBI+06,
   Author = {Hokazono, A. and Balasubramanian, S. and Ishimaru, K. and Ishiuchi, H. and King Liu, Tsu-Jae and Hu, Chenming},
   Title = {{MOSFET} design for forward body biasing scheme},
   Journal = {IEEE Electron Device Letters},
   Volume = {27},
   Number = {5},
   Pages = {387--389},
   ISSN = {0741-3106},
   Keywords = {MOSFET
doping profiles
elemental semiconductors
silicon
10 nm
30 nm
CMOS technology
MOSFET design
Si
channel doping profiles
channel engineering
forward body biasing scheme
gate work function engineering
Body bias
forward bias
reverse bias
substrate bias
work function},
   Year = {2006} }



@inproceedings{
PV:HS03,
   Author = {Chang, Hongliang and Sapatnekar, S. S.},
   Title = {Statistical timing analysis considering spatial correlations using a single {PERT}-like traversal},
   BookTitle = {International Conference on Computer Aided Design},
   Pages = {621--625},
   Abstract = {We present an efficient statistical timing analysis algorithm that predicts the probability distribution of the circuit delay while incorporating the effects of spatial correlations of intra-die parameter variations, using a method based on principal component analysis. The method uses a PERT-like circuit graph traversal, and has a run-time that is linear in the number of gates and interconnects, as well as the number of grid partitions used to model spatial correlations. On average, the mean and standard deviation values computed by our method have errors of 0.2% and 0.9%, respectively, in comparison with a Monte Carlo simulation.},
   Keywords = {delay circuits
graphs
principal component analysis
statistical analysis
statistical distributions
timing circuits
Monte Carlo simulation
PERT-like circuit graph traversal
circuit delay
grid partitions
mean values
probability distribution
spatial correlations
standard deviation values
statistical timing analysis algorithm},
   Year = {2003} }



@inproceedings{
PV:HTS06,
   Author = {Humenay, E. and Tarjan, D. and Skadron, K.},
   Title = {Impact of Parameter Variations on Multi-Core Chips},
   BookTitle = {Workshop on Architectural Support for Gigascale Integration},
   Year = {2006} }



@inproceedings{
hls:weilun06,
   Author = {Hung, W. L. and Wu, X. and Xie, Y.},
   Title = {Guaranteeing performance yield in high-level synthesis},
   BookTitle = {International Conference on Computer Aided Design},
   Pages = {303--309},
   Year = {2006} }



@article{
PV:HWV05,
   Author = {Hurat, P. and Wang, Y.-T. and Verghese, N. K.},
   Title = {Sub-90 nanometer variability is here to stay},
   Journal = {EDA Tech Forum},
   Volume = {2},
   Number = {3},
   Pages = {26--28},
   Year = {2005} }



@inproceedings{
PV:HCC+89,
   Author = {Hwang, K. S. and Casavant, A. E. and Chang, C. T. and d'Abreu, M. A.},
   Title = {Scheduling and hardware sharing in pipelined data paths},
   BookTitle = {IEEE International Conference on Computer-Aided Design},
   Pages = {24--27},
   Abstract = {A scheduling and hardware sharing algorithm is presented. This algorithm is generic and can be used for synthesizing both nonpipelined and pipelined data paths. The scheduling algorithm tries to distribute operations equally among partitions to maximize hardware sharing. Multiplexer delays are explicitly considered to produce a more accurate scheduling. In hardware sharing, structural parameters such as the size of multiplexers, interconnect overhead, the size of the smallest sharable operator etc. are used to control the amount of sharing globally and produce a heuristically optimized RTL structure. The scheduling algorithm is iterated until a satisfactory structure is obtained. The algorithm also can be used for partitioning a large system into implementable pieces. The algorithm has been used successfully for synthesizing a pipelined data path from a graphics processing description that contains about 1000 components},
   Keywords = {circuit layout CAD
logic CAD
pipeline processing
scheduling
generic algorithm
graphics processing description
hardware sharing algorithm
heuristically optimized RTL structure
implementable pieces
interconnect overhead
iteration
multiplexer delays
multiplexer size
nonpipelined data paths
operations distribution
partitioning
partitions
pipelined data paths
scheduling algorithm
smallest sharable operator
structural parameters},
   Year = {1989} }



@misc{
PV:cell06,
   Author = {{IBM}},
   Title = {{CELL} processor Yield},
   Abstract = {CELL yield very low},
   Year = {2006} }



@book{
PV:JM95,
   Author = {Zhang, J. and Styblinski, M.},
   Title = {Yield and variability optimization of integrated circuits},
   Publisher = {Kluwer Publishers},
   Year = {1995} }



@inproceedings{
PV:JB00,
   Author = {Jacobs, E. T. A. F. and Berkelaar, M. R. C. M.},
   Title = {Gate sizing using a statistical delay model},
   BookTitle = {Design, Automation and Test in Europe Conference and Exhibition},
   Pages = {283--290},
   Abstract = {This paper is about gate sizing under a statistical delay model. It shows we can solve the gate sizing problem exactly for a given statistical delay model. The formulation used allows many different forms of objective functions, which could for example directly optimize the delay uncertainty at the circuit outputs. We formulate the gate sizing problem as a nonlinear programming problem, and show that if we do this carefully, we can solve these problems exactly for circuits up to a few thousand gates using the publicly available large scale nonlinear programming solver LANCELOT},
   Keywords = {delay estimation
logic design
nonlinear programming
statistical analysis
LANCELOT solver
gate sizing
nonlinear programming problem
objective functions
statistical delay model},
   Year = {2000} }



@inproceedings{
PV:JKN+03,
   Author = {Jess, J. A. G. and Kalafala, K. and Naidu, S. R. and Otten, R. H. J. and Visweswariah, C.},
   Title = {Statistical timing for parametric yield prediction of digital integrated circuits},
   BookTitle = {Design Automation Conference},
   Pages = {932--937},
   Abstract = {Uncertainty in circuit performance due to manufacturing and environmental variations is increasing with each new generation of technology. It is therefore important to predict the performance of a chip as a probabilistic quantity. This paper proposes three novel algorithms for statistical timing analysis and parametric yield prediction of digital integrated circuits. The methods have been implemented in the context of the EinsTimer static timing analyzer. Numerical results are presented to study the strengths and weaknesses of these complementary approaches. Across-the-chip variability continues to be accommodated by EinsTimer's "Linear Combination of Delay (LCD)" mode. Timing analysis results in the face of statistical temperature and V/sub dd/ variations are presented on an industrial ASIC part on which a bounded timing methodology leads to surprisingly wrong results.},
   Keywords = {application specific integrated circuits
integrated circuit design
integrated circuit modelling
integrated circuit yield
statistical analysis
EinsTimer static timing analyzer
LCD mode
across-the-chip variability
bounded timing methodology
chip performance
circuit performance
digital integrated circuit
environmental variation
linear combination of delay
manufacturing variation
parametric yield prediction
statistical temperature
statistical timing analysis},
   Year = {2003} }



@inproceedings{
PV:JXP04,
   Author = {Le, Jiayong and Li, Xin and Pileggi, L. T.},
   Title = {{STAC}: statistical timing analysis with correlation},
   BookTitle = {Design Automation Conference},
   Pages = {343--348},
   Year = {2004} }



@inproceedings{
PV:JYS+02,
   Author = {Cai, Jin and Taur, Yuan and Huang, Shih-Fen and Frank, D. J. and Kosonocky, S. and Dennard, R. H.},
   Title = {Supply voltage strategies for minimizing the power of {CMOS} processors},
   BookTitle = {Symposium on VLSI Technology},
   Pages = {102--103},
   Abstract = {This paper presents a dual supply voltage strategy for reduction of the total (static and dynamic) power of high performance CMOS processors. By expressing CMOS delay, static power, and dynamic power in terms of the power supply voltage V/sub DD/ and threshold voltage V/sub T/, an optimization procedure that takes the circuit activity factor into account is performed to find the V/sub DD/ and V/sub T/ for minimum total power at given performance levels. It is shown that 50% power reduction or 20% performance enhancement can be attained by adopting both a low (0.5 V) supply voltage for high-activity circuits and a high (1.2 V) supply voltage for low-activity circuits in a 100 nm-node CMOS technology.},
   Keywords = {CMOS digital integrated circuits
circuit optimisation
delays
integrated circuit measurement
integrated circuit modelling
low-power electronics
microprocessor chips
power supply circuits
0.5 V
1.2 V
100 nm
CMOS delay
CMOS processors
CMOS technology
circuit activity factor
dual supply voltage strategy
dynamic power
high-activity circuits
low-activity circuits
minimum total power
optimization procedure
power minimization
power supply voltage
processor performance levels
static power
threshold voltage},
   Year = {2002} }



@inproceedings{
PV:JKW+02,
   Author = {Liou, Jing-Jia and Krstic, A. and Wang, L. C. and Cheng, Kwang-Ting},
   Title = {False-path-aware statistical timing analysis and efficient path selection for delay testing and timing validation},
   BookTitle = {Design Automation Conference},
   Pages = {566--569},
   Abstract = {We propose a false-path-aware statistical timing analysis framework. In our framework, cell as well as interconnect delays are assumed to be correlated random variables. Our tool can characterize statistical circuit delay distribution for the entire circuit and produce a set of true critical paths.},
   Keywords = {VLSI
cellular arrays
circuit optimisation
delays
integrated circuit interconnections
integrated circuit modelling
logic CAD
statistical analysis
timing
cell delays
circuit delay distribution
correlated random variables
delay testing
efficient path selection
false-path-aware analysis
interconnect delays
statistical timing analysis
timing optimization
timing validation
true critical paths},
   Year = {2002} }



@inproceedings{
PV:JKK+01,
   Author = {Liou, Jing-Jia and Cheng, Kwang-Ting and Kundu, S. and Krstic, A.},
   Title = {Fast statistical timing analysis by probabilistic event propagation},
   BookTitle = {Design Automation Conference},
   Pages = {661--666},
   Abstract = {We propose a new statistical timing analysis algorithm, which produces arrival-time random variables for all internal signals and primary outputs for cell-based designs with all cell delays modeled as random variables. Our algorithm propagates probabilistic timing events through the circuit and obtains final probabilistic events (distributions) at all nodes. The new algorithm is deterministic and flexible in controlling run time and accuracy. However, the algorithm has exponential time complexity for circuits with reconvergent fanouts. In order to solve this problem, we further propose a fast approximate algorithm. Experiments show that this approximate algorithm speeds up the statistical timing analysis by at least an order of magnitude and produces results with small errors when compared with Monte Carlo methods.},
   Keywords = {VLSI
cellular arrays
circuit simulation
delays
fault simulation
logic simulation
sensitivity analysis
statistical analysis
timing
arrival-time random variables
cell delays
cell-based designs
exponential time complexity
final probabilistic events
internal signals
probabilistic event propagation
probabilistic timing events
random variables
reconvergent fanouts
run time
statistical timing analysis},
   Year = {2001} }



@article{
PV:JSR99,
   Author = {Johnson, M. C. and Somasekhar, D. and Roy, K.},
   Title = {Models and algorithms for bounds on leakage in {CMOS} circuits},
   Journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
   Volume = {18},
   Number = {6},
   Pages = {714--725},
   ISSN = {0278-0070},
   Abstract = {Subthreshold leakage current in deep submicron MOS transistors is becoming a significant contributor to power dissipation in CMOS circuits as threshold voltages and channel lengths are reduced. Consequently, estimation of leakage current and identification of minimum and maximum leakage conditions are becoming important, especially in low power applications. In this paper we outline methods for estimating leakage at the circuit level and then propose heuristic and exact algorithms to accomplish the same task for random combinational logic. In most cases the heuristic is found to obtain bounds on leakage that are close and often identical to bounds determined by a complete branch and bound search. Methods are also demonstrated to show how estimation accuracy can be traded off against execution time. The proposed algorithms have potential application in power management applications or quiescent current ($I_{DDQ}$) testing if one wished to control leakage by application of appropriate input vectors. For a variety of benchmark circuits, leakage was found to vary by as much as a factor of six over the space of possible input vectors},
   Keywords = {CMOS digital integrated circuits
VLSI
integrated circuit modelling
integrated circuit testing
leakage currents
logic testing
low-power electronics
CMOS circuits
benchmark circuits
branch and bound search
channel lengths
circuit level
deep submicron MOS transistors
exact algorithms
execution time
heuristic algorithms
input vectors
low power applications
power dissipation
power management applications
quiescent current
random combinational logic
subthreshold leakage current
threshold voltages},
   Year = {1999} }



@inproceedings{
PV:JYD+01,
   Author = {Park, Joon-Young and Koo, Yido and Jeong, Deog-Kyoon and Kim, Wonchan and Yoo, Changsik and Kim, Changhyun},
   Title = {A high-speed memory interface circuit tolerant to {PVT} variations and channel noise},
   BookTitle = {European Solid-State Circuits Conference},
   Pages = {293--296},
   Abstract = {A high-speed I/O circuit for the memory interface is implemented in a 0.25 $\mu$m CMOS technology. To increase the sensitivity of the input circuit, the receiver employs the positive feedback. For driving of signal with the proper slew rate and specified voltage level under PVT variations, the proposed output circuit includes the novel level detection circuit and slew rate control scheme.},
   Year = {2001} }



@article{
PV:JZM04,
   Author = {Joyner, J. W. and Zarkesh-Ha, P. and Meindl, J. D.},
   Title = {Global interconnect design in a three-dimensional system-on-a-chip},
   Journal = {IEEE Transactions on Very Large Scale Integration (VLSI) Systems},
   Volume = {12},
   Number = {4},
   Pages = {367--372},
   ISSN = {1063-8210},
   Abstract = {A stochastic model for the global net-length distribution of a three-dimensional system-on-a-chip (3D-SoC) is derived. Using the results of this model, a global interconnect design window for a 3D-SoC is established by evaluating the constraints of: 1) wiring area; 2) clock wiring bandwidth; and 3) crosstalk noise. This window elucidates the optimum 3D-SoC global interconnect parameters for minimum pitch, minimum aspect ratio, and maximum clock frequency. In comparison to a two-dimensional system-on-a-chip (2D-SoC), the design window expands for a 3D-SoC to allow greater flexibility of interconnect parameters, thus increasing the guardbands to process variations. In addition, the limit on the maximum global clock frequency is revealed to increase as S/sup 2/, where S is the number of strata. This increase in on-chip signaling rate, however, comes at the expense of I/O density, highlighting the need for new high-density-I/O packaging techniques to exploit the full potential of 3D-SoC.},
   Keywords = {VLSI
circuit layout CAD
crosstalk
integrated circuit interconnections
integrated circuit layout
logic partitioning
network routing
system-on-chip
clock wiring bandwidth
crosstalk noise
global interconnect design
global net-length distribution
global wiring
integrated circuit interconnection
maximum clock frequency
minimum aspect ratio
minimum pitch
minimum rectilinear Steiner tree length
stochastic model
three-dimensional system-on-a-chip
wiring area},
   Year = {2004} }



@inproceedings{
PV:JM94,
   Author = {Jyu, H. and Malik, S.},
   Title = {Statistical Delay Modeling in Logic Design and Synthesis},
   BookTitle = {Design Automation Conference},
   Pages = {126--130},
   Abstract = {Manufacturing disturbances are inevitable in the fabrication of integrated circuits. These disturbances will result in variations in the delay specifications of manufactured circuits. In order to capture the impact of these variations on the delay behavior of these circuits we propose a pair of statistical delay models for use in logic design. These models abstract the real variations from the process level and can be used for statistical delay analysis and optimization in logic design and synthesis while offering an efficiency vs. accuracy tradeoff.},
   Year = {1994} }



@inproceedings{
PV:tutorial-aspdac06,
   Author = {Kahng, A. and Scheffer, L. and Orshansky, M. and Strojwas, A.},
   Title = {Tutorial: {DFM} Tools and Methodologies for 65 nm and Below},
   BookTitle = {Asia and South Pacific Design Automation Conference},
      Year = {2006} }



@inproceedings{
PV:tutorial-date06,
   Author = {Kahng, A. and Scheffer, L. and Orshansky, M. and Strojwas, A.},
   Title = {Tutorial: {DFM} Tools and Methodologies for 65 nm and Below},
   BookTitle = {Design, Automation and Test in Europe},
      Year = {2006} }



@inproceedings{
PV:KNC02,
   Author = {Kao, J. and Narendra, S. and Chandrakasan, A.},
   Title = {Subthreshold leakage modeling and reduction techniques},
   BookTitle = {International Conference on Computer Aided Design},
   Pages = {141--148},
   Abstract = {As technology scales, subthreshold leakage currents grow exponentially and become an increasingly large component of total power dissipation. CAD tools to help model and manage subthreshold leakage currents will be needed for developing ultra low power and high performance integrated circuits. This paper gives an overview of current research to control leakage currents, with an emphasis on areas where CAD improvements will be needed. The first part of the paper explores techniques to model subthreshold leakage currents at the device, circuit, and system levels. Next, circuit techniques such as source biasing, dual V/sub t/ partitioning, MTCMOS, and VTCMOS are described. These techniques reduce leakage currents during standby states and minimize power consumption. This paper also explores ways to reduce total active power by limiting leakage currents and optimally trading off between dynamic and leakage power components.},
   Keywords = {CMOS digital integrated circuits
circuit CAD
circuit optimisation
integrated circuit design
integrated circuit modelling
leakage currents
logic CAD
low-power electronics
sequential circuits
CAD tools
IC total power dissipation
MTCMOS
VTCMOS
device/circuit/system level modelling
digital circuits
dual voltage partitioning
dynamic/leakage power component trade-off optimization
high performance integrated circuits
sequential circuits
source biasing
standby states
subthreshold leakage current control/limitation
subthreshold leakage modeling/reduction techniques
total active power reduction
ultra low power IC},
   Year = {2002} }



@inproceedings{
PV:KYT+02,
   Author = {Karnik, T. and Yibin, Ye and Tschanz, J. and Liqiong, Wei and Burns, S. and Govindarajulu, V. and De, V. and Borkar, S.},
   Title = {Total power optimization by simultaneous dual-{Vt} allocation and device sizing in high performance microprocessors},
   BookTitle = {Design Automation Conference},
   Pages = {486--491},
   Abstract = {We describe various design automation solutions for design migration to a dual-Vt process technology. We include the results of a Lagrangian relaxation based tool, iSTATS, and a heuristic iterative optimization flow. Joint dual-Vt allocation and sizing reduces total power by 10+% compared with Vt allocation alone, and by 25+% compared with pure sizing methods. The heuristic flow requires 5$\times$ larger computation runtime than iSTATS due to its iterative nature},
   Keywords = {Lagrangian relaxation based tool
VLSI
circuit optimisation
computation runtime
design automation solutions
design migration
device sizing
heuristic iterative optimization flow
high performance microprocessors
iSTATS
integrated circuit design
iterative methods
iterative nature
logic CAD
microprocessor chips
simultaneous dual-Vt allocation
total power optimization},
   Year = {2002} }



@inproceedings{
PV:KDN+03,
   Author = {Khandelwal, V. and Davoodi, A. and Nanavati, A. and Srivastava, A.},
   Title = {A probabilistic approach to buffer insertion},
   BookTitle = {International Conference on Computer Aided Design},
   Pages = {560--567},
   Abstract = {This work presents a formal probabilistic approach for solving optimization problems in design automation. Prediction accuracy is very low especially at high levels of design flow. This can be attributed mainly to unawareness of low level layout information and variability in fabrication process. Hence a traditional deterministic design automation approach where each cost function is represented as a fixed value becomes obsolete. A new approach is gaining attention in which the cost functions are represented as probability distributions and the optimization criteria is probabilistic too. This design optimization philosophy is demonstrated through the classic buffer insertion problem. Formally, we capture wirelengths as probability distributions (as compared to the traditional approach which considers wirelength as fixed values) and present several strategies for optimizing the probabilistic criteria. During the course of this work many problems are proved to be NP-Complete. Comparisons are made with the Van-Ginneken "optimal under fixed wire-length" algorithm. Results show that the Van-Ginneken approach generated delay distributions at the root of the fanout wiring tree which had large probability (0.91 in the worst case and 0.55 on average) of violating the delay constraint. Our algorithms could achieve 100% probability of satisfying the delay constraint with similar buffer penalty. Although this work considers wirelength prediction inaccuracies, our probabilistic strategy could be extended trivially to consider fabrication variability in wire parasitics.},
   Keywords = {buffer circuits
circuit optimisation
electronic design automation
probability
NP-complete problems
Van-Ginneken approach
buffer insertion
cost function
delay constraint
delay distributions
design automation
design optimization
probabilistic approach
probability distributions
wirelength prediction
wiring tree},
   Year = {2003} }



@inproceedings{
PV:KRH+03,
   Author = {Kim, C. H. and Roy, K. and Hsu, S. and Alvandpour, A. and Krishnamurthy, R. K. and Borkar, S.},
   Title = {A process variation compensating technique for sub-90 nm dynamic circuits},
   BookTitle = {Symposium on VLSI Circuits},
   Pages = {205--206},
   Abstract = {A process variation compensating technique for dynamic circuits is described for sub-90 nm technologies where leakage variation is severe. A keeper whose effective strength is optimally programmable based on die leakage enables 10% faster performance, 35% reduction in delay variation, and 5x reduction in robustness failing dies over conventional static keeper design in 90 nm dual-V/sub t/ CMOS.},
   Keywords = {CMOS integrated circuits
integrated circuit design
90 nm
conventional static keeper design
die leakage
dual-V/sub t/ CMOS
dynamic circuits
process variation compensating method
robustness},
   Year = {2003} }



@inproceedings{
PV:KTB+05,
   Author = {Kim, N. and Kgil, Taeho and Bowman, K. and De, V. and Mudge, T.},
   Title = {Total power-optimal pipelining and parallel processing under process variations in nanometer technology},
   BookTitle = {International Conference on Computer Aided Design},
   Pages = {535--540},
   Abstract = {This paper explores the effectiveness of the simultaneous application of pipelining and parallel processing as a total power (static plus dynamic) reduction technique in digital systems. Previous studies have been limited to either pipelining or parallel processing, but both techniques can be used together to reduce supply voltage at a fixed throughput point. According to our first-order analyses, there exist optimal combinations of pipelining depth and parallel processing width to minimize total power consumption. We show that the leakage power from both subthreshold and gate-oxide tunneling plays a significant role in determining the optimal combination of pipelining depth and parallel processing width. Our experiments are conducted with timing information derived from a 65nm technology and fanout-of-four (FO4) inverter chains. The experiments show that the optimal combinations of both pipelining and parallel processing - 8 /spl sim/ 12 /spl times/ FO4 logic depth pipelining with 2 /spl sim/ 3-wide parallel processing - can reduce the total power by as much as 40% compared to an optimal system using only pipelining or parallel processing alone. We extend our study to show how process parameter variations - an increasingly important factor in nanometer technologies - affects these results. Our analyses reveal that the variations shift the optimal points to shallower pipelining and narrower parallel processing - 12 /spl times/ FO4 logic depth with 2-wide parallel processing - at a fixed yield point.},
   Keywords = {logic design
nanotechnology
parallel processing
pipeline processing
65 nm
digital system
fanout-of-four inverter chain
first-order analysis
gate-oxide tunneling
leakage power
logic depth pipelining
nanometer technology
parallel processing
process variation
subthreshold tunneling
timing information
total power reduction
total power-optimal pipelining},
   Year = {2005} }



@inproceedings{
PV:KP00,
   Author = {Kishor, M. and Pineda de Gyvez, J.},
   Title = {Threshold voltage and power-supply tolerance of {CMOS} logic design families},
   BookTitle = {IEEE International Symposium on Defect and Fault Tolerance in VLSI Systems},
   Pages = {349--357},
   Abstract = {The advent of deep submicron technologies brings new challenges to digital circuit design. A reduced threshold voltage ($V_T$) and power supply ($V_{dd}$) in addition to process variabilities have a direct impact on circuit design. In a semiconductor environment it is conventionally thought that parametric yield is high and stable and that the main yield losses are functional. Although functional yield remains the main focus of attention, modern and future circuits may not have the presumed high parametric yield. We present a study that compares the tolerance to process variability of various design families for metrics including timing and power consumption under $V_T$-$V_{dd}$ scalability using a NAND gate as a test vehicle. Basically, the fundamental limitations to the scaling of the supply voltage due to the statistical variation of MOS $V_T$ are investigated and defined. The four logic families under study are: static CMOS, Differential Complementary Voltage Swing Logic (DCVSL), Domino and Pass Logic},
   Keywords = {CMOS logic circuits
ULSI
VLSI
integrated circuit design
logic design
timing
tolerance analysis
CMOS logic design families
DCVSL
NAND gate test vehicle
deep submicron technologies
differential complementary voltage swing logic
digital circuit design
domino logic
pass logic
power consumption
power supply tolerance
process variability
static CMOS logic
supply voltage scaling
threshold voltage statistical variation
threshold voltage tolerance
timing
voltage scalability},
   Year = {2000} }



@inproceedings{
PV:KKR+04,
   Author = {KleinOsowski, A. J. and KleinOsowski, K. and Rangarajan, V. and Ranganath, P. and Lilja, D. J.},
   Title = {The recursive nanobox processor grid: a reliable system architecture for unreliable nanotechnology devices},
   BookTitle = {International Conference on Dependable Systems and Networks},
   Pages = {167--176},
   Abstract = {Advanced molecular nanotechnology devices are expected to have exceedingly high transient fault rates and large numbers of inherent device defects compared to conventional CMOS devices. We introduce the recursive nanobox processor grid as an application specific, fault-tolerant, parallel computing system designed for fabrication with unreliable nanotechnology devices. In this initial study we construct VHDL models of the nanobox processor cell ALU and evaluate the effectiveness of our recursive fault masking approach in the presence of random transient errors. Our analysis shows that the ALU can calculate correctly 100 percent of the time with raw FIT (failures in time) rates as high as 10/sub 23/. We achieve this error correction with an area overhead on the order of 9x, which is quite reasonable given the high integration densities expected with nanodevices.},
   Keywords = {digital arithmetic
fault tolerant computing
grid computing
hardware description languages
nanotechnology
parallel processing
ALU
CMOS devices
VHDL models
VLSI
fault injection
fault masking
fault-tolerant computing
molecular nanotechnology devices
nanobox processor cell
parallel computing
recursive nanobox processor grid
system architecture},
   Year = {2004} }



@inproceedings{
PV:KAC+06,
   Author = {Korkmaz, Pinar and Akgul, Bilge E. S. and Chakrapani, Lakshmi N. and Palem, Krishna V.},
   Title = {Ultra Efficient Embedded {SOC} Architectures based on Probabilistic {CMOS} Technology},
   BookTitle = {Design, Automation and Test in Europe},
      Year = {2006} }



@inproceedings{
PV:KAP06,
   Author = {Korkmaz, P. and Akgul, B. E. S. and Palem, K. V.},
   Title = {Ultra-low energy computing with noise: energy performance probability},
   BookTitle = {IEEE Computer Society Annual Symposium on Emerging VLSI Technologies and Architectures},
   Pages = {6 pp.},
   Abstract = {Noise susceptibility and power density have become two limiting factors to CMOS technology scaling. As a solution to these challenges, probabilistic CMOS (PCMOS) based computing has been proposed. PCMOS devices are inherently probabilistic devices that compute correctly with a probability p. This paper investigates the trade-offs between the energy, performance and probability of correctness (p) of a PCMOS inverter. Using simple analytical models of energy, delay and p of a PCMOS inverter, the optimum energy delay product (EDP) value for given probability and performance constraints is found. The analytical models are validated using circuit simulations for a PCMOS inverter designed in a 0.13/spl mu/m process. The results show that operating the PCMOS inverter at lower supply voltages is more preferable in terms of minimizing EDP. Our analysis is useful in optimal (in terms of EDP) circuit design for satisfying application requirements in terms of performance and probability of correctness. An analysis of the impacts of the variations in the temperature and the threshold voltage on the optimal EDP values is also included in the paper.},
   Keywords = {CMOS integrated circuits
circuit noise
integrated circuit modelling
invertors
low-power electronics
0.13 micron
PCMOS inverter
energy delay product value
energy performance probability
noise susceptibility
optimal circuit design
power density
probabilistic CMOS based computing
ultra low energy computing},
   Year = {2006} }



@inproceedings{
PV:LLC+05,
   Author = {Li, Xin and Le, Jiayong and Celik, Mustafa and Pileggi, Lawrence T.},
   Title = {Defining Statistical Sensitivity for Timing Optimization of Logic Circuits With Large-Scale Process and Environmental Variations},
   BookTitle = {IEEE/ACM International Conference on Computer Aided Design},
   Pages = {844--851},
   Abstract = {The large-scale process and environmental variations for today's nanoscale ICs are requiring statistical approaches for timing analysis and optimization. Significant research has been recently focused on developing new statistical timing analysis algorithms, but often without consideration for how one should interpret the statistical timing results for optimization. In this paper (Li et al., 2005) we demonstrate why the traditional concepts of slack and critical path become ineffective under large-scale variations, and we propose a novel sensitivity-based metric to assess the "criticality" of each path and/or arc in the statistical timing graph. We define the statistical sensitivities for both paths and arcs, and theoretically prove that our path sensitivity is equivalent to the probability that a path is critical, and our arc sensitivity is equivalent to the probability that an arc sits on the critical path. An efficient algorithm with incremental analysis capability is described for fast sensitivity computation that has a linear runtime complexity in circuit size. The efficacy of the proposed sensitivity analysis is demonstrated on both standard benchmark circuits and large industry examples.},
      Year = {2005} }



@inproceedings{
PV:LW06,
   Author = {Deng, Liang and Wong, M. D. F.},
   Title = {An exact algorithm for the statistical shortest path problem},
   BookTitle = {Asia and South Pacific Conference on Design Automation},
   Pages = {6 pp.},
   Abstract = {Graph algorithms are widely used in VLSI CAD. Traditional graph algorithms can handle graphs with deterministic edge weights. As VLSI technology continues to scale into nanometer designs, we need to use probability distributions for edge weights in order to model uncertainty due to parameter variations. In this paper, we consider the statistical shortest path (SSP) problem. Given a graph G, the edge weights of G are random variables. For each path P in G, let L/sub P/ be its length, which is the sum of all edge weights on P. Clearly L/sub P/ is a random variable and we let /spl mu//sub P/, and /spl omega//sub P//sup 3/ be its mean and variance, respectively. In the SSP problem, our goal is to find a path P connecting two given vertices to minimize the cost function /spl mu//sub p/, + /spl Phi/ (/spl omega//sub P//sup 2/) where /spl Phi/ is an arbitrary function. (For example, if /spl Phi/ (/spl times/) /spl equiv/ the cost function is /spl mu//sub P/, + 3/spl omega//sub P/.) To minimize uncertainty in the final result, it is meaningful to look for paths with bounded variance, i.e., /spl omega//sub P//sup 2/ /spl les/ B for a given fixed bound B. In this paper, we present an exact algorithm to solve the SSP problem in O(B(V + E)) time where V and E are the numbers of vertices and edges, respectively, in G. Our algorithm is superior to previous algorithms for SSP problem because we can handle: 1) general graphs (unlike previous works applicable only to directed acyclic graphs), 2) arbitrary edge-weight distributions (unlike previous algorithms designed only for specific distributions such as Gaussian), and 3) general cost function (none of the previous algorithms can even handle the cost function /spl mu//sub P/, + 3/spl omega//sub P/. Finally, we discuss applications of the SSP problem to maze routing, buffer insertions, and timing analysis under parameter variations.},
   Keywords = {graph theory
network analysis
statistical analysis
arbitrary edge-weight distribution
buffer insertion
exact algorithm
general cost function
general graph
maze routing
parameter variation
statistical shortest path problem
timing analysis},
   Year = {2006} }



@inproceedings{PV:LB06,
  author    = {Liang, X. and Brooks, D.},
  title     = {Performance Optimal Micro-architecture Parameters Selection Under the Impact of Process Variation},
  booktitle = {International Conference on Computer Aided Design},
  year      = {2006},
}



@inproceedings{
PV:LWY+05,
   Author = {Zhang, Lizheng and Chen, Weijen and Hu, Yuhen and Chen, C. C. P.},
   Title = {Statistical timing analysis with extended pseudo-canonical timing model},
   BookTitle = {Design, Automation and Test in Europe},
   Pages = {952--957 Vol. 2},
   Abstract = {State of the art statistical timing analysis (STA) tools often yield less accurate results when timing variables become correlated due to global source of variations and path reconvergence. To the best of our knowledge, no good solution is available for dealing both types of correlations simultaneously. In this paper, we present a novel extended pseudo-canonical timing model to retain and evaluate both types of correlation during statistical timing analysis with minimum computation cost. Also, an intelligent pruning method is introduced to enable trade-off runtime with accuracy. Tested with ISCAS benchmark suites, our method shows both high accuracy and high performance. For example, on the circuit c6288, our distribution estimation error shows 15/spl times/ accuracy improvement compared with previous approaches.},
   Keywords = {circuit complexity
circuit simulation
correlation methods
delays
error analysis
integrated circuit design
integrated circuit interconnections
integrated circuit modelling
statistical analysis
timing
ISCAS benchmark suites
STA tools
computation cost
correlated timing variables
distribution estimation error
extended pseudo-canonical timing model
global variations source
intelligent pruning method
path reconvergence
runtime accuracy trade-off
statistical timing analysis
timing performance},
   Year = {2005} }



@book{
PV:CLO02,
   Author = {Celik, Mustafa and Pileggi, Lawrence T. and Odabasioglu, Altan},
   Title = {{IC} interconnect analysis},
   Publisher = {Kluwer Academic Publishers},
      Year = {2002} }



@book{
PV:MNB93,
   Author = {Evans, M. and Hastings, N. and Peacock, B.},
   Title = {Statistical distributions},
   Publisher = {John Wiley and Sons},
      Year = {1993} }



@inproceedings{
PV:MDO05,
   Author = {Mani, M. and Devgan, A. and Orshansky, M.},
   Title = {An efficient algorithm for statistical minimization of total power under timing yield constraints},
   BookTitle = {Design Automation Conference},
   Pages = {309--314},
   Abstract = {Power minimization under variability is formulated as a rigorous statistical robust optimization program with a guarantee of power and timing yields. Both power and timing metrics are treated probabilistically. Power reduction is performed by simultaneous sizing and dual threshold voltage assignment. An extremely fast run-time is achieved by casting the problem as a second-order conic problem and solving it using efficient interior-point optimization methods. When compared to the deterministic optimization, the new algorithm, on average, reduces static power by 31% and total power by 17% without the loss of parametric yield. The run time on a variety of public and industrial benchmarks is 30/spl times/ faster than other known statistical power minimization algorithms.},
   Keywords = {circuit optimisation
integrated circuit design
low-power electronics
minimisation
conic problem
deterministic optimization
dual threshold voltage assignment
interior-point optimization methods
power reduction
sizing assignment
statistical power minimization algorithms
timing yield constraints},
   Year = {2005} }



@inproceedings{
PV:MT05,
   Author = {Marculescu, D. and Talpes, E.},
   Title = {Variability and energy awareness: a microarchitecture-level perspective},
   BookTitle = {Design Automation Conference},
   Pages = {11--16},
   Abstract = {This paper proposes microarchitecture-level models for within die (WID) process and system parameter variability that can be included in the design of high-performance processors. Since decisions taken at microarchitecture level have the largest impact on both performance and power, on one hand, and global variability effect, on the other hand, models and associated metrics are needed for their joint characterization and analysis. To assess how these variations affect or are affected by microarchitecture decisions, we propose a joint performance, power and variability metric that is able to distinguish among various design choices. As a design-driver for the modeling methodology, we consider a clustered high-performance processor implementation, along with its globally asynchronous, locally synchronous (GALS) counterpart. Results show that, when comparing the baseline, synchronous and its GALS counterpart, microarchitecture-driven impact of process variability translates into 2-10% faster local clocks for the GALS case, while when taking into account the effect of on-chip temperature variability, local clocks can be 8-18% faster. If, in addition, voltage scaling (DVS) is employed, the GALS architecture with DVS is 26% better in terms of the joint quality metric employing energy, performance, and variability.},
   Keywords = {logic design
microprocessor chips
power consumption
DVS
GALS design
WID process
global variability
microarchitecture decision
microarchitecture-level model
on-chip temperature variability
power consumption
system parameter variability
voltage scaling
within die process},
   Year = {2005} }



@book{
hls-book1,
   Author = {De Micheli, Giovanni},
   Title = {Synthesis and Optimization of Digital Circuits},
   Publisher = {McGraw Hill},
   Address = {New York},
      Year = {1994} }



@inproceedings{
PV:MCH05,
   Author = {Pan, M. and Chu, C. and Zhou, H.},
   Title = {Timing yield estimation using statistical static timing analysis},
   BookTitle = {ISCAS},
   Pages = {2461--2464 Vol. 3},
   Abstract = {As process variations become a significant problem in deep sub-micron technology, a shift from deterministic static timing analysis to statistical static timing analysis for high-performance circuit designs could reduce the excessive conservatism that is built into current timing design methods. We address the timing yield problem for sequential circuits and propose a statistical approach to handle it. We consider the spatial and path reconvergence correlations between path delays, set-up time and hold time constraints, and clock skew due to process variations. We propose a method to get the timing yield based on the delay distributions of register-to-register paths in the circuit On average, the timing yield results obtained by our approach have average errors of less than 1.0% in comparison with Monte Carlo simulation. Experimental results show that shortest path variations and clock skew due to process variations have considerable impact on circuit timing, which could bias the timing yield results. In addition, the correlation between longest and shortest path delays is not significant.},
   Keywords = {delays
integrated circuit layout
integrated circuit modelling
integrated circuit yield
parameter estimation
sequential circuits
statistical analysis
statistical distributions
timing
circuit model
circuit timing
clock skew
deep sub-micron technology
delay distributions
deterministic static timing analysis
high-performance circuit designs
hold time constraints
path delays
process variations
register-to-register paths
sequential circuits
set-up time constraints
shortest path variations
statistical analysis
statistical static timing analysis
timing yield estimation},
   Year = {2005} }



@inproceedings{
PV:MMR04,
   Author = {Mukhopadhyay, S. and Mahmoodi-Meimand, H. and Roy, K.},
   Title = {Modeling and estimation of failure probability due to parameter variations in nano-scale {SRAMs} for yield enhancement},
   BookTitle = {Symposium on VLSI Circuits},
   Pages = {64--67},
   Abstract = {In this paper we have analyzed and modeled the failure probabilities (access time failure, read/write stability failure, and hold stability failure in the stand-by mode) of SRAM cells due to process parameter variations. A method to predict the yield of a memory chip designed with a cell is proposed based on the cell failure probability. The developed method can be used in the early stage of a design cycle to optimize the design for yield enhancement.},
   Keywords = {CMOS integrated circuits
SRAM chips
integrated circuit reliability
integrated circuit yield
access time failure
failure probability
hold stability failure
nano-scale SRAMs
parameter variations
read/write stability failure
stand-by mode
yield enhancement},
   Year = {2004} }



@inproceedings{
PV:MR03,
   Author = {Mukhopadhyay, S. and Roy, K.},
   Title = {Modeling and estimation of total leakage current in nano-scaled-{CMOS} devices considering the effect of parameter variation},
   BookTitle = {International Symposium on Low Power Electronics and Design},
   Pages = {172--175},
   Abstract = {In this paper we have developed analytical models to estimate the mean and the standard deviation in the gate, the subthreshold, the reverse biased source/drain junction band-to-band tunneling (BTBT) and the total leakage in scaled CMOS devices considering variation in process parameters like device geometry, doping profile, flat-band voltage and supply voltage. We have verified the model using Monte Carlo simulation using an NMOS device of 50 nm effective length and analyzed the results to enumerate the effect of different process parameters on the individual components and the total leakage.},
   Keywords = {CMOS integrated circuits
MOSFET
Monte Carlo methods
integrated circuit modelling
leakage currents
low-power electronics
nanotechnology
semiconductor device models
sensitivity analysis
tunnelling
50 nm
50 nm effective length
Monte Carlo simulation
NMOS device
analytical models
device geometry
doping profile
flat-band voltage
gate leakage
nano-scaled-CMOS devices
process parameter variation
reverse biased source/drain junction band-to-band tunneling
sensitivity analysis
subthreshold leakage
supply voltage
threshold voltage
total leakage current},
   Year = {2003} }



@inproceedings{
PV:NAJ05,
   Author = {Najm, F. N.},
   Title = {On the need for statistical timing analysis},
   BookTitle = {Design Automation Conference},
   Pages = {764--765},
   Abstract = {Traditional corner analysis fails to guarantee a target yield for a given performance metric. However, recently proposed solutions, in the form of statistical timing analysis, which work by propagating delay distributions, do not conform to modern design methodology. Instead, new statistical techniques are needed to modify corner analysis in ways that overcome its weaknesses without violating usage models of timing tools in modern flows.},
   Keywords = {circuit analysis computing
integrated circuit design
integrated circuit modelling
integrated circuit yield
statistical analysis
corner analysis
delay distributions
statistical timing analysis
timing circuits},
   Year = {2005} }



@inproceedings{
PV:NM04,
   Author = {Najm, F. N. and Menezes, N.},
   Title = {Statistical timing analysis based on a timing yield model},
   BookTitle = {Design Automation Conference},
   Pages = {460--465},
      Year = {2004} }



@inproceedings{PV:NG92,
   Author = {Narayan, S. and Gajski, D. D.},
   Title = {System clock estimation based on clock slack minimization},
   BookTitle = {European Design Automation Conference},
   Pages = {66--71},
   Abstract = {When estimating a hardware implementation from behavioral descriptions, an important decision is the selection of a clock cycle to schedule the datapath operations into control steps. Traditional high-level synthesis systems require the designer to specify the clock cycle explicitly or express operator delays in terms of multiples of a clock cycle. The authors present an algorithm for clock estimation from dataflow graphs, based on clock slack minimization. This will provide both designers and synthesis tools with a realistic estimate of the clock cycle that can be used to implement a design. By using real life components and examples, it is shown that the clock estimates produced by this method yield faster execution times for the designs, as compared to the maximum operator delay methods. It is observed that the designs scheduled with the clock cycle estimates have faster execution times regardless of the components finally allocated for implementing the design during synthesis},
   Keywords = {circuit layout CAD
delays
formal specification
logic CAD
behavioral descriptions
clock cycle
clock estimates
clock slack minimization
control steps
datapath operations
hardware implementation
high-level synthesis systems
operator delays
system clock estimation},
   Year = {1992} }



@inproceedings{PV:NAS00a,
   Author = {Nassif, S.},
   Title = {Delay variability: sources, impacts and trends},
   BookTitle = {IEEE International Solid-State Circuits Conference},
   Pages = {368--369},
   Abstract = {The electrical performance of an integrated circuit is impacted by (a) environmental factors which include variations in power supply voltage and temperature, and (b) physical factors caused by processing and mask imperfections. Only the physical sources of variability, denoted P, are dealt with. P includes device and wire model parameters such as $V_{th}$, $T_{ox}$ and $R_s$},
   Keywords = {delays
integrated circuit design
integrated circuit modelling
masks
wiring
IC design
delay variability
device model parameters
mask imperfections
physical factors
processing imperfections
wire model parameters},
   Year = {2000} }



@inproceedings{PV:NAS98,
   Author = {Nassif, S. R.},
   Title = {Within-chip variability analysis},
   BookTitle = {International Electron Devices Meeting},
   Pages = {283--286},
   Abstract = {Current integrated circuits are large enough that device and interconnect parameter variations within a chip are as important as those same variations from chip to chip. Previously, digital designers were concerned only with chip-to-chip variability, for which analysis techniques exist; concern for within-chip variations has been in the domain of analog circuit design. In this paper, we lay the groundwork needed to analyze the impact of inter-chip variations on digital circuits and propose an extreme-case analysis algorithm to efficiently determine the worst case performance due to such variability},
   Keywords = {digital integrated circuits
integrated circuit design
device parameters
digital integrated circuit design
extreme-case analysis algorithm
interconnect parameters
within-chip variability analysis},
   Year = {1998} }



@inproceedings{PV:NAS00,
   Author = {Nassif, S. R.},
   Title = {Design for variability in deep submicron ({DSM}) technologies},
   BookTitle = {International Symposium on Quality Electronic Design},
   Pages = {451--454},
   Abstract = {Process-induced parameter variations cause performance fluctuations and are an important consideration in the design of high performance digital ICs. Until recently, it was sufficient to model die-to-die shifts in device (active) and wire (passive) parameters, leading to a natural worst-case design methodology. In the deep-submicron era, however, within-die variations in these same device and wire parameters become just as important. In fact, current integrated circuits are large enough that variations within the die are as large as variations from die-to-die. Furthermore, while die-to-die shifts are substantially independent of the design, within-die variations are profoundly influenced by the detailed physical implementation of the IC. This changes the fundamental view of process variability from something that is imposed on the design by the fabrication process to something that is co-generated between the design and the process. This paper starts by examining the sources and historical trends in device and wire variability, distinguishing between inter-die and intra-die variations, and proposes techniques for design for variability (DOV) in the presence of both types of variations},
   Keywords = {VLSI
digital integrated circuits
integrated circuit design
deep submicron technologies
design for variability
device parameters
device variability
die-to-die shifts
high performance digital ICs
integrated circuits
inter-die variations
intra-die variations
performance fluctuations
process variability
process-induced parameter variations
wire parameters
wire variability
within-die variations},
   Year = {2000} }



@inproceedings{PV:NAS01,
   Author = {Nassif, S. R.},
   Title = {Modeling and analysis of manufacturing variations},
   BookTitle = {IEEE Conference on Custom Integrated Circuits},
   Pages = {223--228},
   Abstract = {Process-induced variations are an important consideration in the design of integrated circuits. Until recently, it was sufficient to model die-to-die shifts in device performance, leading to the well known worst-case modeling and design methodology. However, current and near-future integrated circuits are large enough that device and interconnect parameter variations within the chip are as important as those same variations from chip to chip. This presents a new set of challenges for process modeling and characterization and for the associated design tools and methodologies. This paper examines the sources and trends of process variability, the new challenges associated with the increase in within-die variability analysis, and proposes a modeling and simulation methodology to deal with this variability},
   Keywords = {integrated circuit manufacture
integrated circuit modelling
probability
statistical analysis
IC design
device parameter variations
integrated circuits
interconnect parameter variations
manufacturing variations
modeling methodology
process characterization
process modeling
process variability
process-induced variations
simulation methodology
within-die variability analysis},
   Year = {2001} }



@inproceedings{PV:NBH04,
   Author = {Nassif, S. R. and Boning, D. and Hakim, N.},
   Title = {The care and feeding of your statistical static timer},
   BookTitle = {IEEE/ACM International Conference on Computer Aided Design},
   Pages = {138--139},
   Abstract = {The integrated circuit fabrication process has inevitable imperfections and fluctuations that had resulted in ever-growing systematic and random variations in the electrical parameters of active and passive devices fabricated as stated in S. Nassif (2001). The impact of such variations on various aspects of chip performance has been the subject of numerous recent papers, and techniques for analyzing and dealing with such variability - broadly labeled design for manufacturability (DFM) - are emerging from research laboratories to practical implementation and deployment, and several service companies are actively engaged in implementing and promoting DFM techniques amongst semiconductor design and manufacturing organizations.},
   Keywords = {design for manufacture
integrated circuit design
statistical process control
active devices
broadly labeled design for manufacturability
chip performance
electrical parameters
integrated circuit fabrication
manufacturing organizations
passive devices
semiconductor design
statistical static timer},
   Year = {2004} }



@inproceedings{PV:NBG98,
   Author = {Natarajan, S. and Breuer, M. A. and Gupta, S. K.},
   Title = {Process variations and their impact on circuit operation},
   BookTitle = {International Symposium on Defect and Fault Tolerance in VLSI Systems},
   Pages = {73--81},
   Abstract = {The statistical variations in electrical parameters, such as transistor gain factors and interconnect resistances, due to variations in the manufacturing process are studied using data obtained from a 0.8 $\mu$m CMOS process. The impact of these variations and correlations on circuit operation is illustrated. Examples show that circuit delay can increase from the mean by about 100% due to crosstalk effects aggravated by process variations. Case studies emphasize the need for a tighter coupling between fabrication and circuit design and the need for new design corners based on process information},
   Keywords = {CMOS integrated circuits
VLSI
crosstalk
delays
integrated circuit design
integrated circuit interconnections
statistical analysis
0.8 micron
CMOS process
circuit delay
circuit operation
crosstalk effects
design corners
electrical parameters
interconnect resistances
manufacturing process
process variations
statistical variations
transistor gain factors},
   Year = {1998} }



@inproceedings{PV:NS06,
   Author = {Neiroukh, Osama and Song, Xiaoyu},
   Title = {Improving the Process-Variation Tolerance of Digital Circuits Using Gate Sizing and Statistical Techniques},
   BookTitle = {Design Automation and Test in Europe},
   Year = {2006} }



@inproceedings{PV:NDO+03,
   Author = {Nguyen, D. and Davare, A. and Orshansky, M. and Chinnery, D. and Thompson, B. and Keutzer, K.},
   Title = {Minimization of dynamic and static power through joint assignment of threshold voltages and sizing optimization [logic {IC} design]},
   BookTitle = {International Symposium on Low Power Electronics and Design},
   Pages = {158--163},
   Abstract = {We describe an optimization strategy for minimizing total power consumption using dual threshold voltage (Vth) technology. Significant power savings are possible by simultaneous assignment of Vth with gate sizing. We propose an efficient algorithm based on linear programming that jointly performs Vth assignment and gate sizing to minimize total power under delay constraints. First, linear programming assigns the optimal amounts of slack to gates based on power-delay sensitivity. Then, an optimal gate configuration, in terms of Vth and transistor sizes, is selected by an exhaustive local search. Benchmark results for the algorithm show 32% reduction in power consumption on average, compared to sizing only power minimization. There is up to a 57% reduction for some circuits. The flow can be extended to dual supply voltage libraries to yield further power savings.},
   Keywords = {circuit optimisation
integrated circuit design
linear programming
logic design
low-power electronics
delay constraints
dual threshold voltage technology
dynamic power minimization
gate timing slack
logic IC design
power-delay sensitivity
sizing optimization
static power minimization
threshold voltage assignment},
   Year = {2003} }



@inproceedings{PV:tutorial-dac006,
   Author = {Nikolic, B. and Habitz, P. and Radojcic, R. and Lin, X.},
   Title = {Tutorial: Practical Aspects of Coping with Variability: An Electrical View},
   BookTitle = {Design Automation Conference},
   Year = {2006} }



@inproceedings{PV:NSF01,
   Author = {Nikolic, K. and Sadek, A. and Forshaw, M.},
   Title = {Architectures for reliable computing with unreliable nanodevices},
   BookTitle = {1st IEEE Conference on Nanotechnology},
   Pages = {254--259},
   Abstract = {As electronic devices get smaller and smaller, so the probability of errors in manufacturing increases, and the need to use fault-tolerant techniques. This paper compares the relative performance of four such techniques: R-fold multiple redundancy; cascaded triple modular redundancy; von Neumann's multiplexing method; and a reconfigurable computer technique. It is shown that manufacturing defect rates of the order of 0.01 to 0.1 will require enormous amounts of redundancy, of the order of $10^3$ to $10^5$},
   Keywords = {fault tolerant computing
multiplexing
nanotechnology
reconfigurable architectures
redundancy
R-fold multiple redundancy
cascaded triple modular redundancy
electronic device
fault tolerant technique
manufacturing defect probability
reconfigurable architecture
reliable computing
unreliable nanodevice
von Neumann multiplexing},
   Year = {2001} }



@inproceedings{PV:OK05,
   Author = {Ohashi, K. and Kaneko, M.},
   Title = {Statistical schedule length analysis in asynchronous datapath synthesis},
   BookTitle = {IEEE International Symposium on Circuits and Systems},
   Volume = {1},
   Pages = {700--703},
   Abstract = {The paper proposes statistical schedule length analysis for evaluating schedule and datapath during asynchronous datapath synthesis. In order to handle the randomness of delay variation mathematically, the execution time of each operation is modeled by a stochastic variable, and an algorithm to calculate the distribution of total computation time of an application is presented. The proposed statistical schedule length analysis is then incorporated with resource binding and scheduling to form an asynchronous datapath synthesis system. Our system tends to generate better solutions than the conventional one in the mean total computation time, when the size of a target algorithm becomes larger, the number of functional units becomes larger, and the variance of execution delay of each module becomes larger. Experimental results have been obtained for a differential equation solver, a wave digital filter and elliptic wave filters.},
   Keywords = {VLSI
delays
digital signal processing chips
graph theory
logic design
processor scheduling
resource allocation
statistical analysis
stochastic processes
DSP chips
VLSI system
asynchronous datapath synthesis
combinatorial circuit
delay variation
dependence graph
digital circuit
digital signal processing
resource binding
resource sharing
scheduling graph
statistical schedule length analysis
stochastic variable
total computation time},
   Year = {2005} }



@inproceedings{PV:OYO03,
   Author = {Okada, K. and Yamaoka, K. and Onodera, H.},
   Title = {Statistical modeling of gate-delay variation with consideration of intra-gate variability},
   BookTitle = {International Symposium on Circuits and Systems},
   Volume = {5},
   Pages = {V-513--V-516},
   Abstract = {This paper proposes a model to calculate statistical gate-delay variation caused by intra-chip and inter-chip variabilities. The variation of each gate delay directly influences the variation of circuit delay, so it is important to characterize each gate-delay variation accurately. Our model characterizes the gate delay by transistor characteristics. Every transistor in a gate affects the transient characteristics of the gate, so it is indispensable to consider the intra-gate variability for the modeling of gate-delay variation. This effect is not captured in a statistical delay analysis reported so far. Our model characterizes a statistical gate-delay variation using a response surface method (RSM) and represents the intra-gate variability with a few parameters. We evaluate the accuracy of our model, and we show some simulated results of a circuit delay variation.},
   Keywords = {capacitance
delays
integrated circuit modelling
logic simulation
response surface methodology
RSM
circuit delay
circuit delay variation
gate-delay variation
inter-chip variabilities
intra-gate variability
response surface method
statistical modeling
transient characteristics
transistor characteristics},
   Year = {2003} }



@inproceedings{PV:OB04,
   Author = {Orshansky, M. and Bandyopadhyay, A.},
   Title = {Fast statistical timing analysis handling arbitrary delay correlations},
   BookTitle = {Design Automation Conference},
   Pages = {337--342},
   Year = {2004} }



@inproceedings{PV:OK02,
   Author = {Orshansky, M. and Keutzer, K.},
   Title = {A general probabilistic framework for worst case timing analysis},
   BookTitle = {Design Automation Conference},
   Pages = {556--561},
   Abstract = {The traditional approach to worst-case static-timing analysis is becoming unacceptably conservative due to an ever-increasing number of circuit and process effects. We propose a fundamentally different framework that aims to significantly improve the accuracy of timing predictions through fully probabilistic analysis of gate and path delays. We describe a bottom-up approach for the construction of joint probability density function of path delays, and present novel analytical and algorithmic methods for finding the full distribution of the maximum of a random path delay space with arbitrary path correlations.},
   Keywords = {circuit CAD
delays
logic CAD
logic gates
probability
timing
arbitrary path correlations
bottom-up approach
circuit effects
gate delays
general probabilistic framework
joint probability density function
path delays
process effects
static-timing analysis
timing predictions
worst case timing analysis},
   Year = {2002} }



@article{PV:OMC04,
   Author = {Orshansky, M. and Milor, L. and Hu, Chenming},
   Title = {Characterization of spatial intrafield gate {CD} variability, its impact on circuit performance, and spatial mask-level correction},
   Journal = {IEEE Transactions on Semiconductor Manufacturing},
   Volume = {17},
   Number = {1},
   Pages = {2--11},
   ISSN = {0894-6507},
   Abstract = {The authors present a comprehensive characterization method applied to the study of the state-of-the-art 0.18-$\mu$m CMOS process. Statistical characterization of gate CD reveals a large spatial intrafield component, strongly dependent on the local layout patterns. The authors describe the statistical analysis of this data and demonstrate the need for such comprehensive characterization. They describe the experimental setup of the novel measurement-based characterization approach that is capable of capturing all the relevant CD variation patterns necessary for accurate circuit modeling and statistical design for increased performance and yield. Characterization is based upon an inexpensive electrically based measurement technique. A rigorous statistical analysis of the impact of intrafield variability on circuit performance is undertaken. They show that intrafield CD variation has a significant detrimental effect on the overall circuit performance that may be as high as 25%. Moreover, they demonstrate that the spatial component of gate CD variability, rather than the proximity-dependent component, is predominantly responsible for speed degradation. In order to reduce the degradation of circuit performance and yield, the authors propose a mask-level spatial gate CD correction algorithm to reduce the intrafield and overall variability and provide an analytical model to evaluate the effectiveness of correction for variance reduction. They believe that potentially significant benefits can be achieved through implementation of this compensation technique in the production environment.},
   Keywords = {CMOS integrated circuits
integrated circuit layout
integrated circuit modelling
integrated circuit yield
masks
0.18 micron
CMOS
circuit modeling
circuit performance
gate CD
local layout patterns
mask-level spatial gate CD correction algorithm
proximity-dependent component
spatial intrafield component
spatial intrafield gate CD variability
spatial mask-level correction
statistical analysis
statistical design
yield},
   Year = {2004} }



@inproceedings{PV:Pal03,
   Author = {Palem, K. V.},
   Title = {Energy Aware Algorithm Design via Probabilistic Computing: From Algorithms and Models to {Moore's} Law and Novel (Semiconductor) Devices},
   BookTitle = {International Conference on Compilers, Architecture and Synthesis for Embedded Systems (CASES)},
   Year = {2003} }



@article{PV:Pal05,
   Author = {Palem, K. V.},
   Title = {Energy aware computing through probabilistic switching: a study of limits},
   Journal = {IEEE Transactions on Computers},
   Volume = {54},
   Number = {9},
   Pages = {1123--1137},
   ISSN = {0018-9340},
   Abstract = {The main result in this paper establishes the energy savings derived by using probabilistic AND as well as NOT gates constructed from an idealized switch that produces a probabilistic bit (PBIT). A probabilistic switch produces the desired value as an output that is 0 or 1 with probability p, represented as a PBIT, and, hence, can produce the wrong output value with a probability of (1-p). In contrast with a probabilistic switch, a conventional deterministic switch produces a BIT whose value is always correct. Our switch-based gate constructions are a particular case of a systematic methodology developed for building energy-aware networks for computing, using PBITS. Interesting examples of such networks include AND, OR, and NOT gates (or, as functions, Boolean conjunction, disjunction, and negation, respectively). To quantify the energy savings, novel measures of "technology independent" energy complexity are also introduced - these measures parallel conventional machine-independent notions of computational complexity such as the algorithm's running time and space. Networks of switches can be related to Turing machines and to Boolean circuits, both of which are widely known and well-understood models of computation. Our gate and network constructions lend substance to the following thesis (established for the first time by K.V. Palem): the mathematical technique referred to as randomization yielding probabilistic algorithms results in energy savings through a physical interpretation based on statistical thermodynamics and, hence, can serve as a basis for energy-aware computing. While the estimates of the energy saved through PBIT-based probabilistic computing switches and networks developed rely on the constructs and thermodynamic models due to Boltzmann, Gibbs, and Planck, this work has also led to the innovation of probabilistic CMOS-based devices and computing frameworks. Thus, for completeness, the relationship between the physical models on which this work is based and the electrical domain of CMOS-based switching is discussed.},
   Keywords = {CMOS logic circuits
energy conservation
logic gates
probabilistic logic
probability
switching theory
CMOS-based device
NOT gate
Turing machine
computational complexity
energy aware computing
low-power design
probabilistic AND gate
probabilistic bit
probabilistic switching
statistical thermodynamics
switch-based gate construction
energy-aware systems
probabilistic computation},
   Year = {2005} }



@inproceedings{PV:PCZ05,
   Author = {Pan, Min and Chu, Chris Chong-Nuen and Zhou, Hai},
   Title = {Timing Yield Estimation Using Statistical Static Timing Analysis},
   BookTitle = {IEEE International Symposium on Circuits and Systems},
   Abstract = {As process variations become a significant problem in deep sub-micron technology, a shift from deterministic static timing analysis to statistical static timing analysis for high-performance circuit designs could reduce the excessive conservatism that is built into current timing design method. In this paper, we address the timing yield problem for sequential circuits and propose a statistical approach to handle it. In our approach, we consider the spatial and path reconvergence correlations between path delays, set-up time and hold time constraints, as well as clock skew due to process variations. We propose a method to get the timing yield based on the delay distributions of register-to-register paths in the circuit. On average, the timing yield results obtained by our approach have average errors of less than 1.0% in comparison with Monte Carlo simulation. Experimental results show that shortest path variations and clock skew due to process variations have considerable impact on circuit timing, which could bias the timing yield results. In addition, the correlation between longest and shortest path delays is not significant.},
   Year = {2005} }



@inproceedings{PV:PLW+05,
   Author = {Papanikolaou, A. and Lobmaier, F. and Wang, H. and Miranda, M. and Catthoor, F.},
   Title = {A system-level methodology for fully compensating process variability impact of memory organizations in periodic applications},
   BookTitle = {International Symposium on System Synthesis and Hardware/Software Codesign},
   Pages = {117--122},
   Year = {2005} }



@inproceedings{PV:PYK+05,
   Author = {Patil, D. and Yun, S. and Kim, S. J. and Cheung, A. and Horowitz, M. and Boyd, S.},
   Title = {A new method for design of robust digital circuits},
   BookTitle = {International Symposium on Quality of Electronic Design},
   Pages = {676--681},
   Abstract = {As technology continues to scale beyond 100 nm, there is a significant increase in performance uncertainty of CMOS logic due to process and environmental variations. Traditional circuit optimization methods assuming deterministic gate delays produce a flat "wall" of equally critical paths, resulting in variation-sensitive designs. This paper describes a new method for sizing of digital circuits, with uncertain gate delays, to minimize their performance variation leading to a higher parametric yield. The method is based on adding margins on each gate delay to account for variations and using a new "soft maximum" function to combine path delays at converging nodes. Using analytic models to predict the means and standard deviations of gate delays as polynomial functions of the device sizes, we create a simple, computationally efficient heuristic for uncertainty-aware sizing of digital circuits via geometric programming. Monte-Carlo simulations on custom 32 bit adders and ISCAS'85 benchmarks show that about 10 % to 20 % delay reduction over deterministic sizing methods can be achieved, without any additional cost in area.},
   Keywords = {CMOS logic circuits
Monte Carlo methods
adders
circuit optimisation
delays
geometric programming
integrated circuit design
logic design
32 bit
CMOS logic
Monte-Carlo simulations
circuit optimization
device sizes
digital circuit sizing
parametric yield
path delays
performance uncertainty
robust digital circuit design
soft maximum function
standard deviations
uncertain gate delays},
   Year = {2005} }



@inproceedings{PV:tutorial-vlsid06,
   Author = {Puri, R. and Karnik, T. and Joshi, R.},
   Title = {Tutorial: Technology Impacts on Sub-90nm {CMOS} Circuit Design and Design Methodologies},
   BookTitle = {International Conference on VLSI Design and Embedded Systems},
   Year = {2006} }



@inproceedings{PV:QMB+05,
   Author = {Chen, Qikai and Mahmoodi, H. and Bhunia, S. and Roy, K.},
   Title = {Modeling and testing of {SRAM} for new failure mechanisms due to process variations in nanoscale {CMOS}},
   BookTitle = {VLSI Test Symposium},
   Pages = {292--297},
   Abstract = {In this paper, we have made a complete analysis of the emerging SRAM failure mechanisms due to process variations and mapped them to fault models. We have proposed two efficient test solutions for the process variation related failures in SRAM: (a) modification of March sequence, and (b) a low-overhead DFT circuit to complement the March test for an overall test time reduction of 29%, compared to the existing test technique with similar fault coverage.},
   Keywords = {CMOS memory circuits
SRAM chips
design for testability
failure analysis
integrated circuit modelling
integrated circuit testing
nanotechnology
March sequence
SRAM
efficient test solutions
failure mechanisms
fault coverage
fault models
low-overhead DFT circuit
nanoscale CMOS
process variations
test time reduction
DFT
failure mechanism
March Test
Process Variation},
   Year = {2005} }



@inproceedings{PV:RVW04,
   Author = {Raj, S. and Vrudhula, S. and Wang, J.},
   Title = {A methodology to improve timing yield in the presence of process variations},
   BookTitle = {Design Automation Conference},
   Pages = {448--453},
   Year = {2004} }



@inproceedings{PV:RDB+04,
   Author = {Rao, Rajeev R. and Devgan, A. and Blaauw, D. and Sylvester, D.},
   Title = {Parametric yield estimation considering leakage variability},
   BookTitle = {Design Automation Conference},
   Pages = {442--447},
   Year = {2004} }



@inproceedings{PV:RDH+02,
   Author = {Ramanujam, J. and Deshpande, S. and Hong, J. and Kandemir, M.},
   Title = {A heuristic for clock selection in high-level synthesis},
   BookTitle = {15th International Conference on VLSI Design},
   Pages = {414--419},
   Abstract = {Clock selection has a significant impact on the performance and quality of designs in high-level synthesis. In most synthesis systems, a convenient value of the clock is chosen or exact (and expensive) methods have been used for clock selection. This paper presents a novel heuristic approach for near-optimal clock selection for synthesis systems. This technique is based on critical paths in the dataflow graph. In addition, we introduce and exploit a new figure of merit called the activity factor to choose the best possible clock. Extensive experimental results show that the proposed technique is very fast and produces optimal solutions in a large number of cases; in those cases, where it is not optimal, we are off by just a few percent from optimal},
   Keywords = {circuit CAD
clocks
data flow graphs
high level synthesis
scheduling
timing
DFG
HLS systems
RTL structural description
activity factor
clock selection
critical paths
dataflow graph
figure of merit
high-level synthesis
near-optimal selection
register transfer level
sequencing graph},
   Year = {2002} }



@inproceedings{
PV:RAD+05,
   Author = {Rao, R. and Agarwal, K. and Devgan, A. and Nowka, K. and Sylvester, D. and Brown, R.},
   Title = {Parametric yield analysis and constrained-based supply voltage optimization},
   BookTitle = {International Symposium on Quality of Electronic Design},
   Pages = {284--290},
   Abstract = {Parametric yield loss has become a serious concern in leakage dominated technologies. We discuss the impact of leakage on parametric yield and show that leakage can cause yield windows to shrink by imposing a two-sided constraint on the window. We present a mathematical framework for yield estimation under device process variation for given power and frequency constraints. The model is validated against Monte Carlo simulations for an industry process and is shown to have typical error of less than 5%. We then demonstrate the importance of optimal supply voltage selection for yield maximization. We also investigate the sensitivity of parametric yield to applied frequency and power constraints. Finally, we apply the proposed framework to the problem of maximizing the shipping frequency in the presence of given yield and power constraints.},
   Keywords = {electric potential
integrated circuit yield
optimisation
parameter estimation
constraint-based supply voltage optimization
device process variation
frequency constraints
integrated circuits
leakage dominated technologies
nanometer regime
parametric yield analysis
power constraints
shipping frequency
yield estimation
yield maximization},
   Year = {2005} }



@inproceedings{
PV:RSB+03,
   Author = {Rao, R. and Srivastava, A. and Blaauw, D. and Sylvester, D.},
   Title = {Statistical estimation of leakage current considering inter- and intra-die process variation},
   BookTitle = {International Symposium on Low Power Electronics and Design},
   Pages = {84--89},
   Abstract = {We develop a method to estimate the variation of leakage current due to both intra-die and inter-die gate length process variability. We derive an analytical expression to estimate the probability density function (PDF) of the leakage current for stacked devices found in CMOS gates. These distributions of individual gate leakage currents are then combined to obtain the mean and variance of the leakage current for an entire circuit. We also present an approach to account for both the inter- and intra-die gate length variations to ensure that the circuit leakage PDF correctly models both types of variation. The proposed methods were implemented and tested on a number of benchmark circuits. Comparison to Monte-Carlo simulation validates the accuracy of the proposed method and demonstrates the efficiency of the proposed analysis method. Comparison with traditional deterministic leakage current analysis demonstrates the need for statistical methods for leakage current analysis.},
   Keywords = {CMOS integrated circuits
Monte Carlo methods
circuit simulation
integrated circuit manufacture
integrated circuit modelling
leakage currents
probability
statistical analysis
CMOS gates
Monte-Carlo simulation
benchmark circuits
circuit leakage PDF
deterministic leakage current analysis
gate leakage current distributions
gate length process variability
inter-die process variation
intra-die process variation
leakage current
leakage current variance
mean leakage current
models
probability density function
stacked devices
statistical estimation
statistical methods},
   Year = {2003} }



@article{
PV:RSB+04,
   Author = {Rao, R. and Srivastava, A. and Blaauw, D. and Sylvester, D.},
   Title = {Statistical analysis of subthreshold leakage current for {VLSI} circuits},
   Journal = {IEEE Transactions on Very Large Scale Integration (VLSI) Systems},
   Volume = {12},
   Number = {2},
   Pages = {131--139},
   Note = {1063-8210},
   Abstract = {We develop a method to estimate the variation of leakage current due to both intra-die and inter-die gate length process variability. We derive an analytical expression to estimate the probability density function (PDF) of the leakage current for stacked devices found in CMOS gates. These distributions of individual gate leakage currents are then combined to obtain the mean and variance of the leakage current for an entire circuit. We also present an approach to account for both the inter- and intra-die gate length variations to ensure that the circuit leakage PDF correctly models both types of variation. The proposed methods were implemented and tested on a number of benchmark circuits. Comparison to Monte Carlo simulation validates the accuracy of the proposed method and demonstrates the efficiency of the proposed analysis method. Comparison with traditional deterministic leakage current analysis demonstrates the need for statistical methods for leakage current analysis.},
   Keywords = {CMOS integrated circuits
Monte Carlo methods
VLSI
integrated circuit modelling
leakage currents
statistical distributions
CMOS gates
Monte Carlo simulation
VLSI circuits
gate length process variability
probability density function
stacked devices
statistical analysis
subthreshold leakage current},
   Year = {2004} }



@inproceedings{
PV:tutorial-isscc06,
   Author = {Rohrer, Norman},
   Title = {Tutorial: Introduction to Statistical Variation and Techniques for Design Optimization},
   BookTitle = {International Solid State Circuits Conference},
      Year = {2006} }



@inproceedings{
PV:CKP+05,
   Author = {Cheemalavagu, S. and Korkmaz, P. and Palem, K. V. and Akgul, B. and Chakrapani, L.},
   Title = {A probabilistic {CMOS} switch and its realization by exploiting noise},
   BookTitle = {IFIP-VLSI SoC},
      Year = {2005} }



@inproceedings{
PV:SAM04,
   Author = {Samaan, S. B.},
   Title = {The impact of device parameter variations on the frequency and performance of {VLSI} chips},
   BookTitle = {IEEE/ACM International Conference on Computer Aided Design},
   Pages = {343--346},
   Abstract = {The distance-correlated (continuous) within-die (WID) process variations of transistor parameters appears to be approximately scaling with process generations. Furthermore, shrinking clock cycles and the scaling of functional block dimensions in complex chips (e.g. CPUs), cause a shortening of interconnect distances. These effects mitigate correlated variations' impact on delay changes across a die. Temperature has a small effect, and supply distribution can be well-understood and designed. Furthermore, uncorrelated (random) variations (e.g. RDF, & LER) currently have a small impact on speed-setting paths, and even multiplying their effect (as processes shrink), would not make them very significant. Coupled with methods for estimating the shift in the maximum operating frequency (F/sub max/) of a die (due to variations), it is shown that variations will continue to have a small effect on product speeds through the mid-term future.},
   Keywords = {VLSI
frequency response
network analysis
VLSI chips
complex chips
device parameter variations
distance-correlated within-die process variations
functional block dimensions
maximum operating frequency shift
random variations
shrinking clock cycles
speed-setting path
transistor parameters
uncorrelated variations},
   Year = {2004} }



@inproceedings{
PV:tutorial-dac05b,
   Author = {Sapatnekar, S. and De, V. and Orshansky, M. and Hakim, N.},
   Title = {Tutorial: Statistical Performance Analysis and Optimization of Digital Circuits},
   BookTitle = {Design Automation Conference},
      Year = {2005} }



@book{
PV:timing-book,
   Author = {Sapatnekar, S.},
   Title = {Timing},
   Publisher = {Kluwer Academic Publishers},
      Year = {2004} }



@inproceedings{
PV:Tutorial-dac05a,
   Author = {Scheffer, L. and Nassif, S. and Strojwas, A. and Koenemann, B. and NS, N.},
   Title = {Tutorial: Design for Manufacturing at 65 nm and Below},
   BookTitle = {Design Automation Conference},
      Year = {2005} }



@inproceedings{
PV:SER04,
   Author = {Sery, G.},
   Title = {Impact of process variation phenomena on performance and quality assessment},
   BookTitle = {International Conference on Integrated Circuit Design and Technology},
   Pages = {25},
   Abstract = {Summary form only given. Logic product density and performance trends have continued to follow the course predicted by Moore's Law. To support the trends in the future and build logic products approaching one billion or more transistors before the end of the decade, several challenges must be met. These challenges include: 1) maintaining transistor/interconnect feature scaling, 2) the increasing power density dilemma, 3) increasing relative difficulty of 2-D feature resolution and general critical dimension control, 4) identifying cost effective solutions to increasing process and design database complexity, and 5), improving general performance and quality predictability in the face of the growing control, complexity and predictability issues. The trend in transistor scaling can be maintained while addressing the power density issue with new transistor structures, design approaches, and product architectures (e.g. high-k, metal gate, etc.). Items 3 to 5 are the focus of this work and are also strongly inter-related. The general 2-D patterning and resolution control problems will require several solution approaches both through design and technology e.g. reduce design degrees of freedom, use of simpler arrayed structures, improved uniformity, improved tools, etc. The data base complexity/cost problem will require solutions likely to involve use of improved data structure, improved use of hierarchy, and improved software and hardware solutions. Performance assessment, predictability and quality assessment will benefit from solutions to the control and complexity issues noted above. In addition, new design techniques/tools as well as improved process characterization models and methods can address the general performance/quality assessment challenge.},
   Keywords = {circuit CAD
circuit complexity
integrated circuit design
integrated logic circuits
logic CAD
technological forecasting
2-D feature resolution
2-D patterning
cost effective solutions
critical dimension control
design database complexity
increasing power density
logic product
performance challenges
predictability
process complexity
process variation phenomena
quality assessment
transistor scaling},
   Year = {2004} }



@inproceedings{
PV:SPR04,
   Author = {Choi, S. and Paul, B. C. and Roy, K.},
   Title = {Novel sizing algorithm for yield improvement under process variation in nanometer technology},
   BookTitle = {Design Automation Conference},
   Pages = {454--459},
   Abstract = {NA},
      Year = {2004} }



@misc{PV:BT03,
   Author = {Borkar, Shekhar and Karnik, Tanay and Narendra, Siva and Tschanz, Jim and Keshavarzi, Ali and De, Vivek},
   Title = {Parameter variations and impact on circuits and microarchitecture},
   Publisher = {ACM Press},
   Note = {775920
338-342},
         Year = {2003} }



@inproceedings{
PV:SMO05,
   Author = {Singh, A. K. and Mani, M. and Orshansky, M.},
   Title = {Statistical technology mapping for parametric yield},
   BookTitle = {International Conference on Computer Aided Design (ICCAD)},
   Pages = {511--518},
   Abstract = {The increasing variability of process parameters leads to substantial parametric yield losses due to timing and leakage power constraints. Leakage power is especially affected by variability because of its exponential dependence on the highly varying transistor channel length and threshold voltage. This paper describes the new technology mapping algorithm that performs library binding to maximize parametric yield limited both by timing and power constraints. This is the first work that rigorously treats variability in circuit leakage power and delay within logic synthesis. Experiments show that moving the concerns about variability into logic synthesis is justified. The results on industrial and public benchmarks indicate that, on average, the reduction in stand-by power can be up to 26% and can be as high as 50% for some benchmarks. The reduction is purely due to a more effective decision-making of the mapping algorithm, and is achieved without a timing parametric yield loss. Alternatively, the algorithm leads to the delay reduction of up to 17%, with a 10% average possible reduction across the benchmarks, for stringent leakage constraints at a fixed yield level. Parametric yield at a fixed leakage target can also be substantially increased. In some examples, the statistical mapper leads to a 80% yield at the leakage value for which the deterministic mapper guaranteed only a 50% yield.},
   Keywords = {integrated circuit design
integrated circuit yield
logic design
statistical analysis
timing
decision-making
leakage power constraint
library binding
logic synthesis
parametric yield
statistical technology mapping
threshold voltage
varying transistor channel length},
   Year = {2005} }



@inproceedings{
PV:SH05,
   Author = {Sinha, D. and Zhou, Hai},
   Title = {A unified framework for statistical timing analysis with coupling and multiple input switching},
   BookTitle = {IEEE/ACM International Conference on Computer-Aided Design},
   Pages = {837--843},
   Abstract = {As technology scales to smaller dimensions, increasing process variations, coupling induced delay variations and multiple input switching effects make timing verification extremely challenging. In this paper, we establish a theoretical framework for statistical timing analysis with coupling and multiple input switching. We prove the convergence of our proposed iterative approach and discuss implementation issues under the assumption of a Gaussian distribution for the parameters of variation. A statistical timer based on our proposed approach is developed and experimental results are presented for the IS-CAS benchmarks. We juxtapose our timer with a single pass, non iterative statistical timer that does not consider the mutual dependence of coupling with timing and another statistical timer that handles coupling deterministically. Monte Carlo simulations reveal a distinct gain (up to 24%) in accuracy by our approach in comparison to the others mentioned.},
   Keywords = {Gaussian distribution
Monte Carlo methods
delays
integrated circuit modelling
iterative methods
switching
timing
Gaussian distribution
IS-CAS benchmarks
Monte Carlo simulations
coupling induced delay variations
iterative approach
multiple input switching
noniterative statistical timer
process variations
statistical timing analysis
timing verification},
   Year = {2005} }



@inproceedings{
PV:SSH05,
   Author = {Sinha, D. and Shenoy, N. V. and Zhou, Hai},
   Title = {Statistical gate sizing for timing yield optimization},
   BookTitle = {IEEE/ACM International Conference on Computer-Aided Design},
   Pages = {1037--1041},
   Abstract = {Variability in the chip design process has been relatively increasing with technology scaling to smaller dimensions. Using worst case analysis for circuit optimization severely over-constrains the system and results in solutions with excessive penalties. Statistical timing analysis and optimization have consequently emerged as a refinement of the traditional static timing approach for circuit design optimization. In this paper, we propose a statistical gate sizing methodology for timing yield improvement. We build statistical models for gate delays from library characterizations at multiple process corners and operating conditions. Statistical timing analysis is performed, which drives gate sizing for timing yield optimization. Experimental results are reported for the ISCAS and MCNC benchmarks. In addition, we provide insight into statistical properties of gate delays for a given technology library which intuitively explains when and why statistical optimization improves over static timing optimization.},
   Keywords = {circuit optimisation
delays
integrated circuit design
integrated circuit yield
statistical analysis
IC design
gate delay
library characterization
multiple process corner
statistical gate sizing
statistical timing analysis
timing yield optimization},
   Year = {2005} }



@article{
PV:SK04,
   Author = {Sirisantana, N. and Roy, Kaushik},
   Title = {Low-power design using multiple channel lengths and oxide thicknesses},
   Journal = {IEEE Design and Test of Computers},
   Volume = {21},
   Number = {1},
   Pages = {56--63},
   Note = {0740-7475},
   Abstract = {Two CMOS design techniques use dual threshold voltages to reduce power consumption while maintaining high performance. Simulation results show power savings of 21% for one technique at low activity, and for the other, 19% at high activity and 38% at tow activity.},
   Keywords = {CMOS digital integrated circuits
electronic engineering computing
integrated circuit design
low-power electronics
CMOS design techniques
CMOS digital circuits
dual threshold voltages
electronic engineering computing
power consumption},
   Year = {2004} }



@inproceedings{
PV:SWB04,
   Author = {Zhang, Songqing and Wason, V. and Banerjee, K.},
   Title = {A Probabilistic Framework to Estimate Full-Chip Subthreshold Leakage Power Distribution Considering Within-Die and Die-to-Die P-T-V Variations},
   BookTitle = {International Symposium on Low Power Electronics and Design},
   Pages = {156--161},
   Abstract = {This paper presents a probabilistic framework for full-chip estimation of subthreshold leakage power distribution considering both within-die and die-to-die variations in process (P), temperature (T) and supply voltage (V). The results obtained under this framework are compared to BSIM results and are found to be more accurate in comparison to those obtained from existing statistical models. Using this framework, a quantitative analysis of the relative sensitivities of subthreshold leakage to P-T-V variations has been presented. For the first time, the effects of die-to-die channel length and temperature variations on subthreshold leakage are studied in combination with all within-die variations. It has been shown that for accurate estimation of subthreshold leakage, it is important to consider die-to-die temperature variations which can significantly increase the leakage power due to electrothermal couplings between power and temperature. Furthermore, the full-chip leakage power distribution arising due to both within-die and die-to-die P-T-V is calculated, which is subsequently used to estimate the leakage constrained yield under the impact of these variations. The calculations show that the yield is significantly lowered under the impact of within-die and die-to-die process and temperature variations.},
   Keywords = {Subthreshold leakage power distribution
die-to-die variations
electrothermal couplings
process variations
within-die variations
yield estimation},
   Year = {2004} }



@inproceedings{
PV:SBB+02,
   Author = {Srivastava, A. and Bai, R. and Blaauw, D. and Sylvester, D.},
   Title = {Modeling and analysis of leakage power considering within-die process variations},
   BookTitle = {International Symposium on Low Power Electronics and Design},
   Pages = {64--67},
   Abstract = {We describe the impact of process variation on leakage power for a 0.18 /spl mu/m CMOS technology. We show that variability, manifested in gate length (L/sub drawn/), gate oxide thickness (T/sub ox/), and channel dose (N/sub sub/), can drastically affect the leakage current. We first present Monte Carlo-based simulation results for leakage current in various CMOS gates when the process parameters are varied both individually and concurrently. We then derive an analytical model to estimate the mean and standard deviation of the leakage current as a function of the process parameter distributions. We demonstrate that the results of the analytical model match well with Monte-Carlo simulations and also show the statistical mean leakage current is significantly different from the leakage predicted using a nominal case file.},
   Keywords = {CMOS integrated circuits
Monte Carlo methods
circuit simulation
integrated circuit manufacture
integrated circuit modelling
leakage currents
statistical analysis
0.18 micron
CMOS gates
CMOS technology
Monte Carlo-based simulation
analytical model
channel dose
gate length
gate oxide thickness
leakage current
leakage power
mean leakage current deviation
modeling
process parameters
standard leakage current deviation
statistical mean leakage current
within-die process variations},
   Year = {2002} }



@inproceedings{
PV:SSA+05,
   Author = {Srivastava, A. and Shah, S. and Agarwal, K. and Sylvester, D. and Blaauw, D. and Director, S.},
   Title = {Accurate and efficient gate-level parametric yield estimation considering correlated variations in leakage power and performance},
   BookTitle = {Design Automation Conference},
   Pages = {535--540},
   Abstract = {Increasing levels of process variation in current technologies have a major impact on power and performance, and result in parametric yield loss. In this work we develop an efficient gate-level approach to accurately estimate the parametric yield defined by leakage power and delay constraints, by finding the joint probability distribution function (jpdf) for delay and leakage power. We consider inter-die variations as well as intra-die variations with correlated and random components. The correlation between power and performance arise due to their dependence on common process parameters and is shown to have a significant impact on yield in high-frequency bins. We also propose a method to estimate parametric yield given the power/delay jpdf that is much faster than numerical integration with good accuracy. The proposed approach is implemented and compared with Monte Carlo simulations and shows high accuracy, with the yield estimates achieving an average error of 2%.},
   Keywords = {Monte Carlo methods
delays
integrated circuit design
integrated circuit yield
leakage currents
network analysis
statistical distributions
Monte Carlo simulations
correlated variations
delay constraints
gate-level parametric yield estimation
inter-die variations
intra-die variations
joint probability distribution function
leakage power variations
performance variations},
   Year = {2005} }



@book{
PV:michigan-book,
   Author = {Srivastava, A. and Sylvester, D. and Blaauw, D.},
   Title = {Statistical analysis and optimization for {VLSI}: Timing and Power},
   Publisher = {Springer},
      Year = {2005} }



@inproceedings{
PV:SHS+03,
   Author = {Stammermann, A. and Helms, D. and Schulte, M. and Schulz, A. and Nebel, W.},
   Title = {Binding allocation and floorplanning in low power high-level synthesis},
   BookTitle = {International Conference on Computer Aided Design},
   Pages = {544--550},
   Abstract = {This work is a contribution to high level synthesis for low power systems. While device feature size decreases, interconnect power becomes a dominating factor. Thus it is important that accurate physical information is used during high-level synthesis. We propose a new power optimisation algorithm for RT-level netlists. The optimisation performs simultaneously slicing-tree structure-based floorplanning and functional unit binding and allocation. Since floorplanning, binding and allocation can use the information generated by the other step, the algorithm can greatly optimise the interconnect power. Compared to interconnect unaware power optimised circuits, it shows that interconnect power can be reduced by an average of 41.2 %, while reducing overall power by 24.1 % on an average. The functional unit power remains nearly unchanged. These optimisations are not achieved at the expense of area.},
   Keywords = {circuit layout
circuit optimisation
high level synthesis
integrated circuit interconnections
low-power electronics
power consumption
RT level netlists
functional unit allocation
functional unit binding
high level synthesis
low power systems
power optimisation algorithm
power optimised circuits
slicing tree structure based floorplanning},
   Year = {2003} }



@inproceedings{
PV:HLD+03,
   Author = {Su, Haihua and Liu, F. and Devgan, A. and Acar, E. and Nassif, S.},
   Title = {Full chip leakage-estimation considering power supply and temperature variations},
   BookTitle = {International Symposium on Low Power Electronics and Design},
   Pages = {78--83},
   Abstract = {Leakage power is emerging as a key design challenge in current and future CMOS designs. Since leakage is critically dependent on operating temperature and power supply, we present a full chip leakage estimation technique which accurately accounts for power supply and temperature variations. State of the art techniques are used to compute the thermal and power supply profile of the entire chip. Closed-form models are presented which relate leakage to temperature and VDD variations. These models coupled with the thermal and VDD profile are used to generate an accurate full chip leakage estimation technique considering environmental variations. The results of this approach are demonstrated on large-scale industrial designs.},
   Keywords = {CMOS integrated circuits
circuit simulation
environmental degradation
integrated circuit design
integrated circuit measurement
integrated circuit modelling
leakage currents
low-power electronics
parameter estimation
temperature distribution
thermal analysis
CMOS designs
VDD profile
VDD variations
closed-form models
environmental variations
full chip leakage estimation
large-scale industrial designs
leakage power
operating temperature
power supply
power supply profile
power supply variations
temperature variations
thermal profile},
   Year = {2003} }



@article{
PV:SC01,
   Author = {Sylvester, D. and Hu, Chenming},
   Title = {Analytical modeling and characterization of deep-submicrometer interconnect},
   Journal = {Proceedings of the IEEE},
   Volume = {89},
   Number = {5},
   Pages = {634--664},
   Note = {0018-9219},
   Abstract = {This work addresses two fundamental concepts regarding deep-submicrometer interconnect. First, characterization of on-chip interconnect is considered with particular attention to ultrasmall capacitance measurement and in-situ noise evaluation techniques. An approach to measuring femto-Farad level wiring capacitances is presented that is based on the concept of supplying and removing charge with active devices. The method, called the charge-based capacitance measurement (CBCM) technique, has the advantages of being compact, having high-resolution, and being very simple. We also present a novel time-domain measurement scheme for on-chip crosstalk noise that is based on the use of cascaded high-speed differential pairs to compare a user-defined reference voltage to the unknown noise peak value. The noise measurement technique complements a delay measurement to directly evaluate the impact of capacitive coupling on delay for various victim and aggressor driver sizes as well as arbitrary waveform timing and phase alignments. The second area of emphasis in this work is analytical interconnect modeling. Several important effects are modeled, including a rigorous crosstalk noise model that also includes a timing-level model. Results from this noise model show it to provide accuracy within 10% of SPICE for a wide range of input parameters. The noise model can also be calibrated and verified with comparison to the noise measurement scheme described in this work. A fast Monte Carlo approach to modeling the circuit impact of back-end process variation is presented providing a better depiction of real 3-&sigma; performance spreads compared to the traditional skew-corner approach. Finally. A comprehensive system-level performance model called Berkeley Advanced Chip Performance Calculator (BACPAC) is developed that accounts for a number of relevant deep-submicrometer system design issues. 
BACPAC has been implemented online and is useful in exploring the capabilities of future very large scale integration systems as well as determining trends and tradeoffs inherent in the design process},
   Keywords = {Monte Carlo methods
VLSI
capacitance measurement
crosstalk
electric noise measurement
integrated circuit design
integrated circuit interconnections
integrated circuit measurement
integrated circuit modelling
integrated circuit noise
Berkeley Advanced Chip Performance Calculator
Monte Carlo model
VLSI design
analytical model
charge-based capacitance measurement
deep-submicron interconnect
system-level model
time-domain crosstalk noise measurement
timing-level model},
   Year = {2001} }



@inproceedings{
PV:SNC99,
   Author = {Sylvester, D. and Nakagawa, O. S. and Hu, Chenming},
   Title = {Modeling the impact of back-end process variation on circuit performance},
   BookTitle = {International Symposium on VLSI Technology, Systems, and Applications},
   Pages = {58--61},
   Abstract = {We present a stochastic approach to account for on-chip interconnect process variation. A Monte Carlo approach is taken using actual process distributions to generate realistic 3-D performance corners. Accurate analytical models are used to provide a &gt;3 order of magnitude speedup over simulation techniques. Resulting delay and noise performance spreads are 33 to 63% tighter than those found using a conventional technique. We apply this method to a clock distribution network to more precisely determine clock skew},
   Keywords = {CMOS digital integrated circuits
Monte Carlo methods
SPICE
ULSI
crosstalk
delays
integrated circuit interconnections
integrated circuit modelling
integrated circuit noise
3-D performance corners
Monte Carlo approach
ULSI
analytical models
back-end process variation
circuit performance
clock distribution network
clock skew
crosstalk
deep submicron CMOS
delay performance spread
noise performance spread
on-chip interconnect modeling
process distributions
stochastic approach},
   Year = {1999} }



@inproceedings{
PV:TDC+05,
   Author = {Teene, A. and Davis, B. and Castagnetti, R. and Brown, J. and Ramesh, S.},
   Title = {Impact of interconnect process variations on memory performance and design},
   BookTitle = {International Symposium on Quality of Electronic Design},
   Pages = {694--699},
   Abstract = {Interconnect-related effects have become significant factors that can affect complex nanometer designs, such as memories. Thus, a robust memory design methodology needs to include the accurate modeling of interconnect parasitics and interconnect process variations. In this paper we present a statistical design approach to study the impact of interconnect process variations on memory performance and design. This approach uses 3D parasitic extraction, circuit simulation, Monte Carlo and sensitivity analysis to determine the parasitic and performance sensitivities to interconnect process parameter variations for a 90 nm memory design example. The sensitivity analysis results can be used to optimize the memory circuit design and layout to further improve memory performance and robustness.},
   Keywords = {Monte Carlo methods
circuit simulation
digital storage
integrated circuit interconnections
integrated circuit layout
nanoelectronics
sensitivity analysis
3D parasitic extraction
90 nm
Monte Carlo
circuit simulation
complex nanometer designs
interconnect parasitics
interconnect process variations
memory circuit design
memory circuit layout
memory performance
performance
robust memory design
sensitivity analysis
statistical design},
   Year = {2005} }



@inproceedings{
PV:TY98,
   Author = {Tomiyama, H. and Yasuura, H.},
   Title = {Module selection using manufacturing information},
   BookTitle = {Asia and South Pacific Design Automation Conference},
   Pages = {275--281},
   Abstract = {Since manufacturing processes inherently fluctuate, LSI chips which are produced from the same design have different propagation delays. However, the difference in delays caused by the process fluctuation has rarely been considered in most high-level synthesis systems which were developed before. This paper presents a new approach to module selection in high-level synthesis, which exploits difference in functional unit delays. First, a module library model which assumes the probabilistic nature of functional unit delays is presented. Then, we propose a module selection problem and an algorithm which minimizes the cost per faultless chip. Experimental results demonstrate that the proposed algorithm finds the optimal module selection which would not have been explored without manufacturing information},
   Keywords = {delays
high level synthesis
large scale integration
manufacturing processes
LSI chips
high-level synthesis systems
manufacturing information
manufacturing processes
module library model
module selection
propagation delays},
   Year = {1998} }



@inproceedings{
PV:TBD05,
   Author = {Tschanz, J. and Bowman, K. and De, V.},
   Title = {Variation-tolerant circuits: circuit solutions and techniques},
   BookTitle = {Design Automation Conference},
   Pages = {762--773},
   Abstract = {Die-to-die and within-die variations impact the frequency and power of fabricated dies, affecting functionality, performance, and revenue. Variation-tolerant circuits and post-silicon tuning techniques are important for minimizing the impacts of these variations. This paper describes several circuit techniques that can be employed to ensure efficient circuit operation in the presence of ever-increasing variations.},
   Keywords = {integrated circuit design
body bias
circuit solutions
circuit techniques
die-to-die variations
parameter variation
post-silicon tuning techniques
variation-tolerant circuits
within-die variations},
   Year = {2005} }



@article{
PV:TKN+02,
   Author = {Tschanz, James and Kao, James and Narendra, Siva and Nair, Raj and Antoniadis, Dimitri and Chandrakasan, Anantha and De, Vivek},
   Title = {Adaptive Body Bias for Reducing Impacts of Die-to-die and Within-Die Parameter Variations on Microprocessor Frequency and Leakage},
   Journal = {IEEE Journal of Solid-State Circuits},
   Volume = {37},
   Number = {11},
   Pages = {1396--1402},
   Year = {2002} }



@article{
PV:TNN+03,
   Author = {Tschanz, J. W. and Narendra, S. and Nair, R. and De, V.},
   Title = {Effectiveness of adaptive supply voltage and body bias for reducing impact of parameter variations in low power and high performance microprocessors},
   Journal = {IEEE Journal of Solid-State Circuits},
   Volume = {38},
   Number = {5},
   Pages = {826--829},
   ISSN = {0018-9200},
   Abstract = {Adaptive supply voltage as well as adaptive body bias may be used to control the frequency and leakage distribution of fabricated microprocessor dies. Test chip measurements show that adaptive $V_{CC}$ is effective in reducing the impact of parameter variations on frequency, active power, and leakage power of microprocessors when 20 mV $V_{CC}$ resolution is used. Using adaptive $V_{CC}$ together with adaptive $V_{BS}$ or within-die body bias is much more effective than using any of them individually.},
   Keywords = {CMOS digital integrated circuits
adaptive control
leakage currents
low-power electronics
microprocessor chips
voltage control
CMOS digital ICs
active power
adaptive body bias
adaptive supply voltage
forward bias
frequency
high performance microprocessors
leakage distribution control
leakage power
low-power microprocessors
parameter variations
within-die variation},
   Year = {2003} }



@inproceedings{
PV:TTF01,
   Author = {Tsukiyama, S. and Tanaka, M. and Fukui, M.},
   Title = {A statistical static timing analysis considering correlations between delays},
   BookTitle = {Asia and South Pacific Design Automation Conference},
   Pages = {353--358},
   Abstract = {In this paper, we present a new algorithm for the statistical static timing analysis of a CMOS combinatorial circuit, which can treat correlations of arrival times of input signals to a logic gate and correlations of switching delays in a logic gate. We model each switching delay by a normal distribution, and use a normal distribution of two stochastic variables with a coefficient of correlation for computing the distribution of output delay of a logic gate. Since the algorithm takes the correlation into account, the time complexity is $O(n \cdot m)$ in the worst-case, where n and m are the numbers of vertices and edges of the acyclic graph representing a given combinatorial circuit},
   Keywords = {CMOS logic circuits
circuit complexity
combinational circuits
delays
graph theory
logic CAD
logic gates
statistical analysis
timing
CMOS combinatorial circuit
acyclic graph
arrival times
delay correlations
edges
input signals
logic gate
normal distribution
statistical static timing analysis
stochastic variables
switching delays
time complexity
vertices},
   Year = {2001} }



@article{
PV:TY06,
   Author = {Chen, Tung-Chieh and Chang, Yao-Wen},
   Title = {Modern floorplanning based on {B}-tree and fast simulated annealing},
   Journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
   Volume = {25},
   Number = {4},
   Pages = {637--650},
   ISSN = {0278-0070},
   Abstract = {Unlike classical floorplanning that usually handles only block packing to minimize silicon area, modern very large scale integration (VLSI) floorplanning typically needs to pack blocks within a fixed die (outline), and additionally considers the packing with block positions and interconnect constraints. Floorplanning with bus planning is one of the most challenging modern floorplanning problems because it needs to consider the constraints with interconnect and block positions simultaneously. In this paper, the authors study two types of modern floorplanning problems: 1) fixed-outline floorplanning and 2) bus-driven floorplanning (BDF). This floorplanner uses B$^*$-tree floorplan representation based on fast three-stage simulated annealing (SA) scheme called Fast-SA. For fixed-outline floorplanning, the authors present an adaptive Fast-SA that can dynamically change the weights in the cost function to optimize the wirelength under the outline constraint. Experimental results show that this floorplanner can achieve 100% success rates efficiently for fixed-outline floorplanning with various aspect ratios. For the BDF, the authors explore the feasibility conditions of the B$^*$-tree with the bus constraints, and develop a BDF algorithm based on the conditions and Fast-SA. Experimental results show that this floorplanner obtains much smaller dead space for the floorplanning with hard/soft macro blocks, compared with the most recent work. In particular, this floorplanner is more efficient than the previous works.},
   Keywords = {VLSI
circuit CAD
circuit optimisation
integrated circuit layout
simulated annealing
trees (mathematics)
B$^*$-tree
VLSI
bus driven floorplanning
bus planning
fast simulated annealing
fixed outline floorplanning
very large scale integration
Floorplanning
physical design},
   Year = {2006} }

@article{
PV:Intel_2006_Micro,
   Author = {Unsal, O. S. and Tschanz, J. W. and Bowman, K. and De, V. and Vera, X. and Gonzalez, A. and Ergin, O.},
   Title = {Impact of Parameter Variations on Circuits and Microarchitecture},
   Journal = {IEEE Micro},
   Volume = {26},
   Number = {6},
   Pages = {30--39},
   ISSN = {0272-1732},
   Abstract = {Parameter variations, which are increasing along with advances in process technologies, affect both timing and power. Variability must be considered at both the circuit and microarchitectural design levels to keep pace with performance scaling and to keep power consumption within reasonable limits. This article presents an overview of the main sources of variability and surveys variation-tolerant circuit and microarchitectural approaches},
   Keywords = {low-power electronics
microprocessor chips
circuit design
microarchitectural design
parameter variations
performance scaling
process technologies
variation-tolerant circuit approach
variation-tolerant microarchitectural approach
impact of VLSI on system design
performance and reliability
processor architectures
variation-tolerant design},
   Year = {2006} }



@inproceedings{
PV:VB05,
   Author = {Venkatraman, V. and Burleson, W.},
   Title = {Impact of process variations on multi-level signaling for on-chip interconnects},
   BookTitle = {International Conference on VLSI Design},
   Pages = {362--367},
   Abstract = {Global interconnects are widely acknowledged as a limiting factor in future on-chip designs. Novel interconnect driving techniques like multi-level signaling have been proposed to improve performance of on-chip interconnects. This paper presents the impact of process-induced parameter variation on multi-level signaling system for on-chip interconnects. The effects of parameter variations is analyzed by Monte Carlo simulations and parameter sensitivity analyses. Monte Carlo analyses show that the threshold voltage, effective gate length and supply voltage are the key parameters that influence interconnect delay and total average power. It also shows that the interconnect delay and total average power with multi-level signaling for 10 mm line in 100 nm technology are normally distributed with a standard deviation of around 7.8% and 14.55% respectively. Individual parameter sensitivity analyses show that the total average power is most influenced by threshold voltage and is least influenced by drain/source parasitic resistance and thickness of oxide. The impact of different technologies, which include 180 nm, 130 nm and 100 nm are analyzed and it can be seen that the impact of individual device variation on delay and power reduces as technology scaled down. Yield of high performance and low power bins in 180 nm technology under process variations is 30%, yield of high performance bins is 23.2% and yield of low power bins is 36.1%.},
   Keywords = {Monte Carlo methods
integrated circuit design
integrated circuit interconnections
100 nm
130 nm
180 nm
Monte Carlo simulations
drain/source parasitic resistance
effective gate length
integrated circuit design
interconnect delay
multilevel signaling
on-chip interconnects
oxide thickness
parameter sensitivity analysis
process variations
supply voltage
threshold voltage
total average power},
   Year = {2005} }



@inproceedings{
PV:KS07,
   Author = {Khandelwal, Vishal and Srivastava, Ankur},
   Title = {Variability-Driven Formulation for Simultaneous Gate Sizing and Post-Silicon Tunability Allocation},
   BookTitle = {International Symposium on Physical Design},
   Pages = {11--18},
   Abstract = {Process variations cause design performance to become unpredictable in deep sub-micron technologies. Several statistical techniques (timing analysis, gate-sizing) have been proposed to counter these variations during design optimization. Another interesting approach to improve timing yield is post-silicon tunable (PST) clock-tree. In this work, we propose an integrated framework that performs simultaneous statistical gate-sizing in presence of PST clock-tree buffers for minimizing binning-yield loss (BYL) and tunability costs by determining the ranges of tuning to be provided at each buffer. The simultaneous gate-sizing and PST buffer range determination problem is proved to be a convex stochastic programming formulation under longest path delay constraints and hence solved optimally. We further extend the formulation into a heuristic to additionally consider shortest path delay constraints. We make experimental comparisons using nominal gate sizing followed by PST buffer management using [12] as a base-case. We take the solution obtained from this approach and perform 1) Sensitivity-based statistical gate-sizing while retaining the PST clock tree 2) Simultaneous gate sizing and PST buffer range determination as proposed in this work. On an average, the BYL obtained from our approach is 98% lower than the base-case ([12]) and 95% lower than the sensitivity-based algorithm. On an average the base-case approach [12] gave 22% timing yield loss (YL), the sensitivity approach gave 19% YL, where as our proposed algorithm gave only 3% YL. The total PST tuning buffer range that is allocated through the proposed algorithm is comparable to that obtained from [12]. The proposed algorithm had a 2.2x runtime speedup compared to the sensitivity-based algorithm.},
   Year = {2007} }



@inproceedings{
PV:VIS03,
   Author = {Visweswariah, C.},
   Title = {Death, taxes and failing chips},
   BookTitle = {Design Automation Conference},
   Pages = {343--347},
   Abstract = {In the way they cope with variability, present-day methodologies are onerous, pessimistic and risky, all at the same time! Dealing with variability is an increasingly important aspect of high-performance digital integrated circuit design, and indispensable for first-time-right hardware and cutting-edge performance. This invited paper discusses the methodology, analysis, synthesis and modeling aspects of this problem. These aspects of the problem are compared and contrasted in the ASIC and custom (microprocessor) domains. This paper pays particular attention to statistical timing analysis and enumerates desirable attributes that would render such an analysis capability practical and accurate.},
   Keywords = {application specific integrated circuits
design aids
integrated circuit design
integrated circuit reliability
microprocessor chips
statistical analysis
custom domains
design methodology
failing chips
microprocessor domains
parametric yield prediction
statistical timing analysis},
   Year = {2003} }



@inproceedings{
PV:VRK+04,
   Author = {Visweswariah, C. and Ravindran, K. and Kalafala, K. and Walker, S. G. and Narayan, S.},
   Title = {First-order incremental block-based statistical timing analysis},
   BookTitle = {Design Automation Conference},
   Pages = {331--336},
   Year = {2004} }



@article{
PV:VRK+06,
   Author = {Visweswariah, C. and Ravindran, K. and Kalafala, K. and Walker, S. G. and Narayan, S. and Beece, D. K. and Piaget, J. and Venkateswaran, N. and Hemmett, J. G.},
   Title = {First-order incremental block-based statistical timing analysis},
   Journal = {IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems},
   Abstract = {Variability in digital integrated circuits makes timing
verification an extremely challenging task. In this paper, a
canonical first order delay model is proposed that takes into
account both correlated and independent randomness. A novel
linear-time block-based statistical timing algorithm is employed
to propagate timing quantities like arrival times and required
arrival times through the timing graph in this canonical form.
At the end of the statistical timing, the sensitivity of all timing
quantities to each of the sources of variation is available. Excessive
sensitivities can then be targeted by manual or automatic
optimization methods to improve the robustness of the design.
This paper also reports the first incremental statistical timer
in the literature which is suitable for use in the inner loop of
physical synthesis or other optimization programs. The third
novel contribution of this paper is the computation of local
and global criticality probabilities. For a very small cost in
CPU time, the probability of each edge or node of the timing
graph being critical is computed. Numerical results are presented
on industrial ASIC chips with over two million logic gates,
and statistical timing results are compared to exhaustive corner
analysis on a chip design whose hardware showed early-mode
timing violations.},
   Year = {2006} }



@inproceedings{
PV:ZKC05,
   Author = {Wo, Zhaojun and Koren, I. and Ciesielski, M.},
   Title = {An {ILP} formulation for yield-driven architectural synthesis},
   BookTitle = {20th IEEE International Symposium on Defect and Fault Tolerance in VLSI Systems},
   Pages = {12--20},
   Abstract = {Data flow graph dominant designs, such as communication video and audio applications, are common in today's IC industry. In these designs, the datapath resources (e.g., adders, multipliers) count more than 90% in area. Different datapath resources have very different properties in terms of area, delay, power and yield. Considering yield during system level design can result in significant benefits. A mixed integer linear programming (MILP) formulation for yield-aware architectural synthesis is presented in this paper. The proposed approach attempts to maximize the yield of the design while satisfying other constraints like area and delay. Through experiments on several benchmarks, we show that incorporating the yield as an objective during architectural synthesis can significantly improve the yield compared to conventional methods. Transistor sizing at the circuit level can also be incorporated in our method to further improve the yield.},
   Keywords = {data flow graphs
integer programming
integrated circuit design
integrated circuit yield
linear programming
circuit level transistor sizing
data flow graphs
datapath resources
mixed integer linear programming
system level design
yield-driven architectural synthesis},
   Year = {2005} }



@inproceedings{
PV:XSV01,
   Author = {Xiaoyun, Sun and Seonki, Kim and Vinnakota, B.},
   Title = {Crosstalk fault detection by dynamic {$I_{dd}$}},
   BookTitle = {IEEE/ACM International Conference on Computer Aided Design},
   Pages = {375--378},
   Abstract = {Undesired capacitive crosstalk between signals is expected to be a significant concern in deep submicron circuits. New test techniques are needed for these crosstalk faults since they may cause unacceptable performance degradation. We analyze the impact of crosstalk faults on a circuit's power dissipation. Crosstalk faults can be detected by monitoring the dynamic supply current. The test method is based on a recently developed dynamic $I_{dd}$ test metric, the energy consumption ratio (ECR). ECR-based test has been shown to be effective at tolerating the impact of process variations. In this paper, we apply an ECR-based test method called ECR-VDD test to detect the crosstalk faults. The effectiveness of the method is demonstrated by simulation results},
   Keywords = {CMOS digital integrated circuits
VLSI
crosstalk
delays
fault diagnosis
integrated circuit testing
logic simulation
logic testing
low-power electronics
CMOS
capacitive crosstalk
crosstalk fault detection
deep submicron circuits
delays
dynamic $I_{dd}$
dynamic supply current
energy consumption ratio
performance degradation
power dissipation
process variations
test techniques},
   Year = {2001} }



@inproceedings{
PV:XJM+05,
   Author = {Li, Xin and Le, Jiayong and Celik, Mustafa and Pileggi, L. T.},
   Title = {Defining statistical sensitivity for timing optimization of logic circuits with large-scale process and environmental variations},
   BookTitle = {IEEE/ACM International Conference on Computer-Aided Design},
   Pages = {844--851},
   Abstract = {The large-scale process and environmental variations for today's nanoscale ICs are requiring statistical approaches for timing analysis and optimization. Significant research has been recently focused on developing new statistical timing analysis algorithms, but often without consideration for how one should interpret the statistical timing results for optimization. In this paper (Li et al., 2005) we demonstrate why the traditional concepts of slack and critical path become ineffective under large-scale variations, and we propose a novel sensitivity-based metric to assess the "criticality" of each path and/or arc in the statistical timing graph. We define the statistical sensitivities for both paths and arcs, and theoretically prove that our path sensitivity is equivalent to the probability that a path is critical, and our arc sensitivity is equivalent to the probability that an arc sits on the critical path. An efficient algorithm with incremental analysis capability is described for fast sensitivity computation that has a linear runtime complexity in circuit size. The efficacy of the proposed sensitivity analysis is demonstrated on both standard benchmark circuits and large industry examples.},
   Keywords = {logic circuits
statistical analysis
timing
environmental variations
logic circuits
nanoscale IC
process variations
statistical timing analysis algorithms
timing optimization},
   Year = {2005} }



@inproceedings{
PV:XZV+06,
   Author = {Xiong, J. and Zolotov, V. and Visweswariah, C. and Venkateswaran, N.},
   Title = {Criticality computation in parameterized statistical timing},
   BookTitle = {ACM/IEEE international workshop on timing issues in the specification and synthesis of digital systems (TAU)},
   Pages = {119--124},
   Year = {2006} }



@inproceedings{
PV:YNP+00,
   Author = {Liu, Ying and Nassif, S. R. and Pileggi, L. T. and Strojwas, A. J.},
   Title = {Impact of interconnect variations on the clock skew of a gigahertz microprocessor},
   BookTitle = {Design Automation Conference},
   Pages = {168--171},
   Year = {2000} }



@inproceedings{
PV:YPS99,
   Author = {Liu, Ying and Pileggi, L. T. and Strojwas, A. J.},
   Title = {Model order-reduction of {RC(L)} interconnect including variational analysis},
   BookTitle = {Design Automation Conference},
   Pages = {201--206},
   Abstract = {As interconnect feature sizes continue to scale to smaller dimensions, long interconnect can dominate the IC timing performance, but the interconnect parameter variations make it difficult to predict these dominant delay extremes. This paper presents a model order-reduction technique for RLC interconnect circuits that includes variational analysis to capture manufacturing variations. Matrix perturbation theory is combined with dominant-pole-analysis and Krylov-subspace-analysis methods to produce reduced-order models with direct inclusion of statistically independent manufacturing variations. The accuracy of the resulting variational reduced-order models is demonstrated on several industrial examples},
   Keywords = {distributed parameter networks
integrated circuit interconnections
integrated circuit modelling
linear network analysis
matrix algebra
perturbation techniques
reduced order systems
variational techniques
IC timing performance
Krylov-subspace-analysis methods
RCL interconnect circuits
dominant-pole-analysis
interconnect parameter variations
manufacturing variations
matrix perturbation theory
model order-reduction
variational analysis
variational reduced-order models},
   Year = {1999} }



@article{
PV:YBW+97,
   Author = {Taur, Yuan and Buchanan, D. A. and Chen, Wei and Frank, D. J. and Ismail, K. E. and Lo, Shih-Hsien and Sai-Halasz, G. A. and Viswanathan, R. G. and Wann, H. J. C. and Wind, S. J. and Wong, Hon-Sum},
   Title = {{CMOS} scaling into the nanometer regime},
   Journal = {Proceedings of the IEEE},
   Volume = {85},
   Number = {4},
   Pages = {486--504},
   ISSN = {0018-9219},
   Abstract = {Starting with a brief review on 0.1-$\mu$m (100 nm) CMOS status, this paper addresses the key challenges in further scaling of CMOS technology into the nanometer (sub-100 nm) regime in light of fundamental physical effects and practical considerations. Among the issues discussed are: lithography, power supply and threshold voltage, short-channel effect, gate oxide, high-field effects, dopant number fluctuations and interconnect delays. The last part of the paper discusses several alternative or unconventional device structures, including silicon-on-insulator (SOI), SiGe MOSFET's, low-temperature CMOS, and double-gate MOSFET's, which may lead to the outermost limits of silicon scaling},
   Keywords = {CMOS integrated circuits
VLSI
integrated circuit interconnections
lithography
nanotechnology
silicon-on-insulator
CMOS scaling
MOSFETs
SOI
dopant number fluctuations
double-gate transistors
fundamental physical effects
gate oxide
high-field effects
interconnect delays
lithography
low-temperature CMOS
nanometer regime
power supply
short-channel effect
threshold voltage},
   Year = {1997} }



@article{
PV:ZNN+00,
   Author = {Zanella, S. and Nardi, A. and Neviani, A. and Quarantelli, M. and Saxena, S. and Guardiani, C.},
   Title = {Analysis of the impact of process variations on clock skew},
   Journal = {IEEE Transactions on Semiconductor Manufacturing},
   Volume = {13},
   Number = {4},
   Pages = {401--407},
   ISSN = {0894-6507},
   Abstract = {In this paper, we analyze the impact of process variations on the clock skew of VLSI circuits designed in deep submicrometer technologies. With smaller feature size, the utilization of a dense buffering scheme has been proposed in order to realize efficient and noise-immune clock distribution networks. However, the local variance of MOSFET electrical parameters, such as $V_T$ and $I_{DSS}$, increases with scaling of device dimensions, thus causing large intradie variability of the timing properties of clock buffers. As a consequence, we expect process variations to be a significant source of clock skew in deep submicrometer technologies. In order to accurately verify this hypothesis, we applied advanced statistical simulation techniques and accurate mismatch measurement data in order to thoroughly characterize the impact of intradie variations on industrial clock distribution networks. The comparison with Monte Carlo simulations performed by neglecting the effect of mismatch confirmed that local device variations play a crucial role in the design and sizing of the clock distribution network},
   Keywords = {Monte Carlo methods
VLSI
circuit layout CAD
circuit simulation
clocks
high-speed integrated circuits
integrated circuit interconnections
integrated circuit layout
logic CAD
network routing
timing
Monte Carlo simulations
VLSI circuits
clock skew
deep submicrometer technologies
dense buffering scheme
device dimensions
feature size
intradie variability
local device variations
local variance
mismatch measurement data
noise-immune clock distribution networks
process variations
statistical simulation techniques
timing properties},
   Year = {2000} }



@inproceedings{
PV:YSS+05,
   Author = {Zhan, Yaping and Strojwas, A. J. and Sharma, M. and Newmark, D.},
   Title = {Statistical critical path analysis considering correlations},
   BookTitle = {IEEE/ACM International Conference on Computer-Aided Design},
   Pages = {699--704},
   Abstract = {Critical path analysis is always an important task in timing verification. For today's nanometer IC technologies, process variations have a significant impact on circuit performance. The variability can change the criticality of long paths (Gattiker et al., 2002). Therefore, statistical approaches should be incorporated in critical path analysis. In this paper, we present two novel techniques that can efficiently evaluate path criticality under statistical non-linear delay models. They are integrated into a block-based statistical timing tool with the capability of handling arbitrary correlations from manufacturing process dependence and also path sharing. Experiments on ISCAS85 benchmarks as well as industrial circuits prove both accuracy and efficiency of these techniques.},
   Keywords = {VLSI
critical path analysis
delay estimation
integrated circuit design
integrated circuit technology
nanoelectronics
network analysis
statistical analysis
block-based statistical timing tool
circuit performance
nanometer IC technology
process variations
statistical critical path analysis
statistical nonlinear delay model
timing verification},
   Year = {2005} }



@inproceedings{
PV:ZSS+05,
   Author = {Zhan, Yaping and Strojwas, Andrzej J. and Sharma, Mahesh and Newmark, David},
   Title = {Statistical Critical Path Analysis Considering Correlations},
   BookTitle = {IEEE/ACM International Conference on Computer-Aided Design},
   Pages = {699--704},
   Abstract = {Critical path analysis is always an important task in timing verification. For today's nanometer IC technologies, process variations have a significant impact on circuit performance. The variability can change the criticality of long paths (Gattiker et al., 2002). Therefore, statistical approaches should be incorporated in critical path analysis. In this paper, we present two novel techniques that can efficiently evaluate path criticality under statistical non-linear delay models. They are integrated into a block-based statistical timing tool with the capability of handling arbitrary correlations from manufacturing process dependence and also path sharing. Experiments on ISCAS85 benchmarks as well as industrial circuits prove both accuracy and efficiency of these techniques.},
   internal-note = {NOTE(review): apparent duplicate of entry PV:YSS+05 (same paper, same venue, same pages); consolidate citations onto one key and remove the other. Field is ignored by BibTeX, so output is unaffected.},
   Year = {2005} }



@inproceedings{
PV:XXC02,
   Author = {Zhang, Yumin and Hu, Xiaobo and Chen, D. Z.},
   Title = {Task scheduling and voltage selection for energy minimization},
   BookTitle = {Design Automation Conference},
   Pages = {183--188},
   Abstract = {In this paper, we present a two-phase framework that integrates task assignment, ordering and voltage selection (VS) together to minimize energy consumption of real-time dependent tasks executing on a given number of variable voltage processors. Task assignment and ordering in the first phase strive to maximize the opportunities that can be exploited for lowering voltage levels during the second phase, i.e., voltage selection. In the second phase, we formulate the VS problem as an Integer Programming (IP) problem and solve the IP efficiently. Experimental results demonstrate that our framework is very effective in executing tasks at lower voltage levels under different system configurations.},
   Keywords = {circuit CAD
circuit optimisation
directed graphs
integer programming
integrated circuit design
low-power electronics
multiprocessing systems
processor scheduling
IC design
direct acyclic graph
energy minimization
integer programming problem
multiple processor scheduling
real-time dependent tasks
single processor scheduling
task assignment
task ordering
task scheduling
two-phase framework
variable voltage processors
voltage selection},
   Year = {2002} }
