%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% PRELIMINARY JOURNAL ARTICLE CITATIONS
@article{2020passon,
author = {R. Pasupathy and Y. Song},
title = {An Adaptive Sequential Sample Average Approximation Framework for Solving Two-stage Stochastic Linear Programs},
journal = {SIAM Journal on Optimization},
year = {2020},
volume = {},
number = {},
month = {},
pages = {},
url = {http://www.optimization-online.org/DB_HTML/2019/02/7068.html},
note = {To appear.},
keywords = {}
}
@article{2020raghunpastaa,
author = {P. K. Raghavan and S. R. Hunter and R. Pasupathy and M. R. Taaffe},
title = {Adaptive Sampling Line Search for Stochastic Optimization with Integer Variables},
journal = {Mathematical Programming},
year = {2020},
volume = {},
number = {},
month = {},
pages = {},
url = {http://www.optimization-online.org/DB_HTML/2019/02/7068.html},
note = {},
keywords = {}
}
@inproceedings{2020pasyehWSC,
author = {R. Pasupathy and Y. Yeh},
title = {Risk-Efficient Sequential Simulation Estimators},
booktitle = {Proceedings of the 2020 Winter Simulation Conference},
editor = {K.-H. Bae and B. Feng and S. Kim and S. Lazarova-Molnar and Z. Zheng and T. Roeder and R. Thiesing},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
pages = {}
}
@inproceedings{2020newbolpasyip,
author = {D. Newton and R. Bollapragada and R. Pasupathy and A. Nung-Kwan Yip},
title = {Retrospective Approximation for Smooth Stochastic Optimization},
booktitle = {OPT2020: 12th Annual Workshop on Optimization for Machine Learning - NeurIPS},
year = {2020}
}
@article{2019appfeletal,
Year = {2019},
Author = {E. A. Applegate and G. Feldman and S. R. Hunter and R. Pasupathy},
Title = {Multi-objective ranking and selection: optimal sampling laws and tractable approximations via SCORE},
journal = {Journal of Simulation},
volume = {14},
number = {1},
pages = {21--40},
howpublished = {},
doi = {},
url = {http://www.optimization-online.org/DB_HTML/2018/07/6750.html},
keywords = {}}
@article{2019pashonhun,
Year = {2019},
Author = {R. Pasupathy and H. Honnappa and S. R. Hunter},
Title = {Open Problem--Adaptive Constant-Step Stochastic Approximation},
journal = {Stochastic Systems},
doi = {10.1287/stsy.2019.0046},
abstract = {This paper was accepted for the Stochastic Systems Special Section on Open Problems in Applied Probability, presented at the 2018 INFORMS Annual Meeting in Phoenix, Arizona, November 4-7, 2018.},
keywords = {book chapters and literature reviews and open questions and tutorials}}
@article{2019leepas,
Year = {2019},
Author = {L. Leemis and R. Pasupathy},
Title = {The Ties that Bind},
journal = {Significance},
volume = {16},
number = {4},
pages = {8--9}}
@inproceedings{2019pasWSC,
year = {2019},
author = {R. Pasupathy},
title = {The Number of Random Restarts Required to Identify All Solutions to a Nonlinear System: Applications to Global Stochastic Optimization},
booktitle = {Proceedings of the 2019 Winter Simulation Conference},
editor = {N. Mustafee and K.-H. G. Bae and S. Lazarova-Molnar and M. Rabe and C. Szabo and P. Haas and Y.-J. Son},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
pages = {},
doi = {},
addendum = {}
}
@inproceedings{2019vasshapasWSC,
year = {2019},
author = {D. Vasquez and S. Shashaani and R. Pasupathy},
title = {{ASTRO} for Derivative-based Stochastic Optimization: Algorithm Description \& Numerical Experiments},
booktitle = {Proceedings of the 2019 Winter Simulation Conference},
editor = {N. Mustafee and K.-H. G. Bae and S. Lazarova-Molnar and M. Rabe and C. Szabo and P. Haas and Y.-J. Son},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
pages = {},
doi = {}
}
@article{2018lopetal,
author = {J. E. {Figueroa-L\'opez} and H. Lee and R. Pasupathy},
title = {Optimal placement of a small order in a diffusive limit order book},
journal = {High Frequency},
year = {2018},
volume = {1},
number = {2},
month = {},
pages = {87--116},
doi = {10.1002/hf2.10017},
url_Link = {https://arxiv.org/abs/1708.04337},
url_Paper = {http://web.ics.purdue.edu/~pasupath/PAPERS/diffusivelob.pdf},
note = {},
keywords = {}}
@article{2018sarhasetal,
author = {S. Shashaani and F. S. Hashemi and R. Pasupathy},
title = {{ASTRO-DF}: A Class of Adaptive Sampling Trust-Region Algorithms for Derivative-Free Simulation Optimization},
journal = {SIAM Journal on Optimization},
year = {2018},
volume = {},
number = {},
month = {},
pages = {2753--3430},
url_Link = {http://www.optimization-online.org/DB_HTML/2015/10/5138.html},
url_Paper = {http://web.ics.purdue.edu/~pasupath/PAPERS/2018shahaspas.pdf},
note = {}}
@article{2018pasglyetal,
author = {R. Pasupathy and P. W. Glynn and S. Ghosh and F. Hashemi},
title = {On sampling rates in simulation-based recursions},
journal = {SIAM Journal on Optimization},
year = {2018},
volume = {28},
number = {1},
month = {},
pages = {45--73},
url_Link = {http://www.optimization-online.org/DB_HTML/2016/03/5361.html},
url_Paper = {http://web.ics.purdue.edu/~pasupath/PAPERS/2018pasglyghohas.pdf},
note = {}}
@article{2017hasetal,
author = {F. S. Hashemi and R. Pasupathy and M. R. Taaffe},
title = {The Adaptive Sampling Gradient Method: Optimizing Smooth Functions with an Inexact Oracle},
journal = {},
year = {2017},
volume = {},
number = {},
month = {},
pages = {},
url_Link = {http://www.optimization-online.org/DB_HTML/2017/05/6045.html},
url_Paper = {http://web.ics.purdue.edu/~pasupath/PAPERS/2018haspastaa.pdf},
note = {},
keywords = {}}
@article{2017nagpas,
author = {K. Nagaraj and R. Pasupathy},
title = {Stochastically Constrained Simulation Optimization On Integer-Ordered Spaces: The {cgR-SPLINE} Algorithm},
journal = {Operations Research},
year = {2017},
volume = {},
number = {},
month = {},
pages = {},
url_Link = {http://www.optimization-online.org/DB_HTML/2015/10/5139.html},
url_Paper = {http://web.ics.purdue.edu/~pasupath/PAPERS/2018nagpas.pdf},
note = {},
keywords = {}}
%bibbase_note = {Journal version, Runner-up, 2014 INFORMS Computing Society Student Paper Award.}}
@inproceedings{2016shahunpasWSC,
year = {2016},
author = {S. Shashaani and S. R. Hunter and R. Pasupathy},
title = {{ASTRO-DF}: Adaptive Sampling Trust-Region Optimization Algorithms, Heuristics, and Numerical Experience},
booktitle = {Proceedings of the 2016 Winter Simulation Conference},
editor = {T. M. K. Roeder and P. I. Frazier and R. Szechtman and E. Zhou},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
pages = {},
doi = {},
keywords = {simulation optimization, derivative-free, trust region},
abstract = {},
addendum = {}
}
@inproceedings{2015felhunpasWSC,
year = {2015},
author = {G. Feldman and S. R. Hunter and R. Pasupathy},
title = {Multi-objective simulation optimization on finite sets: optimal allocation via scalarization},
booktitle = {Proceedings of the 2015 Winter Simulation Conference},
editor = {L. Yilmaz and W. K. V. Chan and I. Moon and T. M. K. Roeder and C. Macal and M. D. Rossetti},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
pages = {},
doi = {},
keywords = {ranking and selection, multi-objective simulation optimization, optimal allocation},
abstract = {},
addendum = {}
}
@inproceedings{2015nagpasWSC,
year = {2015},
author = {K. Nagaraj and R. Pasupathy},
title = {Modeling Dependence in Simulation Input: The Case for Copulas},
booktitle = {Proceedings of the 2015 Winter Simulation Conference},
editor = {L. Yilmaz and W. K. V. Chan and I. Moon and T. M. K. Roeder and C. Macal and M. D. Rossetti},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
pages = {},
doi = {},
keywords = {},
abstract = {},
addendum = {}
}
@article{2014liabbpas,
author = {P. Li and M. Abbas and R. Pasupathy},
title = {A Stochastic Dilemma Zone Protection Algorithm Based On Vehicle Trajectories},
journal = {Journal of Intelligent Transportation Systems},
year = {2014},
volume = {},
number = {},
month = {},
pages = {},
doi = {10.1080/15472450.2014.977043},
abstract = {A common method of the dilemma zone (DZ) protection at intersections is to hold the green until the number of vehicles in DZ is lower than a threshold. Since the threshold is typically empirical and fixed, it cannot accommodate the dynamic and time-varying traffic patterns and therefore should be adjusted regularly. This article presents a new Markov-process-based DZ protection algorithm, which considers the number of vehicles in DZ (i.e., the state) over time to be a Markov process. At each time step, the algorithm first predicts the future states with the Markov state-transit matrix, then compares them with the current state to determine whether to end the green or not. In this way, the new end-green criterion is not the fixed threshold value but the current state and the prediction with the Markov state-transit matrix. Meanwhile, the Markov matrix is automatically updated whenever the new observed detected state transitions come in. The new algorithms were also evaluated in simulation and the simulation results showed that the new algorithm maintains reliable and effective protection in a dynamic traffic environment. At last, we find that the new algorithm performance can be further improved if the vehicle trajectories are precisely measured rather than estimated.}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% PERMANENT JOURNAL ARTICLE CITATIONS
@article{2014pashunetal,
Year = {2014},
Author = {R. Pasupathy and S. R. Hunter and N. A. Pujowidianto and L. H. Lee and {C.-H.} Chen},
Title = {Stochastically constrained ranking and selection via {SCORE}},
Journal = {ACM Transactions on Modeling and Computer Simulation},
shortjournal = {ACM Trans. Model. Comput. Simul.},
volume = {25},
number = {1},
articleno = {1},
pages = {1--26},
numpages = {26},
doi = {10.1145/2630066},
url_Link = {http://dx.doi.org/10.1145/2630066},
url_Paper = {http://web.ics.purdue.edu/~pasupath/PAPERS/2014pashunpujleeche.pdf},
keywords = {ranking and selection, optimal allocation, constrained simulation optimization},
abstract = {Consider the context of constrained simulation optimization (SO), that is, optimization problems where the objective and constraint functions are known through dependent Monte Carlo estimators. For solving such problems on large finite spaces, we provide an easily implemented sampling framework called SCORE (Sampling Criteria for Optimization using Rate Estimators) that approximates the optimal simulation budget allocation. We develop a general theory, but like much of the existing literature on ranking and selection, our focus is on SO problems where the distribution of the simulation observations is Gaussian. We first characterize the nature of the optimal simulation budget as a bilevel optimization problem. We then show that under a certain asymptotic limit, the solution to the bilevel optimization problem becomes surprisingly tractable and is expressed through a single intuitive measure, the score. We provide an iterative SO algorithm that repeatedly estimates the score and determines how the available simulation budget should be expended across contending systems. Numerical experience with the algorithm resulting from the proposed sampling approximation is very encouraging --- in numerous examples of constrained SO problems having 1,000 to 10,000 systems, the optimal allocation is identified to negligible error within a few seconds to one minute on a typical laptop computer. Corresponding times to solve the full bilevel optimization problem range from tens of minutes to several hours.}}
%bibbase_note = {Journal version, Finalist, WSC Best Theoretical Paper Award.}}
%addendum = {(Journal version, \textbf{Finalist, WSC Best Theoretical Paper Award}.)}}
@article{2014pastaaetal,
author = {R. Pasupathy and M. Taaffe and B. W. Schmeiser and W. Wang},
title = {Control-variate estimation using estimated control means},
journal = {IIE Transactions},
year = {2014},
volume = {44},
number = {5},
month = {},
pages = {381--385},
doi = {10.1080/0740817X.2011.610430},
url = {http://web.ics.purdue.edu/~pasupath/PAPERS/2012pastaaschwan.pdf},
keywords = {},
abstract = {This article studies control-variate estimation where the control mean itself is estimated. Control-variate estimation in simulation experiments can significantly increase sampling efficiency and has traditionally been restricted to cases where the control has a known mean. In a previous paper the current authors generalized the idea of control variate estimation to the case where the control mean is only approximated. The result is a biased but possibly useful estimator. For that case, a mean square error optimal estimator was provided and its properties were discussed. This article generalizes classical control variate estimation to the case of Control Variates using Estimated Means (CVEMs). CVEMs replace the control mean with an estimated value for the control mean obtained from a prior simulation experiment. Although the resulting control-variate estimator is unbiased, it does introduce additional sampling error and so its properties are not the same as those of the standard control-variate estimator. A CVEM estimator is developed that minimizes the overall estimator variance. Both biased control variates and CVEMs can be used to improve the efficiency of stochastic simulation experiments. Their main appeal is that the restriction of having to know (deterministically) the exact value of the control mean is eliminated; thus, the space of possible controls is greatly increased.}}
@article{2013wanpassch,
author = {H. Wang and R. Pasupathy and B. W. Schmeiser},
title = {Integer-Ordered Simulation Optimization using {R-SPLINE}: Retrospective Search using Piecewise-Linear Interpolation and Neighborhood Enumeration},
journal = {ACM Transactions on Modeling and Computer Simulation},
year = {2013},
volume = {23},
number = {3},
pages = {},
doi = {10.1145/2499913.2499916},
url = {http://web.ics.purdue.edu/~pasupath/PAPERS/2013wanpassch.pdf},
keywords = {retrospective approximation, simulation optimization on integer-ordered spaces},
abstract = {We consider simulation-optimization (SO) models where the decision variables are integer ordered and the objective function is defined implicitly via a simulation oracle, which for any feasible solution can be called to compute a point estimate of the objective-function value. We develop R-SPLINE---a Retrospective-search algorithm that alternates between a continuous Search using Piecewise-Linear Interpolation and a discrete Neighborhood Enumeration, to asymptotically identify a local minimum. R-SPLINE appears to be among the first few gradient-based search algorithms tailored for solving integer-ordered local SO problems. In addition to proving the almost-sure convergence of R-SPLINE's iterates to the set of local minima, we demonstrate that the probability of R-SPLINE returning a solution outside the set of true local minima decays exponentially in a certain precise sense. R-SPLINE, with no parameter tuning, compares favorably with popular existing algorithms.}}
@article{2013hunpas,
Year = {2013},
Author = {S. R. Hunter and R. Pasupathy},
Title = {Optimal sampling laws for stochastically constrained simulation optimization on finite sets},
Journal = {INFORMS Journal on Computing},
shortjournal = {INFORMS J. Comput.},
volume = {25},
number = {3},
pages = {527--542},
doi = {10.1287/ijoc.1120.0519},
url_Link = {http://dx.doi.org/10.1287/ijoc.1120.0519},
url_Paper = {http://web.ics.purdue.edu/~pasupath/PAPERS/2013hunpas.pdf},
keywords = {constrained simulation optimization, optimal allocation, ranking and selection},
abstract = {Consider the context of selecting an optimal system from among a finite set of competing systems, based on a stochastic objective function and subject to multiple stochastic constraints. In this context, we characterize the asymptotically optimal sample allocation that maximizes the rate at which the probability of false selection tends to zero. Since the optimal allocation is the result of a concave maximization problem, its solution is particularly easy to obtain in contexts where the underlying distributions are known or can be assumed. We provide a consistent estimator for the optimal allocation and a corresponding sequential algorithm fit for implementation. Various numerical examples demonstrate how the proposed allocation differs from competing algorithms.}}
%bibbase_note = {Winner, 2011 INFORMS Computing Society Student Paper Award.}}
%addendum = {(\textbf{Winner, 2011 INFORMS Computing Society Student Paper Award}.)}}
@article{2012lepas,
author = {K. Le and R. Pasupathy},
title = {A note on the number of random restarts required to
approximate all solutions of a stochastic nonlinear system},
journal = {Operations Research},
year = {2012},
volume = {},
number = {},
month = {},
pages = {},
note = {},
keywords = {}}
@article{2012ghopas,
author = {S. Ghosh and R. Pasupathy},
title = {{C-NORTA}: A rejection procedure for sampling from the tail of bivariate {NORTA} distributions},
journal = {INFORMS Journal on Computing},
year = {2012},
volume = {24},
number = {2},
pages = {295--310},
doi = {10.1287/ijoc.1100.0447},
url = {http://web.ics.purdue.edu/~pasupath/PAPERS/2012ghopas.pdf},
keywords = {random variate generation},
abstract = {We propose C-NORTA, an exact algorithm to generate random variates from the tail of a bivariate NORTA random vector. (A NORTA random vector is specified by a pair of marginals and a rank or product-moment correlation, and it is sampled using the popular NORmal-To-Anything procedure.) We first demonstrate that a rejection-based adaptation of NORTA on such constrained random vector generation problems may often be fundamentally intractable. We then develop the C-NORTA algorithm, relying on strategic conditioning of the NORTA vector, followed by efficient approximation and acceptance/rejection steps. We show that, in a certain precise asymptotic sense, the sampling efficiency of C-NORTA is exponentially larger than what is achievable through a na{\"\i}ve application of NORTA. Furthermore, for at least a certain class of problems, we show that the acceptance probability within C-NORTA decays only linearly with respect to a defined rarity parameter. The corresponding decay rate achievable through a na{\"\i}ve adaptation of NORTA is exponential. We provide directives for efficient implementation.}}
@article{2011paskim,
author = {R. Pasupathy and S. Kim},
title = {The stochastic root-finding problem: overview, solutions, and open questions},
journal = {ACM Transactions on Modeling and Computer Simulation},
year = {2011},
volume = {21},
number = {3},
articleno = {19},
doi = {10.1145/1921598.1921603},
pages = {23},
url = {http://web.ics.purdue.edu/~pasupath/PAPERS/2011paskim.pdf},
keywords = {stochastic root-finding, literature reviews and tutorials},
abstract = {The stochastic root-finding problem (SRFP) is that of finding the zero(s) of a vector function, that is, solving a nonlinear system of equations when the function is expressed implicitly through a stochastic simulation. SRFPs are equivalently expressed as stochastic fixed-point problems, where the underlying function is expressed implicitly via a noisy simulation. After motivating SRFPs using a few examples, we review available methods to solve such problems on constrained Euclidean spaces. We present the current literature as three broad categories, and detail the basic theoretical results that are currently known in each of the categories. With a view towards helping the practitioner, we discuss specific variations in their implementable form, and provide references to computer code when easily available. Finally, we list a few questions that are worthwhile research pursuits from the standpoint of advancing our knowledge of the theoretical underpinnings and the implementation aspects of solutions to SRFPs.}}
@article{2010varpaslee,
author = {E. Vargo and R. Pasupathy and L. Leemis},
title = {Moment-ratio diagrams for univariate distributions},
journal = {Journal of Quality Technology},
year = {2010},
volume = {42},
number = {3},
pages = {276--286},
doi = {},
url = {http://web.ics.purdue.edu/~pasupath/PAPERS/2010varpasleeCOLOR.pdf},
keywords = {moment-ratio diagrams},
abstract = {We present two moment-ratio diagrams along with guidance for their interpretation. The first moment-ratio diagram is a graph of skewness versus kurtosis for common univariate probability distributions. The second moment-ratio diagram is a graph of coefficient of variation versus skewness for common univariate probability distributions. Both of these diagrams, to our knowledge, are the most comprehensive to date. The diagrams serve four purposes: (1) they quantify the proximity between various univariate distributions based on their second, third, and fourth moments; (2) they illustrate the versatility of a particular distribution based on the range of values that the moments can assume; and (3) they can be used to create a short list of potential probability models based on a data set; (4) they clarify the limiting relationships between various well-known distribution families. The use of the moment-ratio diagrams for choosing a distribution that models given data is illustrated.}}
%========== NOLINK ==========
@article{2010liabbetal,
author = {P. Li and M. Abbas and R. Pasupathy and L. Head},
title = {Simulation-based optimization of maximum green setting under retrospective approximation framework},
journal = {Transportation Research},
year = {2010},
volume = {2192},
doi = {10.3141/2192-01},
pages = {1--10},
keywords = {transportation},
abstract = {Most traffic signal systems work under highly dynamic traffic conditions, and they can be studied adequately only through simulation. As a result, how to optimize traffic signal system parameters in a stochastic framework has become increasingly important. Retrospective approximation (RA) represents the latest theoretical development in stochastic simulation. Under the RA framework, the solution to a simulation-based optimization problem can be approached with a sequence of approximate optimization problems. Each of these problems has a specific sample size and is solved to a specific error tolerance. This research applied the RA concept to the optimal design of the maximum green setting of the multidetector green extension system. It also designed a variant of the Markov monotonic search algorithm that can accommodate the requirements of the RA framework, namely, the inheritable Markov monotonic search algorithm, and implemented the RA-based optimization engine within VISSIM. The results show that the optimized maximum green can considerably increase composite performance (reducing delay and increasing safety) compared with traditional designs. The optimization methodology presented in this paper can easily be expanded to other signal parameters.}}
@article{2010shipas,
author = {K. Shin and R. Pasupathy},
title = {A method for fast generation of {P}oisson random vectors},
journal = {INFORMS Journal on Computing},
year = {2010},
volume = {22},
number = {1},
pages = {81--92},
doi = {10.1287/ijoc.1090.0332},
keywords = {random variate generation},
abstract = {We present the ``trivariate reduction extension'' (TREx)---an exact algorithm for the fast generation of bivariate Poisson random vectors. Like the normal-to-anything (NORTA) procedure, TREx has two phases: a preprocessing phase when the required algorithm parameters are identified, and a generation phase when the parameters identified during the preprocessing phase are used to generate the desired Poisson vector. We prove that the proposed algorithm covers the entire range of theoretically feasible correlations, and we provide efficient-computation directives and rigorous bounds for truncation error control. We demonstrate through extensive numerical tests that TREx, being a specialized algorithm for Poisson vectors, has a preprocessing phase that is uniformly a hundred to a thousand times faster than a fast implementation of NORTA. The generation phases of TREx and NORTA are comparable in speed, with that of TREx being marginally faster. All code is publicly available.}}
@article{2010pas,
author = {R. Pasupathy},
title = {On choosing parameters in retrospective-approximation algorithms for stochastic root finding and simulation optimization},
journal = {Operations Research},
year = {2010},
volume = {58},
number = {4},
pages = {889--901},
url = {http://web.ics.purdue.edu/~pasupath/PAPERS/2010pas.pdf},
doi = {10.1287/opre.1090.0773},
keywords = {retrospective approximation, stochastic root-finding},
abstract = {The stochastic root-finding problem is that of finding a zero of a vector-valued function known only through a stochastic simulation. The simulation-optimization problem is that of locating a real-valued function's minimum, again with only a stochastic simulation that generates function estimates. Retrospective approximation (RA) is a sample-path technique for solving such problems, where the solution to the underlying problem is approached via solutions to a sequence of approximate deterministic problems, each of which is generated using a specified sample size, and solved to a specified error tolerance. Our primary focus, in this paper, is providing guidance on choosing the sequence of sample sizes and error tolerances in RA algorithms. We first present an overview of the conditions that guarantee the correct convergence of RA's iterates. Then we characterize a class of error-tolerance and sample-size sequences that are superior to others in a certain precisely defined sense. We also identify and recommend members of this class and provide a numerical example illustrating the key results.}}
%bibbase_note = {Winner, 2010 INFORMS Computing Society Student Paper Award.}
@article{2009passch,
author = {R. Pasupathy and B. W. Schmeiser},
title = {Retrospective-approximation algorithms for multidimensional stochastic root-finding problems},
journal = {ACM Transactions on Modeling and Computer Simulation},
year = {2009},
volume = {19},
number = {2},
pages = {5:1--5:36},
doi = {10.1145/1502787.1502788},
url = {http://web.ics.purdue.edu/~pasupath/PAPERS/2009passch.pdf},
keywords = {retrospective approximation, stochastic root-finding},
abstract = {The stochastic root-finding problem (SRFP) is that of solving a nonlinear system of equations using only a simulation that provides estimates of the functions at requested points. Equivalently, SRFPs seek locations where an unknown vector function attains a given target using only a simulation capable of providing estimates of the function. SRFPs find application in a wide variety of physical settings. We develop a family of retrospective-approximation (RA) algorithms called Bounding RA that efficiently solves a certain class of multidimensional SRFPs. During each iteration, Bounding RA generates and solves a sample-path problem by identifying a polytope of stipulated diameter, with an image that bounds the given target to within stipulated tolerance. Across iterations, the stipulations become increasingly stringent, resulting in a sequence of shrinking polytopes that approach the correct solution. Efficiency results from: (i) the RA structure, (ii) the idea of using bounding polytopes to exploit problem structure, and (iii) careful step-size and direction choice during algorithm evolution. Bounding RA has good finite-time performance that is robust with respect to the location of the initial solution, and algorithm parameter values. Empirical tests suggest that Bounding RA outperforms Simultaneous Perturbation Stochastic Approximation (SPSA), which is arguably the best-known algorithm for solving SRFPs.}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% PRELIMINARY BOOK CITATIONS
@incollection{2014kimpashen,
author = {S. Kim and R. Pasupathy and S. G. Henderson},
title = {A Guide to {SAA}},
booktitle = {Encyclopedia of Operations Research and Management Science},
series = {Hillier and Lieberman OR Series},
publisher = {Elsevier},
editor = {M. Fu},
pages = {},
year = {2014},
keywords = {sample average approximation, literature reviews and tutorials}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% PERMANENT BOOK CITATIONS
@incollection{2018newyoupas,
Author = {D. Newton and F. Yousefian and R. Pasupathy},
Title = {Stochastic Gradient Descent: Modern Trends},
Booktitle = {{TutORials} in Operations Research},
Publisher = {INFORMS},
Editor = {E. Gel and D. Lewis},
Year = {2018},
Pages = {},
Chapter = {7}
}
@incollection{2017varpaslee,
Author = {E. Vargo and R. Pasupathy and L. M. Leemis},
Title = {Moment-Ratio Diagrams for Univariate Distributions},
Booktitle = {Computational Probability and Applications},
Publisher = {},
Editor = {A. G. Glen and L. M. Leemis},
Year = {2017},
Pages = {},
Chapter = {},
keywords = {}
}
@incollection{2013pasgho,
Author = {R. Pasupathy and S. Ghosh},
Title = {Simulation Optimization: A concise overview and implementation guide},
Booktitle = {{TutORials} in Operations Research},
Publisher = {INFORMS},
Editor = {H. Topaloglu},
Year = {2013},
Pages = {122--150},
Chapter = {7},
keywords = {literature reviews and tutorials}
}
@incollection{2011pasA,
author = {R. Pasupathy},
title = {Generating homogenous {P}oisson processes},
booktitle = {Wiley Encyclopedia of Operations Research and Management Science},
year = {2011},
publisher = {Wiley},
url = {http://web.ics.purdue.edu/~pasupath/PAPERS/2011pasA.pdf},
keywords = {random variate generation, literature reviews and tutorials}
}
@incollection{2011pasB,
author = {R. Pasupathy},
title = {Generating nonhomogenous {P}oisson processes},
booktitle = {Wiley Encyclopedia of Operations Research and Management Science},
year = {2011},
publisher = {Wiley},
url = {http://web.ics.purdue.edu/~pasupath/PAPERS/2011pasB.pdf},
keywords = {random variate generation, literature reviews and tutorials}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% WSC PROCEEDINGS
%------------------------------------------------------------------------------------
% Preliminary Citations
%------------------------------------------------------------------------------------
% Permanent Citations
@inproceedings{2018jaihonpasWSC,
author = {P. Jaiswal and H. Honnappa and R. Pasupathy},
title = {Optimal Allocations for Sample Average Approximation},
booktitle = {Proceedings of the 2018 Winter Simulation Conference},
Publisher = {Institute of Electrical and Electronics Engineers, Inc.},
Address = {Piscataway, NJ},
editor = {M. Rabe and A. A. Juan and N. Mustafee and A. Skoogh and S. Jain and B. Johansson},
pages = {},
year = {2018},
url = {},
keywords = {}
}
@inproceedings{2018newyoupasWSC,
author = {D. Newton and F. Yousefian and R. Pasupathy},
title = {Recent Trends in Stochastic Gradient Descent for Machine Learning and Big Data},
booktitle = {Proceedings of the 2018 Winter Simulation Conference},
Publisher = {Institute of Electrical and Electronics Engineers, Inc.},
Address = {Piscataway, NJ},
editor = {M. Rabe and A. A. Juan and N. Mustafee and A. Skoogh and S. Jain and B. Johansson},
pages = {},
year = {2018},
url = {},
keywords = {}
}
%------ 2014 ------------------
@inproceedings{2014hasghopasWSC,
author = {F. Hashemi and S. Ghosh and R. Pasupathy},
title = {On adaptive sampling rules for stochastic recursions},
booktitle = {Proceedings of the 2014 Winter Simulation Conference},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
editor = {A. Tolk and S. Y. Diallo and I. O. Ryzhov and L. Yilmaz and S. Buckley and J. A. Miller},
year = {2014},
abstract = {We consider the problem of finding a zero of an unknown function, or optimizing an unknown function,
with only a stochastic simulation that outputs noise-corrupted observations. A convenient paradigm to solve
such problems takes a deterministic recursion, e.g., Newton-type or trust-region, and replaces function
values and derivatives appearing in the recursion with their sampled counterparts. While such a paradigm
is convenient, there is as yet no clear guidance on how much simulation effort should be expended as the
resulting recursion evolves through the search space. In this paper, we take the first steps towards answering
this question. We propose using a fully sequential Monte Carlo sampling method to adaptively decide
how much to sample at each point visited by the stochastic recursion. The termination criterion for such
sampling is based on a certain relative width confidence interval constructed to ensure that the resulting
iterates are consistent, and efficient in a rigorous (Monte Carlo canonical) sense. The methods presented
here are adaptive in the sense that they ``learn'' to sample according to the algorithm trajectory. In this
sense, our methods should be seen as refining recent methods in a similar context that use a predetermined
sequence of sample sizes.}}
%bibbase_note = {Winner, WSC Student Paper Competition.}}
%------ 2013 ------------------
@inproceedings{2013nagpasWSC,
author = {K. Nagaraj and R. Pasupathy},
title = {{R-SPLINE} for local integer-ordered simulation optimization problems with stochastic constraints},
booktitle = {Proceedings of the 2013 Winter Simulation Conference},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
editor = {R. Pasupathy and {S.-H.} Kim and A. Tolk and R. Hill and M. E. Kuhl},
pages = {846--855},
year = {2013},
url = {http://informs-sim.org/wsc13papers/includes/files/073.pdf},
keywords = {retrospective approximation, constrained simulation optimization, simulation optimization on integer-ordered spaces},
abstract = {R-SPLINE is a recently proposed competitor to the popular COMPASS algorithm for solving local integer-ordered simulation optimization problems that have either an unconstrained or a deterministically-constrained feasible region. R-SPLINE is a refined sample-average approximation algorithm with a structure that is particularly conducive to the inclusion of stochastic constraints. In this paper we consider one such trivial adaptation of R-SPLINE. Our aim is narrow in that we wish only to investigate the asymptotic behavior of the resulting iterates. Accordingly, we demonstrate sufficient conditions under which the proposed adaptation's iterates match the consistency and convergence rate qualities of the iterates from the originally proposed R-SPLINE. Ongoing numerical experiments show much promise but raise important questions about the choice of algorithm parameters when the adaptation is executed on problems where one or more of the constraints are binding.}}
@inproceedings{2013ghopasWSC,
author = {S. Ghosh and R. Pasupathy},
title = {Online quantile and density estimators},
booktitle = {Proceedings of the 2013 Winter Simulation Conference},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
editor = {R. Pasupathy and {S.-H.} Kim and A. Tolk and R. Hill and M. E. Kuhl},
pages = {778--789},
year = {2013},
url = {http://informs-sim.org/wsc13papers/includes/files/067.pdf},
keywords = {quantiles, O estimators},
abstract = {The traditional estimator for the p-quantile of a random variable X is obtained by inverting the empirical cumulative distribution function (cdf) constructed from n obtained observations. The estimator requires O(n) storage, and the mean squared error of the estimator decays as O(1/n). In this article, we present an alternative estimator that requires dramatically less storage with negligible loss in convergence rate. The proposed estimator relies on an alternative cdf that is constructed by accumulating the observed random variates into variable-sized bins that progressively become finer around the quantile. The size of the bins are adjusted to ensure that the increased bias due to binning does not adversely affect the resulting convergence rate. We present an ``online'' version of the estimator, and discuss of some of its theoretical properties. We also discuss analogous ideas for density estimation.}}
%------ 2012 ------------------
@inproceedings{2012pujhunetalWSC,
author = {N. A. Pujowidianto and S. R. Hunter and R. Pasupathy and L. H. Lee and {C.-H.} Chen},
title = {Closed-Form Sampling Laws For Stochastically Constrained Simulation Optimization On Large Finite Sets},
booktitle = {Proceedings of the 2012 Winter Simulation Conference},
editor = {C. Laroque and J. Himmelspach and R. Pasupathy and O. Rose and A. M. Uhrmacher},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
pages = {168--177},
year = {2012},
url = {http://informs-sim.org/wsc12papers/includes/files/con526.pdf},
urldate = {2013-07-20},
keywords = {ranking and selection, optimal allocation, constrained simulation optimization},
abstract = {Consider the context of constrained simulation optimization (SO), i.e., optimization problems where the objective function and constraints are known through a Monte Carlo simulation, with corresponding estimators possibly dependent. We identify the nature of sampling plans that characterize efficient algorithms, particularly in large countable spaces. We show that in a certain asymptotic sense, the optimal sampling characterization, that is, the sampling budget for each system that guarantees optimal convergence rates, depends on a single easily estimable quantity called the score. This result provides a useful and easily implementable sampling allocation that approximates the optimal allocation, which is otherwise intractable due to it being the solution to a difficult bilevel optimization problem. Our results point to a simple sequential algorithm for efficiently solving large-scale constrained simulation optimization problems on finite sets.}}
@inproceedings{2012haspasWSC,
author = {F. Hashemi and R. Pasupathy},
title = {Averaging and Derivative Estimation Within Stochastic Approximation Algorithms},
booktitle = {Proceedings of the 2012 Winter Simulation Conference},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
editor = {C. Laroque and J. Himmelspach and R. Pasupathy and O. Rose and A. M. Uhrmacher},
pages = {232--240},
year = {2012},
url = {http://informs-sim.org/wsc12papers/includes/files/con528.pdf},
keywords = {stochastic approximation},
abstract = {Stochastic Approximation (SA) is arguably the most investigated amongst algorithms for solving local continuous simulation-optimization problems. Despite its enduring popularity, the prevailing opinion is that the finite-time performance of SA-type algorithms is not robust to SA's sequence of algorithm parameters. In the last two decades, two major advances have been proposed toward alleviating this: (i) Polyak-Ruppert averaging where SA is executed in multiple time scales to allow iterates to use large (initial) step sizes for better finite-time performance, without sacrificing the asymptotic convergence rate; and (ii) efficient derivative estimation to allow better searching within the solution space. Interestingly, however, all existing literature on SA seems to treat each of these advances separately. In this article, we present two results which characterize convergence rates when (i) and (ii) are applied simultaneously. Our results should be seen as providing a theoretical basis for applying ideas that seem to work in practice.}}
%------ 2011 ------------------
@inproceedings{2011ghopas,
author = {S. Ghosh and R. Pasupathy},
title = {On Interior-Point Based Retrospective Approximation Methods for Solving Two-Stage Stochastic Linear Programs},
booktitle = {Proceedings of the 2011 Winter Simulation Conference},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
editor = {S. Jain and R. R. Creasey and J. Himmelspach and K. P. White and M. Fu},
pages = {4163--4171},
year = {2011},
url = {http://www.informs-sim.org/wsc11papers/370.pdf},
keywords = {retrospective approximation, two-stage stochastic linear programs},
abstract = {In a recent paper, Gongyun Zhao introduced what appears to be the first interior point formulation for solving two-stage stochastic linear programs for finite support random variables. In this paper, we generalize Gongyun Zhao's formulation by incorporating it into a retrospective approximation framework. What results is an implementable interior-point solution paradigm that can be used to solve general two-stage stochastic linear programs. After discussing some basic properties, we characterize the complexity of the algorithm, leading to guidance on the number of samples that should be generated to construct the sub-problem linear programs, effort expended in solving the sub-problems, and the effort expended in solving the master problem.}}
@inproceedings{2011hunpujetalWSC,
author = {S. R. Hunter and N. A. Pujowidianto and {C.-H.} Chen and L. H. Lee and R. Pasupathy and C. M. Yap},
title = {Optimal sampling laws for constrained simulation optimization on finite sets: the bivariate normal case},
booktitle = {Proceedings of the 2011 Winter Simulation Conference},
editor = {S. Jain and R. R. Creasey and J. Himmelspach and K. P. White and M. Fu},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
pages = {4294--4302},
year = {2011},
url = {http://www.informs-sim.org/wsc11papers/382.pdf},
urldate = {2013-07-20},
keywords = {ranking and selection, optimal allocation, constrained simulation optimization},
abstract = {Consider the context of selecting an optimal system from among a finite set of competing systems, based on a ``stochastic'' objective function and subject to a single ``stochastic'' constraint. In this setting, and assuming the objective and constraint performance measures have a bivariate normal distribution, we present a characterization of the optimal sampling allocation across systems. Unlike previous work on this topic, the characterized optimal allocations are asymptotically exact and expressed explicitly as a function of the correlation between the performance measures.}
}
@inproceedings{2011pashenWSC,
author = {R. Pasupathy and S. G. Henderson},
title = {{SimOpt}: A Library of Simulation Optimization Problems},
booktitle = {Proceedings of the 2011 Winter Simulation Conference},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
editor = {S. Jain and R. R. Creasey and J. Himmelspach and K. P. White and M. Fu},
pages = {4080--4090},
year = {2011},
url = {http://www.informs-sim.org/wsc11papers/363.pdf},
keywords = {testbed, simopt.org},
abstract = {We present SimOpt --- a library of simulation-optimization problems intended to spur development and comparison of simulation-optimization methods and algorithms. The library currently has over 50 problems that are tagged by important problem attributes such as type of decision variables, and nature of constraints. Approximately half of the problems in the library come with a downloadable simulation oracle that follows a standardized calling protocol. We also propose the idea of problem and algorithm wrappers with a view toward facilitating assessment and comparison of simulation optimization algorithms.}}
@inproceedings{2011pasWSC,
author = {R. Pasupathy},
title = {An Introspective on the Retrospective-approximation Paradigm},
booktitle = {Proceedings of the 2011 Winter Simulation Conference},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
editor = {S. Jain and R. R. Creasey and J. Himmelspach and K. P. White and M. Fu},
pages = {412--421},
year = {2011},
url = {http://www.informs-sim.org/wsc11papers/035.pdf},
keywords = {retrospective approximation, stochastic root-finding},
abstract = {Retrospective Approximation (RA) is a solution paradigm introduced in the early 1990s by Chen and Schmeiser for solving one-dimensional stochastic root finding problems (SRFPs). The RA paradigm can be thought of as a refined and implementable version of sample average approximation, where a sequence of approximate problems are strategically generated and solved to identify iterates that progressively approach the desired solution. While originally aimed at one-dimensional SRFPs, the paradigm's broader utility, particularly within general simulation optimization algorithms, is becoming increasingly evident. We discuss the RA paradigm, demonstrate its usefulness, present the key results and papers on the topic over the last fifteen years, and speculate fruitful future directions.}}
%------ 2010 ------------------
@inproceedings{2010passchBWSC,
author = {R. Pasupathy and B. W. Schmeiser},
title = {The Initial Transient in Steady-State Point Estimation: Contexts, A Bibliography, The MSE Criterion, and The MSER Statistic},
booktitle = {Proceedings of the 2010 Winter Simulation Conference},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
editor = {B.~Johansson and S.~Jain and J.~Montoya-Torres and J.~Hugan and E.~Y\"{u}cesan},
pages = {184--197},
year = {2010},
url = {http://www.informs-sim.org/wsc10papers/017.pdf},
keywords = {initial transient},
abstract = {The initial transient is an unavoidable issue when estimating parameters of steady-state distributions. We discuss contexts and factors that affect how the initial transient is handled, provide a bibliography (from the system simulation literature), discuss criteria for evaluating initial-transient algorithms, arguing for focusing on the mean squared error (mse). We discuss the MSER statistic, showing that it is asymptotically proportional to the mse and therefore a good foundation for initial-transient algorithms. We suggest two new algorithms (MSER-LLM and MSER-LLM2) for using the MSER statistic and compare them, based on empirical results for M/M/1 and AR(1) data processes, to the original MSER algorithm (MSER-GM).}
}
@inproceedings{2010passchAWSC,
author = {R. Pasupathy and B. W. Schmeiser},
title = {Root Finding via DARTS: Dynamic Adaptive Random Target Shooting},
booktitle = {Proceedings of the 2010 Winter Simulation Conference},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
editor = {B.~Johansson and S.~Jain and J.~Montoya-Torres and J.~Hugan and E.~Y\"{u}cesan},
pages = {1255--1262},
year = {2010},
url = {http://www.informs-sim.org/wsc10papers/115.pdf},
keywords = {stochastic root-finding, stochastic approximation},
abstract = {Consider multi-dimensional root finding when the equations are available only implicitly via a Monte Carlo simulation oracle that for any solution returns a vector of point estimates. We develop DARTS, a stochastic-approximation algorithm that makes quasi-Newton moves to a new solution whenever the current sample size is large compared to the estimated quality of the current solution and estimated sampling error. We show that DARTS converges in a certain precise sense, and discuss reasons to expect substantial computational efficiencies over traditional stochastic approximation variations.}}
@inproceedings{2010passzeyucWSC,
author = {R. Pasupathy and R. Szechtman and E. Y\"{u}cesan},
title = {Selecting Small Quantiles},
booktitle = {Proceedings of the 2010 Winter Simulation Conference},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
editor = {B.~Johansson and S.~Jain and J.~Montoya-Torres and J.~Hugan and E.~Y\"{u}cesan},
pages = {2762--2770},
year = {2010},
url = {http://www.informs-sim.org/wsc10papers/255.pdf},
keywords = {ranking and selection, quantiles, optimal allocation},
abstract = {Ranking and selection (R\&S) techniques are statistical methods developed to select the best system, or a subset of systems from among a set of alternative system designs. R\&S via simulation is particularly appealing as it combines modeling flexibility of simulation with the efficiency of statistical techniques for effective decision making. The overwhelming majority of the R\&S research, however, focuses on the expected performance of competing designs. Alternatively, quantiles, which provide additional information about the distribution of the performance measure of interest, may serve as better risk measures than the usual expected value. In stochastic systems, quantiles indicate the level of system performance that can be delivered with a specified probability. In this paper, we address the problem of ranking and selection based on quantiles. In particular, we formulate the problem and characterize the optimal budget allocation scheme using the large deviations theory.}}
@inproceedings{2010hunpasWSC,
author = {S. R. Hunter and R. Pasupathy},
title = {Large-deviation sampling laws for constrained simulation optimization on finite sets},
booktitle = {Proceedings of the 2010 Winter Simulation Conference},
editor = {B.~Johansson and S.~Jain and J.~Montoya-Torres and J.~Hugan and E.~Y\"{u}cesan},
publisher = {Institute of Electrical and Electronics Engineers, Inc.},
address = {Piscataway, NJ},
pages = {995--1002},
year = {2010},
url = {http://www.informs-sim.org/wsc10papers/090.pdf},
urldate = {2013-07-20},
keywords = {constrained simulation optimization, optimal allocation, ranking and selection},
abstract = {We consider the problem of selecting an optimal system from among a finite set of competing systems, based on a ``stochastic'' objective function and subject to a single ``stochastic'' constraint. By strategically dividing the competing systems, we derive a large deviations sampling framework that asymptotically minimizes the probability of false selection. We provide an illustrative example where a closed-form sampling law is obtained after relaxation.}}