@inproceedings{agarwal2018reductions,
  author    = {Agarwal, Alekh and Beygelzimer, Alina and Dud{\'\i}k, Miroslav and Langford, John and Wallach, Hanna M.},
  title     = {A Reductions Approach to Fair Classification},
  booktitle = {{ICML}},
  series    = {Proceedings of Machine Learning Research},
  volume    = {80},
  pages     = {60--69},
  publisher = {{PMLR}},
  year      = {2018},
  url       = {http://proceedings.mlr.press/v80/agarwal18a.html}
}

@inproceedings{agarwal2019fair,
  author    = {Agarwal, Alekh and Dud{\'\i}k, Miroslav and Wu, Zhiwei Steven},
  title     = {Fair Regression: Quantitative Definitions and Reduction-Based Algorithms},
  booktitle = {{ICML}},
  series    = {Proceedings of Machine Learning Research},
  volume    = {97},
  pages     = {120--129},
  publisher = {{PMLR}},
  year      = {2019},
  url       = {http://proceedings.mlr.press/v97/agarwal19d.html}
}


@inproceedings{al2018comparative,
  title        = {A comparative study of different curve fitting algorithms in artificial neural network using housing dataset},
  author       = {Al Bataineh, Ali and Kaur, Devinder},
  booktitle    = {{NAECON} 2018 - {IEEE} National Aerospace and Electronics Conference},
  pages        = {174--178},
  year         = {2018},
  organization = {IEEE},
  url          = {https://ieeexplore.ieee.org/abstract/document/8556738}
}


@article{barocas2016big,
  title   = {Big data's disparate impact},
  author  = {Barocas, Solon and Selbst, Andrew D.},
  journal = {California Law Review},
  volume  = {104},
  number  = {3},
  pages   = {671--732},
  year    = {2016},
  url     = {https://www.jstor.org/stable/24758720}
}


@book{barocas2019fairness,
  title = {Fairness and Machine Learning},
  author = {Solon Barocas and Moritz Hardt and Arvind Narayanan},
  publisher = {fairmlbook.org},
  url = {http://www.fairmlbook.org/},
  year = {2019}
}


@techreport{bird2020fairlearn,
  title       = {Fairlearn: A toolkit for assessing and improving fairness in {AI}},
  author      = {Bird, Sarah and Dud{\'\i}k, Miro and Edgar, Richard and Horn, Brandon and Lutz, Roman and Milan, Vanessa and Sameki, Mehrnoosh and Wallach, Hanna and Walker, Kathleen},
  institution = {Microsoft},
  number      = {MSR-TR-2020-32},
  year        = {2020},
  url         = {https://www.microsoft.com/en-us/research/uploads/prod/2020/05/Fairlearn_whitepaper.pdf}
}


@book{broussard2018artificial,
  title={Artificial unintelligence: How computers misunderstand the world},
  author={Broussard, Meredith},
  year={2018},
  publisher={MIT Press},
  url={https://mitpress.mit.edu/books/artificial-unintelligence}
}


@misc{cortbettdavies2022measure,
  title  = {The Measure and Mismeasure of Fairness},
  author = {Sam Corbett-Davies and Johann D. Gaebler and Hamed Nilforoshan and Ravi Shroff and Sharad Goel},
  note   = {Working paper},
  year   = {2022},
  url    = {https://5harad.com/papers/fair-ml.pdf}
}


@inproceedings{dwork2012awareness,
  author    = {Dwork, Cynthia and Hardt, Moritz and Pitassi, Toniann and Reingold, Omer and Zemel, Richard},
  title     = {Fairness through awareness},
  booktitle = {{ITCS}},
  pages     = {214--226},
  year      = {2012},
  url       = {https://dl.acm.org/doi/abs/10.1145/2090236.2090255}
}


@inproceedings{fazelpour2020algorithmic,
  title={Algorithmic fairness from a non-ideal perspective},
  author={Fazelpour, Sina and Lipton, Zachary C},
  booktitle={{AIES}},
  pages={57--63},
  year={2020},
  url={https://dl.acm.org/doi/10.1145/3375627.3375828}
}


@inproceedings{hardt2016equality,
  author    = {Moritz Hardt and
               Eric Price and
               Nati Srebro},
  title     = {Equality of Opportunity in Supervised Learning},
  booktitle = {{NeurIPS}},
  pages     = {3315--3323},
  year      = {2016},
  url       = {https://proceedings.neurips.cc/paper/2016/hash/9d2682367c3935defcb1f9e247a97c0d-Abstract.html}
}


@article{harrison1978hedonic,
  title={Hedonic housing prices and the demand for clean air},
  author={Harrison, Jr, David and Rubinfeld, Daniel L},
  journal={Journal of environmental economics and management},
  volume={5},
  number={1},
  pages={81--102},
  year={1978},
  publisher={Elsevier},
  url={https://deepblue.lib.umich.edu/bitstream/handle/2027.42/22636/0000186.pdf?sequence=1&isAllowed=y}
}


@inproceedings{madaio2020codesigning,
  title={Co-designing checklists to understand organizational challenges and opportunities around fairness in AI},
  author={Madaio, Michael A and Stark, Luke and Wortman Vaughan, Jennifer and Wallach, Hanna},
  booktitle={{ACM CHI}},
  pages={1--14},
  year={2020},
  url={https://dl.acm.org/doi/10.1145/3313831.3376445}
}


@article{mehrabi2021survey,
  title={A survey on bias and fairness in machine learning},
  author={Mehrabi, Ninareh and Morstatter, Fred and Saxena, Nripsuta and Lerman, Kristina and Galstyan, Aram},
  journal={ACM Computing Surveys (CSUR)},
  volume={54},
  number={6},
  pages={1--35},
  year={2021},
  publisher={ACM New York, NY, USA},
  url={https://dl.acm.org/doi/10.1145/3457607}
}


@book{noble2018algorithms,
  title     = {Algorithms of oppression},
  author    = {Noble, Safiya Umoja},
  year      = {2018},
  publisher = {New York University Press},
  url       = {https://nyupress.org/9781479837243/algorithms-of-oppression/}
}


@book{oneil2017weapons,
  title={Weapons of math destruction: How big data increases inequality and threatens democracy},
  author={O'Neil, Cathy},
  year={2017},
  publisher={Crown},
  url={https://www.penguinrandomhouse.com/books/241363/weapons-of-math-destruction-by-cathy-oneil/}
}


@inproceedings{selbst2019fairness,
  author    = {Selbst, Andrew D. and {boyd}, {danah} and Friedler, Sorelle A. and Venkatasubramanian, Suresh and Vertesi, Janet},
  title     = {Fairness and Abstraction in Sociotechnical Systems},
  year      = {2019},
  isbn      = {9781450361255},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://dl.acm.org/doi/10.1145/3287560.3287598},
  doi       = {10.1145/3287560.3287598},
  abstract  = {A key goal of the fair-ML community is to develop machine-learning based systems that, once introduced into a social context, can achieve social and legal outcomes such as fairness, justice, and due process. Bedrock concepts in computer science---such as abstraction and modular design---are used to define notions of fairness and discrimination, to produce fairness-aware learning algorithms, and to intervene at different stages of a decision-making pipeline to produce "fair" outcomes. In this paper, however, we contend that these concepts render technical interventions ineffective, inaccurate, and sometimes dangerously misguided when they enter the societal context that surrounds decision-making systems. We outline this mismatch with five "traps" that fair-ML work can fall into even as it attempts to be more context-aware in comparison to traditional data science. We draw on studies of sociotechnical systems in Science and Technology Studies to explain why such traps occur and how to avoid them. Finally, we suggest ways in which technical designers can mitigate the traps through a refocusing of design in terms of process rather than solutions, and by drawing abstraction boundaries to include social actors rather than purely technical ones.},
  booktitle = {Proceedings of the Conference on Fairness, Accountability, and Transparency},
  pages     = {59--68},
  numpages  = {10},
  keywords  = {Fairness-aware Machine Learning, Sociotechnical Systems, Interdisciplinary},
  location  = {Atlanta, GA, USA},
  series    = {FAT* '19}
}

@inproceedings{jacobs2021measurement,
  author    = {Jacobs, Abigail Z. and Wallach, Hanna},
  title     = {Measurement and Fairness},
  year      = {2021},
  isbn      = {9781450383097},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3442188.3445901},
  doi       = {10.1145/3442188.3445901},
  abstract  = {We propose measurement modeling from the quantitative social sciences as a framework for understanding fairness in computational systems. Computational systems often involve unobservable theoretical constructs, such as socioeconomic status, teacher effectiveness, and risk of recidivism. Such constructs cannot be measured directly and must instead be inferred from measurements of observable properties (and other unobservable theoretical constructs) thought to be related to them---i.e., operationalized via a measurement model. This process, which necessarily involves making assumptions, introduces the potential for mismatches between the theoretical understanding of the construct purported to be measured and its operationalization. We argue that many of the harms discussed in the literature on fairness in computational systems are direct results of such mismatches. We show how some of these harms could have been anticipated and, in some cases, mitigated if viewed through the lens of measurement modeling. To do this, we contribute fairness-oriented conceptualizations of construct reliability and construct validity that unite traditions from political science, education, and psychology and provide a set of tools for making explicit and testing assumptions about constructs and their operationalizations. We then turn to fairness itself, an essentially contested construct that has different theoretical understandings in different contexts. We argue that this contestedness underlies recent debates about fairness definitions: although these debates appear to be about different operationalizations, they are, in fact, debates about different theoretical understandings of fairness. We show how measurement modeling can provide a framework for getting to the core of these debates.},
  booktitle = {Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency},
  pages     = {375--385},
  numpages  = {11},
  keywords  = {construct reliability, measurement, fairness, construct validity},
  location  = {Virtual Event, Canada},
  series    = {FAccT '21}
}

@book{kain1975housing,
  title     = {Housing Markets and Racial Discrimination: A Microeconomic Analysis},
  author    = {Kain, John F. and Quigley, John M.},
  publisher = {National Bureau of Economic Research},
  year      = {1975},
  url       = {https://www.nber.org/books-and-chapters/housing-markets-and-racial-discrimination-microeconomic-analysis}
}

@article{obermeyer2019dissecting,
  author   = {Ziad Obermeyer and Brian Powers and Christine Vogeli and Sendhil Mullainathan},
  title    = {Dissecting racial bias in an algorithm used to manage the health of populations},
  journal  = {Science},
  volume   = {366},
  number   = {6464},
  pages    = {447--453},
  year     = {2019},
  doi      = {10.1126/science.aax2342},
  url      = {https://www.science.org/doi/abs/10.1126/science.aax2342},
  eprint   = {https://www.science.org/doi/pdf/10.1126/science.aax2342},
  abstract = {The U.S. health care system uses commercial algorithms to guide health decisions. Obermeyer et al. find evidence of racial bias in one widely used algorithm, such that Black patients assigned the same level of risk by the algorithm are sicker than White patients (see the Perspective by Benjamin). The authors estimated that this racial bias reduces the number of Black patients identified for extra care by more than half. Bias occurs because the algorithm uses health costs as a proxy for health needs. Less money is spent on Black patients who have the same level of need, and the algorithm thus falsely concludes that Black patients are healthier than equally sick White patients. Reformulating the algorithm so that it no longer uses costs as a proxy for needs eliminates the racial bias in predicting who needs extra care. Science, this issue p. 447; see also p. 421 A health algorithm that uses health costs as a proxy for health needs leads to racial bias against Black patients. Health systems rely on commercial prediction algorithms to identify and help patients with complex health needs. We show that a widely used algorithm, typical of this industry-wide approach and affecting millions of patients, exhibits significant racial bias: At a given risk score, Black patients are considerably sicker than White patients, as evidenced by signs of uncontrolled illnesses. Remedying this disparity would increase the percentage of Black patients receiving additional help from 17.7 to 46.5\%. The bias arises because the algorithm predicts health care costs rather than illness, but unequal access to care means that we spend less money caring for Black patients than for White patients. Thus, despite health care cost appearing to be an effective proxy for health by some measures of predictive accuracy, large racial biases arise. We suggest that the choice of convenient, seemingly effective proxies for ground truth can be an important source of algorithmic bias in many contexts.}
}

@article{crenshaw1991intersectionality,
  author  = {Crenshaw, Kimberl{\'e}},
  title   = {Mapping the Margins: Intersectionality, Identity Politics, and Violence against Women of Color},
  journal = {Stanford Law Review},
  volume  = {43},
  number  = {6},
  pages   = {1241--1299},
  year    = {1991},
  doi     = {10.2307/1229039},
  url     = {https://www.jstor.org/stable/1229039}
}

@inproceedings{zhang2018mitigating,
  title={Mitigating unwanted biases with adversarial learning},
  author={Zhang, Brian Hu and Lemoine, Blake and Mitchell, Margaret},
  booktitle={Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society},
  pages={335--340},
  year={2018},
  url={https://dl.acm.org/doi/pdf/10.1145/3278721.3278779}
}

@misc{Xiang2019legalcompatibility,
  doi = {10.48550/ARXIV.1912.00761},
  url = {https://arxiv.org/abs/1912.00761},
  author = {Xiang, Alice and Raji, Inioluwa Deborah},
  keywords = {Computers and Society (cs.CY), Artificial Intelligence (cs.AI), Machine Learning (cs.LG), Machine Learning (stat.ML), FOS: Computer and information sciences, FOS: Computer and information sciences},
  title = {On the Legal Compatibility of Fairness Definitions},
  publisher = {arXiv},
  year = {2019},
  copyright = {Creative Commons Attribution 4.0 International}
}

@misc{watkins2022fourfifths,
  doi = {10.48550/ARXIV.2202.09519},
  url = {https://arxiv.org/abs/2202.09519},
  author = {Watkins, Elizabeth Anne and McKenna, Michael and Chen, Jiahao},
  keywords = {Computers and Society (cs.CY), Artificial Intelligence (cs.AI), Machine Learning (cs.LG), Logic in Computer Science (cs.LO), FOS: Computer and information sciences, FOS: Computer and information sciences, K.4; K.5; F.4; I.2, 68T27, 03B70},
  title = {The four-fifths rule is not disparate impact: a woeful tale of epistemic trespassing in algorithmic fairness},
  publisher = {arXiv},
  year = {2022},
  copyright = {arXiv.org perpetual, non-exclusive license}
}

@article{bickel1975biasinadmissions,
  author  = {Bickel, P. J. and Hammel, E. A. and O'Connell, E. W.},
  title   = {Sex Bias in Graduate Admissions: Data from {Berkeley}},
  journal = {Science},
  volume  = {187},
  number  = {4175},
  pages   = {398--404},
  year    = {1975},
  doi     = {10.1126/science.187.4175.398},
  url     = {https://doi.org/10.1126/science.187.4175.398}
}

@article{strack2014impact,
  author  = {Strack, Beata and Deshazo, Jonathan and Gennings, Chris and Olmo Ortiz, Juan Luis and Ventura, Sebastian and Cios, Krzysztof and Clore, John},
  title   = {Impact of {HbA1c} Measurement on Hospital Readmission Rates: Analysis of 70,000 Clinical Database Patient Records},
  journal = {BioMed Research International},
  volume  = {2014},
  pages   = {781670},
  year    = {2014},
  month   = apr,
  doi     = {10.1155/2014/781670}
}

@misc{strack2014diabetes,
  author = {Strack, Beata and Deshazo, Jonathan and Gennings, Chris and Olmo Ortiz, Juan Luis and Ventura, Sebastian and Cios, Krzysztof and Clore, John},
  title  = {Diabetes 130-{US} hospitals for years 1999-2008 Data Set},
  year   = {2014},
  month  = may,
  url    = {https://archive.ics.uci.edu/ml/datasets/Diabetes+130-US+hospitals+for+years+1999-2008}
}


@book{umojanoble2018algorithmsoppression,
  title     = {Algorithms of Oppression},
  author    = {Safiya {Umoja Noble}},
  publisher = {NYU Press},
  url       = {http://algorithmsofoppression.com/},
  year      = {2018}
}

@inproceedings{barocas2017problem,
  title={The problem with bias: Allocative versus representational harms in machine learning},
  author={Barocas, Solon and Crawford, Kate and Shapiro, Aaron and Wallach, Hanna},
  booktitle={9th Annual conference of the special interest group for computing, information and society},
  year={2017}
}

@inproceedings{shahhosseini2020optimizing,
  title={Optimizing ensemble weights for machine learning models: A case study for housing price prediction},
  author={Shahhosseini, Mohsen and Hu, Guiping and Pham, Hieu},
  booktitle={Smart Service Systems, Operations Management, and Analytics: Proceedings of the 2019 INFORMS International Conference on Service Science},
  pages={87--97},
  year={2020},
  organization={Springer},
  url={https://lib.dr.iastate.edu/cgi/viewcontent.cgi?article=1187&context=imse_conf}
}

@article{tipping1999relevance,
  title={The relevance vector machine},
  author={Tipping, Michael},
  journal={Advances in neural information processing systems},
  volume={12},
  year={1999},
  url={https://proceedings.neurips.cc/paper/1999/file/f3144cefe89a60d6a1afaf7859c5076b-Paper.pdf}
}

@misc{scikitlearn2022ames,
  title  = {The {Ames} housing dataset},
  author = {{Scikit-Learn developers}},
  year   = {2022},
  url    = {https://inria.github.io/scikit-learn-mooc/python_scripts/datasets_ames_housing.html}
}

@misc{scikitlego2019fairness,
  title  = {Fairness},
  author = {Warmerdam, Vincent and Brouns, Matthijs and {Scikit-Lego contributors}},
  year   = {2019},
  url    = {https://scikit-lego.netlify.app/fairness.html}
}

@misc{carlisle2019racist,
  title={racist data destruction?},
  author={M Carlisle},
  year={2019},
  url={https://medium.com/@docintangible/racist-data-destruction-113e3eff54a8}
}

@misc{uscensusbureaumetropolitan,
  title  = {Metropolitan Areas},
  author = {{United States Census Bureau}},
  url    = {https://www.census.gov/history/www/programs/geography/metropolitan_areas.html}
}

@misc{nedlund2019apple,
  title  = {Apple Card is accused of gender bias. Here's how that can happen},
  author = {Nedlund, Evelina},
  url    = {https://edition.cnn.com/2019/11/12/business/apple-card-gender-bias/index.html},
  year   = {2019}
}

@inproceedings{chen2019fairness,
  title={Fairness under unawareness: Assessing disparity when protected class is unobserved},
  author={Chen, Jiahao and Kallus, Nathan and Mao, Xiaojie and Svacha, Geoffry and Udell, Madeleine},
  booktitle={Proceedings of the conference on fairness, accountability, and transparency},
  pages={339--348},
  year={2019},
  url={https://dl.acm.org/doi/abs/10.1145/3287560.3287594}
}

@inproceedings{mitchell2019model,
  title={Model cards for model reporting},
  author={Mitchell, Margaret and Wu, Simone and Zaldivar, Andrew and Barnes, Parker and Vasserman, Lucy and Hutchinson, Ben and Spitzer, Elena and Raji, Inioluwa Deborah and Gebru, Timnit},
  booktitle={Proceedings of the conference on fairness, accountability, and transparency},
  pages={220--229},
  year={2019},
  url={https://dl.acm.org/doi/10.1145/3287560.3287596}
}

@misc{dudik2020assessing,
  title={Assessing and mitigating unfairness in credit models with the Fairlearn toolkit},
  author={Dudík, Miroslav and Chen, William and Barocas, Solon and Inchiosa, Mario and Lewins, Nick and Oprescu, Miruna and Qiao, Joy and Sameki, Mehrnoosh and Schlener, Mario and Tuo, Jason and Wallach, Hanna},
  url={https://www.microsoft.com/en-us/research/uploads/prod/2020/09/Fairlearn-EY_WhitePaper-2020-09-22.pdf},
  year={2020}
}

@misc{peyton2020redlining,
  title={Redlining in America: How a history of housing discrimination endures},
  author={Nellie Peyton},
  year={2020},
  url={https://www.context.news/socioeconomic-inclusion/redlining-in-america-how-housing-discrimination-endures}
}

@misc{jan2018redlining,
  author={Tracy Jan},
  year={2018},
  title={Redlining was banned 50 years ago. It's still hurting minorities today.},
  url={https://www.washingtonpost.com/news/wonk/wp/2018/03/28/redlining-was-banned-50-years-ago-its-still-hurting-minorities-today}
}

@article{yeh2009comparisons,
  title={The comparisons of data mining techniques for the predictive accuracy of probability of default of credit card clients},
  author={Yeh, I-Cheng and Lien, Che-hui},
  journal={Expert systems with applications},
  volume={36},
  number={2},
  pages={2473--2480},
  year={2009},
  publisher={Elsevier},
  url={https://archive.ics.uci.edu/ml/datasets/default+of+credit+card+clients}
}

@misc{uscode2011title15chapter41subchapteriv,
  title={United States Code 2011 Edition - Title 15 Commerce and Trade - Chapter 41 Consumer Credit Protection - Subchapter IV—Equal Credit Opportunity},
  url={https://www.govinfo.gov/content/pkg/USCODE-2011-title15/html/USCODE-2011-title15-chap41-subchapIV.htm}
}

@article{ghassemi2022medicine,
  doi = {10.1016/j.patter.2021.100392},
  url = {https://doi.org/10.1016/j.patter.2021.100392},
  year = {2022},
  month = jan,
  publisher = {Elsevier {BV}},
  volume = {3},
  number = {1},
  pages = {100392},
  author = {Marzyeh Ghassemi and Elaine Okanyene Nsoesie},
  title = {In medicine, how do we machine learn anything real?},
  journal = {Patterns}
}

@article{vyas2020hidden,
  doi = {10.1056/nejmms2004740},
  url = {https://doi.org/10.1056/nejmms2004740},
  year = {2020},
  month = aug,
  publisher = {Massachusetts Medical Society},
  volume = {383},
  number = {9},
  pages = {874--882},
  author = {Darshali A. Vyas and Leo G. Eisenstein and David S. Jones},
  editor = {Debra Malina},
  title = {Hidden in Plain Sight {\textemdash} Reconsidering the Use of Race Correction in Clinical Algorithms},
  journal = {New England Journal of Medicine}
}

@article{sikstrom2022conceptualising,
  doi = {10.1136/bmjhci-2021-100459},
  url = {https://doi.org/10.1136/bmjhci-2021-100459},
  year = {2022},
  month = jan,
  publisher = {{BMJ}},
  volume = {29},
  number = {1},
  pages = {e100459},
  author = {Laura Sikstrom and Marta M Maslej and Katrina Hui and Zoe Findlay and Daniel Z Buchman and Sean L Hill},
  title = {Conceptualising fairness: three pillars for medical algorithms and health equity},
  journal = {{BMJ} Health Care Inform}
}

@article{krieger2012population,
  doi = {10.1111/j.1468-0009.2012.00678.x},
  url = {https://doi.org/10.1111/j.1468-0009.2012.00678.x},
  year = {2012},
  month = dec,
  publisher = {Wiley},
  volume = {90},
  number = {4},
  pages = {634--681},
  author = {Nancy Krieger},
  title = {Who and What Is a {\textquotedblleft}Population{\textquotedblright}? Historical Debates, Current Controversies, and Implications for Understanding {\textquotedblleft}Population Health{\textquotedblright} and Rectifying Health Inequities},
  journal = {Milbank Quarterly}
}

@article{wallace2011interactions,
  doi = {10.1093/carcin/bgr066},
  url = {https://doi.org/10.1093/carcin/bgr066},
  year = {2011},
  month = apr,
  publisher = {Oxford University Press ({OUP})},
  volume = {32},
  number = {8},
  pages = {1107--1121},
  author = {T. A. Wallace and D. N. Martin and S. Ambs},
  title = {Interactions among genes, tumor biology and the environment in cancer health disparities: examining the evidence on a national and global scale},
  journal = {Carcinogenesis}
}

@article{eknoyan2007adolphe,
  doi       = {10.1093/ndt/gfm517},
  url       = {https://doi.org/10.1093/ndt/gfm517},
  year      = {2007},
  month     = aug,
  publisher = {Oxford University Press ({OUP})},
  volume    = {23},
  number    = {1},
  pages     = {47--51},
  author    = {Eknoyan, Garabed},
  title     = {{Adolphe Quetelet} (1796--1874)---the average man and indices of obesity},
  journal   = {Nephrology Dialysis Transplantation}
}

@misc{bbc2016dutch,
  author       = {{BBC News}},
  title        = {{Dutch} men revealed as world's tallest},
  howpublished = {\url{https://www.bbc.com/news/science-environment-36888541}},
  year         = {2016},
  note         = {[Accessed 19-Jul-2023]},
}

@misc{endocrine2009widely,
  author       = {{The Endocrine Society}},
  title        = {Widely Used Body Fat Measurements Overestimate Fatness In {African-Americans}, Study Finds},
  howpublished = {ScienceDaily, \url{https://www.sciencedaily.com/releases/2009/06/090611142407.htm}},
  year         = {2009},
  note         = {[Accessed 19-Jul-2023]},
}

@article{racette2003obesity,
  doi       = {10.1093/ptj/83.3.276},
  url       = {https://doi.org/10.1093/ptj/83.3.276},
  year      = {2003},
  month     = mar,
  publisher = {Oxford University Press ({OUP})},
  volume    = {83},
  number    = {3},
  pages     = {276--288},
  author    = {Susan B. Racette and Susan S. Deusinger and Robert H. Deusinger},
  title     = {Obesity: Overview of Prevalence, Etiology, and Treatment},
  journal   = {Physical Therapy}
}

@misc{karasu2016adolphe,
  author       = {Karasu, Sylvia R.},
  title        = {{Adolphe Quetelet} and the Evolution of Body Mass Index ({BMI})},
  howpublished = {Psychology Today, \url{https://www.psychologytoday.com/us/blog/the-gravity-weight/201603/adolphe-quetelet-and-the-evolution-body-mass-index-bmi}},
  year         = {2016},
  note         = {[Accessed 19-Jul-2023]},
}

@misc{treviranus2019inclusivedesign,
	author = {Jutta Treviranus},
	title = {{I}nclusive {D}esign: {T}he {B}ell {C}urve, the {S}tarburst and the {V}irtuous {T}ornado - {I}nclusive {D}esign {R}esearch {C}entre},
	howpublished = {\url{https://idrc.ocadu.ca/ideas/inclusive-design-the-bell-curve-the-starburst-and-the-virtuous-tornado/}},
	year = {2019},
	note = {[Accessed 19-Jul-2023]},
}

@article{godreau2021nonsovereign,
  doi = {10.1111/aman.13601},
  url = {https://doi.org/10.1111/aman.13601},
  year = {2021},
  month = jun,
  publisher = {Wiley},
  volume = {123},
  number = {3},
  pages = {509--525},
  author = {Isar Godreau and Yarimar Bonilla},
  title = {Nonsovereign Racecraft: How Colonialism, Debt, and Disaster are Transforming Puerto Rican Racial Subjectivities},
  journal = {American Anthropologist}
}

@misc{npsSojournerTruth,
  author       = {{US National Park Service}},
  title        = {{Sojourner Truth}: Ain't {I} {A} Woman?},
  howpublished = {\url{https://www.nps.gov/wori/learn/historyculture/sojourner-truth.htm}},
  note         = {[Accessed 19-Jul-2023]},
}

@article{ford2010critical,
  doi = {10.2105/ajph.2009.171058},
  url = {https://doi.org/10.2105/ajph.2009.171058},
  year = {2010},
  month = apr,
  publisher = {American Public Health Association},
  volume = {100},
  number = {S1},
  pages = {S30--S35},
  author = {Chandra L. Ford and Collins O. Airhihenbuwa},
  title = {Critical Race Theory, Race Equity, and Public Health: Toward Antiracism Praxis},
  journal = {American Journal of Public Health}
}

@inproceedings{hanna2020towards,
  title={Towards a critical race methodology in algorithmic fairness},
  author={Hanna, Alex and Denton, Emily and Smart, Andrew and Smith-Loud, Jamila},
  booktitle={Proceedings of the 2020 conference on fairness, accountability, and transparency},
  pages={501--512},
  year={2020}
}

@misc{justiceCanadianHuman,
  author       = {{Government of Canada}},
  title        = {Canadian Human Rights Act},
  howpublished = {\url{https://laws-lois.justice.gc.ca/eng/acts/h-6/fulltext.html}},
  note         = {[Accessed 19-Jul-2023]},
}

@inproceedings{buolamwini2018gender,
  title={Gender shades: Intersectional accuracy disparities in commercial gender classification},
  author={Buolamwini, Joy and Gebru, Timnit},
  booktitle={Conference on fairness, accountability and transparency},
  pages={77--91},
  year={2018},
  organization={PMLR},
  url={http://gendershades.org/index.html},
}

@article{olbert2018meta,
  doi = {10.1037/abn0000309},
  url = {https://doi.org/10.1037/abn0000309},
  year = {2018},
  month = jan,
  publisher = {American Psychological Association ({APA})},
  volume = {127},
  number = {1},
  pages = {104--115},
  author = {Charles M. Olbert and Arundati Nagendra and Benjamin Buck},
  title = {Meta-analysis of Black vs. White racial disparity in schizophrenia diagnosis in the United States: Do structured assessments attenuate racial disparities?},
  journal = {Journal of Abnormal Psychology}
}

@incollection{maslej2022race,
  doi = {10.3233/shti220281},
  url = {https://doi.org/10.3233/shti220281},
  year = {2022},
  month = jun,
  publisher = {{IOS} Press},
  author = {Marta M. Maslej and Nelson Shen and Iman Kassam and Terri Rodak and Laura Sikstrom},
  title = {Race and Racialization in Mental Health Research and Implications for Developing and Evaluating Machine Learning Models: A Rapid Review},
  booktitle = {{MEDINFO} 2021: One World,  One Health {\textendash} Global Partnership for Digital Innovation}
}

@article{belsher2019prediction,
  doi = {10.1001/jamapsychiatry.2019.0174},
  url = {https://doi.org/10.1001/jamapsychiatry.2019.0174},
  year = {2019},
  month = jun,
  publisher = {American Medical Association ({AMA})},
  volume = {76},
  number = {6},
  pages = {642},
  author = {Bradley E. Belsher and Derek J. Smolenski and Larry D. Pruitt and Nigel E. Bush and Erin H. Beech and Don E. Workman and Rebecca L. Morgan and Daniel P. Evatt and Jennifer Tucker and Nancy A. Skopp},
  title = {Prediction Models for Suicide Attempts and Deaths},
  journal = {{JAMA} Psychiatry}
}

@article{gara2019naturalistic,
  doi = {10.1176/appi.ps.201800223},
  url = {https://doi.org/10.1176/appi.ps.201800223},
  year = {2019},
  month = feb,
  publisher = {American Psychiatric Association Publishing},
  volume = {70},
  number = {2},
  pages = {130--134},
  author = {Michael A. Gara and Shula Minsky and Steven M Silverstein and Theresa Miskimen and Stephen M. Strakowski},
  title = {A Naturalistic Study of Racial Disparities in Diagnoses at an Outpatient Behavioral Health Clinic},
  journal = {Psychiatric Services}
}

@article{gara2012influence,
  doi = {10.1001/archgenpsychiatry.2011.2040},
  url = {https://doi.org/10.1001/archgenpsychiatry.2011.2040},
  year = {2012},
  month = jun,
  publisher = {American Medical Association ({AMA})},
  volume = {69},
  number = {6},
  author = {Michael A. Gara and William A. Vega and Stephan Arndt and Michael Escamilla and David E. Fleck and William B. Lawson and Ira Lesser and Harold W. Neighbors and Daniel R. Wilson and Lesley M. Arnold and Stephen M. Strakowski},
  title = {Influence of Patient Race and Ethnicity on Clinical Assessment in Patients With Affective Disorders},
  journal = {Archives of General Psychiatry}
}

@misc{angwin2016machine,
  title        = {Machine bias},
  author       = {Angwin, Julia and Larson, Jeff and Mattu, Surya and Kirchner, Lauren},
  year         = {2016},
  howpublished = {ProPublica},
  url          = {https://www.propublica.org/article/machine-bias-risk-assessments-in-criminal-sentencing}
}

@misc{kuczmarski2018reducing,
    author = {James Kuczmarski},
    title = {{R}educing gender bias in {G}oogle {T}ranslate},
    howpublished = {\url{https://blog.google/products/translate/reducing-gender-bias-google-translate/}},
    year = {2018},
    note = {[Accessed 31-07-2023]},
}

@article{seyyedkalantari2021underdiagnosis,
  doi = {10.1038/s41591-021-01595-0},
  url = {https://doi.org/10.1038/s41591-021-01595-0},
  year = {2021},
  month = dec,
  publisher = {Springer Science and Business Media {LLC}},
  volume = {27},
  number = {12},
  pages = {2176--2182},
  author = {Laleh Seyyed-Kalantari and Haoran Zhang and Matthew B. A. McDermott and Irene Y. Chen and Marzyeh Ghassemi},
  title = {Underdiagnosis bias of artificial intelligence algorithms applied to chest radiographs in under-served patient populations},
  journal = {Nature Medicine}
}

@misc{banerjee2021readiang,
  author        = {Banerjee, Imon and Bhimireddy, Ananth Reddy and Burns, John L. and Celi, Leo Anthony and Chen, Li-Ching and Correa, Ramon and Dullerud, Natalie and Ghassemi, Marzyeh and Huang, Shih-Cheng and Kuo, Po-Chih and Lungren, Matthew P and Palmer, Lyle and Price, Brandon J and Purkayastha, Saptarshi and Pyrros, Ayis and Oakden-Rayner, Luke and Okechukwu, Chima and Seyyed-Kalantari, Laleh and Trivedi, Hari and Wang, Ryan and Zaiman, Zachary and Zhang, Haoran and Gichoya, Judy W},
  title         = {Reading Race: {AI} Recognises Patient's Racial Identity In Medical Images},
  year          = {2021},
  eprint        = {2107.10356},
  archiveprefix = {arXiv},
  primaryclass  = {cs.CV},
  doi           = {10.48550/ARXIV.2107.10356},
  url           = {https://arxiv.org/abs/2107.10356}
}

@misc{herbst2022schizophrenia,
  author = {Herbst, Diane},
  title  = {Schizophrenia in {Black} People: Racial Disparities Explained},
  year   = {2022},
  url    = {https://www.psycom.net/schizophrenia-racial-disparities-black-people},
  note   = {Accessed 31-07-2023}
}

@article{arnold2004ethnicity,
  author    = {Lesley M. Arnold and Paul E. Keck and Jacqueline Collins and Rodgers Wilson and David E. Fleck and Kimberly B. Corey and Jennifer Amicone and Victor R. Adebimpe and Stephen M. Strakowski},
  title     = {Ethnicity and first-rank symptoms in patients with psychosis},
  journal   = {Schizophrenia Research},
  volume    = {67},
  number    = {2-3},
  pages     = {207--212},
  year      = {2004},
  month     = apr,
  publisher = {Elsevier {BV}},
  doi       = {10.1016/s0920-9964(02)00497-8}
}

@misc{weerts2022does,
  title         = {Does the End Justify the Means? On the Moral Justification of Fairness-Aware Machine Learning},
  author        = {Weerts, Hilde and Royakkers, Lamb{\`e}r and Pechenizkiy, Mykola},
  year          = {2022},
  eprint        = {2202.08536},
  archiveprefix = {arXiv},
  url           = {https://arxiv.org/abs/2202.08536}
}

@misc{mittelstadt2023unfairness,
  title         = {The Unfairness of Fair Machine Learning: Levelling down and strict egalitarianism by default},
  author        = {Mittelstadt, Brent and Wachter, Sandra and Russell, Chris},
  year          = {2023},
  eprint        = {2302.02404},
  archiveprefix = {arXiv},
  url           = {https://arxiv.org/abs/2302.02404}
}

@article{ding2021retiring,
  title   = {Retiring {Adult}: New Datasets for Fair Machine Learning},
  author  = {Ding, Frances and Hardt, Moritz and Miller, John and Schmidt, Ludwig},
  journal = {Advances in Neural Information Processing Systems},
  volume  = {34},
  pages   = {6478--6490},
  year    = {2021},
  url     = {https://proceedings.neurips.cc/paper_files/paper/2021/file/32e54441e6382a7fbacbbbaf3c450059-Paper.pdf}
}

@misc{kohavi1996adult,
  author       = {Kohavi, Ronny and Becker, Barry},
  title        = {{Adult} Data Set},
  year         = {1996},
  howpublished = {UCI Machine Learning Repository},
  doi          = {10.24432/C5XW20},
  url          = {https://archive.ics.uci.edu/ml/datasets/adult}
}

@inproceedings{kohavi1996scaling,
  title     = {Scaling up the accuracy of naive-{Bayes} classifiers: A decision-tree hybrid},
  author    = {Kohavi, Ron and others},
  booktitle = {{KDD}},
  volume    = {96},
  pages     = {202--207},
  year      = {1996},
  url       = {https://dl.acm.org/doi/10.5555/3001460.3001502}
}

@article{moro2014data,
  author    = {Moro, S{\'e}rgio and Cortez, Paulo and Rita, Paulo},
  title     = {A data-driven approach to predict the success of bank telemarketing},
  journal   = {Decision Support Systems},
  volume    = {62},
  pages     = {22--31},
  year      = {2014},
  publisher = {Elsevier},
  url       = {https://repositorio.iscte-iul.pt/bitstream/10071/9499/5/dss_v3.pdf}
}

@misc{moro2012bank,
  author       = {Moro, S{\'e}rgio and Rita, Paulo and Cortez, Paulo},
  title        = {{Bank Marketing}},
  year         = {2012},
  howpublished = {UCI Machine Learning Repository},
  doi          = {10.24432/C5K306},
  url          = {https://archive.ics.uci.edu/dataset/222/bank+marketing}
}

@misc{vanschoren2014boston,
  author       = {Vanschoren, Joaquin},
  title        = {boston},
  year         = {2014},
  howpublished = {OpenML},
  url          = {https://www.openml.org/d/531}
}

@book{belsley2005regression,
  author    = {Belsley, David A and Kuh, Edwin and Welsch, Roy E},
  title     = {Regression diagnostics: Identifying influential data and sources of collinearity},
  publisher = {John Wiley \& Sons},
  year      = {2005}
}

@misc{census2023group,
  author = {{United States Census Bureau}},
  title  = {Group Quarters and Residence Rules for Poverty},
  url    = {https://www.census.gov/topics/income-poverty/poverty/guidance/group-quarters.html}
}

@misc{census2019pums,
  author = {{United States Census Bureau}},
  title  = {2018 {ACS} {PUMS} Documentation},
  year   = {2019},
  url    = {https://www.census.gov/programs-surveys/acs/microdata/documentation.2018.html}
}

@misc{sykes2020boston,
  author = {Sykes, Jamie},
  title  = {- {B} 1000({Bk} - 0.63){$^2$} where {Bk} is the proportion of blacks by town},
  year   = {2020},
  url    = {https://github.com/scikit-learn/scikit-learn/issues/16155}
}

@misc{wikibooks2010monotone,
  author = {{Wikibooks}},
  title  = {Algorithm Implementation/Geometry/Convex hull/Monotone chain},
  year   = {2010},
  url    = {https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain},
  note   = {Accessed 09-Jul-2023}
}

@misc{barocas2017tutorial,
  author = {Barocas, Solon and Hardt, Moritz},
  title  = {{NIPS} 2017 Tutorial on Fairness in Machine Learning},
  year   = {2017},
  url    = {https://fairmlbook.org/tutorial1.html}
}

@inproceedings{barocas2021disagg,
  author    = {Solon Barocas and Anhong Guo and Ece Kamar and Jacquelyn Krones and Meredith Ringel Morris and Jennifer Wortman Vaughan and W. Duncan Wadsworth and Hanna Wallach},
  title     = {Designing Disaggregated Evaluations of {AI} Systems: Choices, Considerations, and Tradeoffs},
  booktitle = {Proceedings of the 2021 {AAAI/ACM} Conference on {AI}, Ethics, and Society},
  pages     = {368--378},
  year      = {2021},
  url       = {https://dl.acm.org/doi/10.1145/3461702.3462610}
}

@article{madaio2022assess,
  author  = {Michael Madaio and Lisa Egede and Hariharan Subramonyam and Jennifer Wortman Vaughan and Hanna Wallach},
  title   = {Assessing the Fairness of {AI} Systems: {AI} Practitioners' Processes, Challenges, and Needs for Support},
  journal = {Proceedings of the {ACM} on Human-Computer Interaction},
  pages   = {1--26},
  year    = {2022},
  url     = {https://dl.acm.org/doi/10.1145/3512899}
}

@article{reed2024stakeholder,
  author    = {Reed, Mark S. and Merkle, Bethann Garramon and Cook, Elizabeth J. and others},
  title     = {Reimagining the language of engagement in a post-stakeholder world},
  journal   = {Sustainability Science},
  volume    = {19},
  pages     = {1481--1490},
  year      = {2024},
  publisher = {Springer Nature},
  url       = {https://link.springer.com/article/10.1007/s11625-024-01496-4}
}
