% Chapter in an edited volume -> @incollection (was @misc; the original
% type={rozdział} means "chapter"). Editor, booktitle, address and pages
% were extracted from the non-standard contributor/description fields;
% those source fields are kept below (BibTeX ignores unknown fields).
@incollection{Zaniewska_Aleksandra_Fairness_2022,
  author      = {Zaniewska, Aleksandra},
  editor      = {Dudycz, Helena},
  title       = {Fairness in machine learning -- bias identification and reduction},
  booktitle   = {Informatyka w biznesie},
  publisher   = {Wydawnictwo Uniwersytetu Ekonomicznego we Wrocławiu},
  address     = {Wrocław},
  year        = {2022},
  pages       = {104--116},
  language    = {eng},
  keywords    = {machine learning, classification algorithms, bias identification, bias mitigation, uczenie maszynowe, algorytmy, klasyfikacja, identyfikacja stronniczości, łagodzenie wpływu stronniczości},
  abstract    = {The author investigates the problem of biases occurring in machine learning algorithms, and the strategies for their identification and mitigation. The biases are classified into three main categories: bias in data, bias in algorithms and bias generated by users. The German credit data set used in this article comes from the UCL Machine Learning Repository and represents credit risk assigned to the applicants applying for credit from the bank. The two machine learning algorithms: Random Forest and XGBoost are trained on this data set, and they are then analysed for the presence of gender bias. Subsequently, pre-processing mitigation bias techniques are used to minimize the impact of gender bias. It is identified that both algorithms have bias present and the False Negative Rate for females is the most common problem. The mitigation strategies help reduce bias but do not reduce them completely.},
  rights      = {Pewne prawa zastrzeżone na rzecz Autorów i Wydawcy},
  description = {Informatyka w biznesie / pod red. Heleny Dudycz. - Wrocław: Wydawnictwo Uniwersytetu Ekonomicznego we Wrocławiu, 2022, s. 104-116},
  type        = {rozdział},
}