@inproceedings{ee7860968080407cb7961073b2c2dc36,
title = "Maximizing the margin with boosting",
abstract = "AdaBoost produces a linear combination of weak hypotheses. It has been observed that the generalization error of the algorithm continues to improve even after all examples are classified correctly by the current linear combination, i.e., by a hyperplane in the feature space spanned by the weak hypotheses. The improvement is attributed to the experimental observation that the distances (margins) of the examples to the separating hyperplane keep increasing even when the training error is already zero, that is, all examples are on the correct side of the hyperplane. We give an iterative version of AdaBoost that explicitly maximizes the minimum margin of the examples. We bound the number of iterations and the number of hypotheses used in the final linear combination, which approximates the maximum margin hyperplane to a given precision. Our modified algorithm essentially retains the exponential convergence properties of AdaBoost, and our result does not depend on the size of the hypothesis class.",
author = "R{\"a}tsch, Gunnar and Warmuth, {Manfred K.}",
note = "Publisher Copyright: {\textcopyright} Springer-Verlag Berlin Heidelberg 2002. 15th Annual Conference on Computational Learning Theory, COLT 2002; conference date: 08-07-2002 through 10-07-2002",
year = "2002",
doi = "10.1007/3-540-45435-7_23",
language = "English",
series = "Lecture Notes in Artificial Intelligence (Subseries of Lecture Notes in Computer Science)",
publisher = "Springer-Verlag",
pages = "334--350",
editor = "Kivinen, Jyrki and Sloan, {Robert H.}",
booktitle = "Computational Learning Theory: 15th Annual Conference on Computational Learning Theory, COLT 2002, Proceedings",
address = "Germany",
}
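
% Annotation (sketch, not part of the BibTeX record): the abstract refers to the margins of the
% training examples under the boosted linear combination. A minimal sketch of the standard
% normalized-margin quantities, with the notation below assumed here rather than taken from the paper:
%   f(x) = \sum_{t=1}^{T} \alpha_t h_t(x), \qquad
%   \rho_i = \frac{y_i f(x_i)}{\sum_{t=1}^{T} |\alpha_t|}, \qquad
%   \rho^* = \max_{\alpha} \min_{1 \le i \le m} \rho_i,
% where the h_t are the weak hypotheses, the \alpha_t their coefficients, and (x_i, y_i) with
% y_i \in \{-1, +1\} the m training examples; the modified AdaBoost described in the abstract
% iteratively drives the minimum margin \min_i \rho_i toward \rho^* up to the stated precision.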