@article{Chawla_03,
  author   = {Chawla, N. V. and Moore, T. E. and Hall, L. O. and Bowyer, K. W. and Kegelmeyer, W. P. and Springer, C.},
  title    = {Distributed Learning With Bagging-like Performance},
  journal  = {Pattern Recognition Letters},
  year     = {2003},
  volume   = {24},
  pages    = {455--471},
  keywords = {distributed learning/mining, bagging, decision trees, neural network},
  doi      = {10.1016/S0167-8655(02)00269-6},
  abstract = {Bagging forms a committee of classifiers by bootstrap aggregation of training sets from a pool of training data. A simple alternative to bagging is to partition the data into disjoint subsets. Experiments with decision tree and neural network classifiers on various datasets show that, given the same size partitions and bags, disjoint partitions result in performance equivalent to, or better than, bootstrap aggregates (bags). Many applications (e.g., protein structure prediction) involve use of datasets that are too large to handle in the memory of the typical computer. Hence, bagging with samples the size of the data is impractical. Our results indicate that, in such applications, the simple approach of creating a committee of n classifiers from disjoint partitions each of size 1/n (which will be memory resident during learning) in a distributed way results in a classifier which has a bagging-like performance gain. The use of distributed disjoint partitions in learning is significantly less complex and faster than bagging.},
}