diff --git a/figure_1.png b/figure_1.png
new file mode 100644
index 0000000..993014e
Binary files /dev/null and b/figure_1.png differ
diff --git a/machine_learning.py b/machine_learning.py
new file mode 100644
index 0000000..12badae
--- /dev/null
+++ b/machine_learning.py
@@ -0,0 +1,35 @@
+"""Exploring learning curves for classification of handwritten digits."""
+
+import matplotlib.pyplot as plt
+import numpy
+from sklearn.datasets import load_digits
+from sklearn.model_selection import train_test_split
+from sklearn.linear_model import LogisticRegression
+
+data = load_digits()
+print(data.DESCR)
+
+num_trials = 50
+train_percentages = range(5, 95, 5)  # 5, 10, ..., 90 percent
+test_accuracies = numpy.zeros(len(train_percentages))
+
+# Train a model at each training percentage and evaluate its accuracy on the
+# held-out test set.  Each percentage is repeated num_trials times to smooth
+# out the variability of any single random train/test split.
+# C=10**-10 applies very strong regularization, for consistency with the
+# previous example.
+for i, percentage in enumerate(train_percentages):
+    scores = []
+    for _ in range(num_trials):
+        X_train, X_test, y_train, y_test = train_test_split(
+            data.data, data.target, train_size=percentage / 100.0)
+        model = LogisticRegression(C=10**-10)
+        model.fit(X_train, y_train)
+        scores.append(model.score(X_test, y_test))
+    test_accuracies[i] = numpy.mean(scores)
+
+fig = plt.figure()
+plt.plot(train_percentages, test_accuracies)
+plt.xlabel('Percentage of Data Used for Training')
+plt.ylabel('Accuracy on Test Set')
+plt.show()
diff --git a/questions.txt b/questions.txt
new file mode 100644
index 0000000..8c9d3a1
--- /dev/null
+++ b/questions.txt
@@ -0,0 +1,4 @@
+1. General Trend: The Accuracy of the test increases as the percentage of data used for training increases. 
There are some points in the graph where the accuracy of the test actually decreases as the percentage of data used for training increases, but overall, we would say that these two are positively correlated. +2. The curve seems to be noisier in the lower percentages of data used for training. This may be because fewer observations are being used to build the model, so extreme outliers have a heavier effect on the accuracy of the test. +3. 100 trials made it a nice smooth curve. Any number of trials greater than 100 will keep the curve smooth. +4. Decreasing C (by increasing the number that is being raised to the power of -10) results in better accuracy in the higher half of the percentage of data used, while increasing C results in better accuracy in the lower half of the percentage of data used. \ No newline at end of file