import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib
from matplotlib import pyplot as plt
C:\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_converters
matplotlib.rcParams.update({'font.size': 22})
На ваш выбор взять выборку и тип нейронной сети. Согласовать объем выборки и сложность нейронной сети.
В процедуру оптимизации нейронной сети вставить код, получающий ошибку на тестовой и контрольной выборках или воспользоваться встроенными процедурами.
fashion_mnist = tf.keras.datasets.fashion_mnist
(x_train, y_train),(x_test, y_test) = fashion_mnist.load_data()
x_train, x_test = x_train / 10 , x_test / 10
def drow_acc(history) :
plt.figure(figsize = (9,6))
plt.grid(True)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc = 'upper left')
plt.show()
def drow_loss(history) :
plt.figure(figsize = (9,6))
plt.grid(True)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc = 'upper left')
plt.show()
baseline
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(), #выравнивание слоев
tf.keras.layers.Dense(512, activation = tf.nn.relu), #сколько на выходе узлов будет в слое и с какой функцией активации
tf.keras.layers.Dropout(0.2), #справляемся с переобучением
tf.keras.layers.Dense(10, activation = tf.nn.softmax)
])
model.compile(optimizer = 'adam', #метод Адама стохастической оптимизации
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'])
hist = model.fit(x_train, y_train, epochs = 10, validation_data = (x_test,y_test))
Train on 60000 samples, validate on 10000 samples Epoch 1/10 60000/60000 [==============================] - 19s 313us/step - loss: 7.9753 - acc: 0.4939 - val_loss: 7.9647 - val_acc: 0.4991 Epoch 2/10 60000/60000 [==============================] - 18s 308us/step - loss: 7.4649 - acc: 0.5273 - val_loss: 6.8682 - val_acc: 0.5611 Epoch 3/10 60000/60000 [==============================] - 18s 304us/step - loss: 6.6241 - acc: 0.5783 - val_loss: 6.6014 - val_acc: 0.5789 Epoch 4/10 60000/60000 [==============================] - 19s 314us/step - loss: 6.3401 - acc: 0.5950 - val_loss: 6.2829 - val_acc: 0.5972 Epoch 5/10 60000/60000 [==============================] - 19s 316us/step - loss: 5.2890 - acc: 0.6563 - val_loss: 3.8539 - val_acc: 0.7423 Epoch 6/10 60000/60000 [==============================] - 18s 306us/step - loss: 2.5838 - acc: 0.7729 - val_loss: 0.5972 - val_acc: 0.8185 Epoch 7/10 60000/60000 [==============================] - 18s 300us/step - loss: 0.4720 - acc: 0.8448 - val_loss: 0.4754 - val_acc: 0.8489 Epoch 8/10 60000/60000 [==============================] - 18s 299us/step - loss: 0.4009 - acc: 0.8628 - val_loss: 0.4635 - val_acc: 0.8452 Epoch 9/10 60000/60000 [==============================] - 18s 302us/step - loss: 0.3743 - acc: 0.8682 - val_loss: 0.4236 - val_acc: 0.8524 Epoch 10/10 60000/60000 [==============================] - 18s 299us/step - loss: 0.3542 - acc: 0.8735 - val_loss: 0.4175 - val_acc: 0.8555
drow_acc(hist)
drow_loss(hist)
Для разных объемов выборки построить графики кривой обучения, показывающие различные скорости обучения.
hist_list = []
for part in np.linspace(0.33, 1, num = 3) :
print('part %s' % part)
element_cnt = int(x_train.shape[0] * part)
x_part_train = x_train[ :element_cnt ,:,:]
y_part_train = y_train[ :element_cnt]
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation = tf.nn.relu),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation = tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
hist = model.fit(x_part_train, y_part_train, epochs = 10, validation_data=(x_test,y_test))
hist_list.append(hist.history)
part 0.33 Train on 19800 samples, validate on 10000 samples Epoch 1/10 19800/19800 [==============================] - 7s 337us/step - loss: 10.1270 - acc: 0.3687 - val_loss: 10.2306 - val_acc: 0.3642 Epoch 2/10 19800/19800 [==============================] - 6s 327us/step - loss: 10.0468 - acc: 0.3753 - val_loss: 10.0385 - val_acc: 0.3766 Epoch 3/10 19800/19800 [==============================] - 6s 315us/step - loss: 10.0156 - acc: 0.3779 - val_loss: 10.1397 - val_acc: 0.3701 Epoch 4/10 19800/19800 [==============================] - 7s 337us/step - loss: 10.0572 - acc: 0.3754 - val_loss: 10.1212 - val_acc: 0.3712 Epoch 5/10 19800/19800 [==============================] - 6s 310us/step - loss: 9.9894 - acc: 0.3797 - val_loss: 10.0415 - val_acc: 0.3767 Epoch 6/10 19800/19800 [==============================] - 6s 324us/step - loss: 10.0325 - acc: 0.3769 - val_loss: 10.3783 - val_acc: 0.3557 Epoch 7/10 19800/19800 [==============================] - 6s 313us/step - loss: 10.0174 - acc: 0.3781 - val_loss: 10.2742 - val_acc: 0.3620 Epoch 8/10 19800/19800 [==============================] - 6s 313us/step - loss: 9.9984 - acc: 0.3793 - val_loss: 10.0078 - val_acc: 0.3786 Epoch 9/10 19800/19800 [==============================] - 6s 325us/step - loss: 9.9554 - acc: 0.3820 - val_loss: 10.0046 - val_acc: 0.3792 Epoch 10/10 19800/19800 [==============================] - 6s 311us/step - loss: 9.6556 - acc: 0.4004 - val_loss: 9.6483 - val_acc: 0.3999 part 0.665 Train on 39900 samples, validate on 10000 samples Epoch 1/10 39900/39900 [==============================] - 12s 312us/step - loss: 3.4141 - acc: 0.7051 - val_loss: 0.5240 - val_acc: 0.8117 Epoch 2/10 39900/39900 [==============================] - 12s 305us/step - loss: 0.4465 - acc: 0.8391 - val_loss: 0.4433 - val_acc: 0.8434 Epoch 3/10 39900/39900 [==============================] - 12s 303us/step - loss: 0.4017 - acc: 0.8552 - val_loss: 0.4503 - val_acc: 0.8351 Epoch 4/10 39900/39900 [==============================] - 12s 302us/step - loss: 0.3739 - acc: 0.8647 - val_loss: 0.4315 - val_acc: 0.8480 Epoch 5/10 39900/39900 [==============================] - 12s 306us/step - loss: 0.3551 - acc: 0.8710 - val_loss: 0.4778 - val_acc: 0.8387 Epoch 6/10 39900/39900 [==============================] - 13s 337us/step - loss: 0.3413 - acc: 0.8764 - val_loss: 0.4307 - val_acc: 0.8493 Epoch 7/10 39900/39900 [==============================] - 13s 329us/step - loss: 0.3347 - acc: 0.8780 - val_loss: 0.4234 - val_acc: 0.8517 Epoch 8/10 39900/39900 [==============================] - 13s 317us/step - loss: 0.3259 - acc: 0.8817 - val_loss: 0.4378 - val_acc: 0.8579 Epoch 9/10 39900/39900 [==============================] - 13s 315us/step - loss: 0.3112 - acc: 0.8849 - val_loss: 0.4062 - val_acc: 0.8614 Epoch 10/10 39900/39900 [==============================] - 13s 316us/step - loss: 0.3052 - acc: 0.8882 - val_loss: 0.4180 - val_acc: 0.8578 part 1.0 Train on 60000 samples, validate on 10000 samples Epoch 1/10 60000/60000 [==============================] - 19s 322us/step - loss: 10.2010 - acc: 0.3635 - val_loss: 10.1821 - val_acc: 0.3672 Epoch 2/10 60000/60000 [==============================] - 19s 311us/step - loss: 10.1530 - acc: 0.3688 - val_loss: 10.1729 - val_acc: 0.3683 Epoch 3/10 60000/60000 [==============================] - 19s 315us/step - loss: 10.1631 - acc: 0.3689 - val_loss: 10.2097 - val_acc: 0.3662 Epoch 4/10 60000/60000 [==============================] - 19s 311us/step - loss: 10.0809 - acc: 0.3740 - val_loss: 10.1089 - val_acc: 0.3727 Epoch 5/10 60000/60000 [==============================] - 19s 316us/step - loss: 9.9285 - acc: 0.3831 - val_loss: 10.1109 - val_acc: 0.3720 Epoch 6/10 60000/60000 [==============================] - 18s 299us/step - loss: 9.6690 - acc: 0.3992 - val_loss: 9.3253 - val_acc: 0.4207 Epoch 7/10 60000/60000 [==============================] - 18s 307us/step - loss: 9.3945 - acc: 0.4164 - val_loss: 9.2231 - val_acc: 0.4272 Epoch 8/10 60000/60000 [==============================] - 18s 303us/step - loss: 9.2669 - acc: 0.4243 - val_loss: 9.5639 - val_acc: 0.4059 Epoch 9/10 60000/60000 [==============================] - 19s 308us/step - loss: 9.2974 - acc: 0.4225 - val_loss: 9.4728 - val_acc: 0.4114 Epoch 10/10 60000/60000 [==============================] - 18s 303us/step - loss: 9.1869 - acc: 0.4296 - val_loss: 9.1464 - val_acc: 0.4320
hist_list[0].keys()
dict_keys(['val_loss', 'val_acc', 'loss', 'acc'])
for key in hist_list[0].keys() :
plt.figure(figsize = (16, 12))
plt.grid(True)
plt.title('model ' + key)
plt.ylabel(key)
plt.xlabel('epoch')
for hist in hist_list :
plt.plot(hist[key])
#file_neme = 'plots/' + key + '.png'
#plt.savefig(fname=file_neme,format='png')
plt.show()
Наложить на выборку шум. Например, перемешать независимые переменные или добавить случайный шум в зависимые, на ваш выбор. Построить несколько графиков кривой обучения в условиях наложенного шума с разной дисперсией. Зашумлённые данне должны оставаться картинкой. (лежать в диапазоне от 0 до 1)
Пускай шум имеет нормально распределение. Тогда : $$mean = 0.5$$ $$ 3 \sigma \lt 0.5 $$
hist_list = []
for noise_koef in np.linspace(0, 1, num = 5) :
noise = np.random.normal(loc = 0.5, scale = 0.16, size = x_train.shape)
x_part_noise = x_train + noise_koef * noise
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation = tf.nn.relu),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation = tf.nn.softmax)
])
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'])
hist = model.fit(x_part_noise, y_train, epochs = 10, validation_data = (x_test,y_test))
hist_list.append(hist.history)
Train on 60000 samples, validate on 10000 samples Epoch 1/10 60000/60000 [==============================] - 18s 305us/step - loss: 10.1849 - acc: 0.3616 - val_loss: 9.9119 - val_acc: 0.3814 Epoch 2/10 60000/60000 [==============================] - 18s 295us/step - loss: 8.5549 - acc: 0.4647 - val_loss: 7.7085 - val_acc: 0.5173 Epoch 3/10 60000/60000 [==============================] - 18s 297us/step - loss: 7.5360 - acc: 0.5275 - val_loss: 7.1768 - val_acc: 0.5508 Epoch 4/10 60000/60000 [==============================] - 18s 296us/step - loss: 7.2697 - acc: 0.5454 - val_loss: 7.1540 - val_acc: 0.5530 Epoch 5/10 60000/60000 [==============================] - 18s 299us/step - loss: 6.9348 - acc: 0.5639 - val_loss: 6.9329 - val_acc: 0.5637 Epoch 6/10 60000/60000 [==============================] - 18s 299us/step - loss: 6.6123 - acc: 0.5843 - val_loss: 6.6064 - val_acc: 0.5847 Epoch 7/10 60000/60000 [==============================] - 18s 297us/step - loss: 6.5737 - acc: 0.5872 - val_loss: 6.5540 - val_acc: 0.5878 Epoch 8/10 60000/60000 [==============================] - 18s 298us/step - loss: 6.5324 - acc: 0.5907 - val_loss: 6.4814 - val_acc: 0.5945 Epoch 9/10 60000/60000 [==============================] - 18s 303us/step - loss: 6.4644 - acc: 0.5948 - val_loss: 6.4625 - val_acc: 0.5945 Epoch 10/10 60000/60000 [==============================] - 19s 310us/step - loss: 6.4839 - acc: 0.5935 - val_loss: 6.5119 - val_acc: 0.5922 Train on 60000 samples, validate on 10000 samples Epoch 1/10 60000/60000 [==============================] - 19s 323us/step - loss: 12.0047 - acc: 0.2531 - val_loss: 12.8891 - val_acc: 0.2001 Epoch 2/10 60000/60000 [==============================] - 18s 303us/step - loss: 12.5643 - acc: 0.2200 - val_loss: 12.8848 - val_acc: 0.2006 Epoch 3/10 60000/60000 [==============================] - 18s 306us/step - loss: 12.3139 - acc: 0.2356 - val_loss: 12.0815 - val_acc: 0.2501 Epoch 4/10 60000/60000 [==============================] - 18s 303us/step - loss: 11.9899 - acc: 0.2559 - val_loss: 11.9091 - val_acc: 0.2609 Epoch 5/10 60000/60000 [==============================] - 18s 304us/step - loss: 11.6193 - acc: 0.2786 - val_loss: 11.9012 - val_acc: 0.2614 Epoch 6/10 60000/60000 [==============================] - 20s 332us/step - loss: 10.7811 - acc: 0.3306 - val_loss: 10.7168 - val_acc: 0.3350 Epoch 7/10 60000/60000 [==============================] - 19s 314us/step - loss: 10.7538 - acc: 0.3325 - val_loss: 10.7181 - val_acc: 0.3348 Epoch 8/10 60000/60000 [==============================] - 19s 315us/step - loss: 10.7606 - acc: 0.3321 - val_loss: 10.8319 - val_acc: 0.3273 Epoch 9/10 60000/60000 [==============================] - 20s 328us/step - loss: 10.7387 - acc: 0.3336 - val_loss: 10.7600 - val_acc: 0.3324 Epoch 10/10 60000/60000 [==============================] - 20s 329us/step - loss: 10.7175 - acc: 0.3348 - val_loss: 10.7666 - val_acc: 0.3318 Train on 60000 samples, validate on 10000 samples Epoch 1/10 60000/60000 [==============================] - 20s 341us/step - loss: 11.0639 - acc: 0.3113 - val_loss: 11.4408 - val_acc: 0.2893 Epoch 2/10 60000/60000 [==============================] - 19s 321us/step - loss: 10.5886 - acc: 0.3420 - val_loss: 10.9151 - val_acc: 0.3217 Epoch 3/10 60000/60000 [==============================] - 19s 322us/step - loss: 9.6044 - acc: 0.4027 - val_loss: 9.2146 - val_acc: 0.4275 Epoch 4/10 60000/60000 [==============================] - 21s 345us/step - loss: 9.1694 - acc: 0.4301 - val_loss: 9.2940 - val_acc: 0.4226 Epoch 5/10 60000/60000 [==============================] - 24s 392us/step - loss: 9.2481 - acc: 0.4253 - val_loss: 9.1220 - val_acc: 0.4333 Epoch 6/10 60000/60000 [==============================] - 25s 410us/step - loss: 9.2655 - acc: 0.4243 - val_loss: 9.1801 - val_acc: 0.4299 Epoch 7/10 60000/60000 [==============================] - 20s 325us/step - loss: 9.1824 - acc: 0.4295 - val_loss: 9.1199 - val_acc: 0.4335 Epoch 8/10 60000/60000 [==============================] - 19s 321us/step - loss: 9.1478 - acc: 0.4320 - val_loss: 9.2313 - val_acc: 0.4270 Epoch 9/10 60000/60000 [==============================] - 19s 313us/step - loss: 9.1506 - acc: 0.4318 - val_loss: 9.2209 - val_acc: 0.4273 Epoch 10/10 60000/60000 [==============================] - 19s 318us/step - loss: 9.1128 - acc: 0.4342 - val_loss: 9.1800 - val_acc: 0.4301 Train on 60000 samples, validate on 10000 samples Epoch 1/10 60000/60000 [==============================] - 20s 326us/step - loss: 11.6449 - acc: 0.2742 - val_loss: 11.5492 - val_acc: 0.2814 Epoch 2/10 60000/60000 [==============================] - 19s 312us/step - loss: 11.4651 - acc: 0.2867 - val_loss: 11.4507 - val_acc: 0.2882 Epoch 3/10 60000/60000 [==============================] - 19s 312us/step - loss: 10.7106 - acc: 0.3334 - val_loss: 10.0526 - val_acc: 0.3750 Epoch 4/10 60000/60000 [==============================] - 20s 339us/step - loss: 10.0313 - acc: 0.3761 - val_loss: 9.9031 - val_acc: 0.3847 Epoch 5/10 60000/60000 [==============================] - 21s 356us/step - loss: 9.9743 - acc: 0.3800 - val_loss: 9.9359 - val_acc: 0.3828 Epoch 6/10 60000/60000 [==============================] - 21s 352us/step - loss: 9.9837 - acc: 0.3795 - val_loss: 10.0911 - val_acc: 0.3731 Epoch 7/10 60000/60000 [==============================] - 18s 306us/step - loss: 9.9646 - acc: 0.3808 - val_loss: 9.9179 - val_acc: 0.3841 Epoch 8/10 60000/60000 [==============================] - 19s 316us/step - loss: 9.9254 - acc: 0.3834 - val_loss: 10.0259 - val_acc: 0.3776 Epoch 9/10 60000/60000 [==============================] - 21s 346us/step - loss: 9.5883 - acc: 0.4043 - val_loss: 8.9116 - val_acc: 0.4461 Epoch 10/10 60000/60000 [==============================] - 18s 298us/step - loss: 8.7952 - acc: 0.4533 - val_loss: 8.7701 - val_acc: 0.4545 Train on 60000 samples, validate on 10000 samples Epoch 1/10 60000/60000 [==============================] - 21s 358us/step - loss: 10.3926 - acc: 0.3529 - val_loss: 10.4228 - val_acc: 0.3524 Epoch 2/10 60000/60000 [==============================] - 17s 291us/step - loss: 9.7527 - acc: 0.3936 - val_loss: 9.0285 - val_acc: 0.4386 Epoch 3/10 60000/60000 [==============================] - 19s 312us/step - loss: 8.9002 - acc: 0.4467 - val_loss: 8.8137 - val_acc: 0.4528 Epoch 4/10 60000/60000 [==============================] - 22s 360us/step - loss: 8.8689 - acc: 0.4491 - val_loss: 8.8500 - val_acc: 0.4498 Epoch 5/10 60000/60000 [==============================] - 18s 301us/step - loss: 8.8990 - acc: 0.4473 - val_loss: 8.7906 - val_acc: 0.4540 Epoch 6/10 60000/60000 [==============================] - 19s 312us/step - loss: 8.3000 - acc: 0.4842 - val_loss: 8.7912 - val_acc: 0.4539 Epoch 7/10 60000/60000 [==============================] - 20s 341us/step - loss: 7.9042 - acc: 0.5089 - val_loss: 7.9258 - val_acc: 0.5070 Epoch 8/10 60000/60000 [==============================] - 21s 347us/step - loss: 7.8725 - acc: 0.5109 - val_loss: 7.9459 - val_acc: 0.5067 Epoch 9/10 60000/60000 [==============================] - 24s 395us/step - loss: 7.8983 - acc: 0.5093 - val_loss: 7.8774 - val_acc: 0.5107 Epoch 10/10 60000/60000 [==============================] - 24s 405us/step - loss: 7.9449 - acc: 0.5066 - val_loss: 7.9107 - val_acc: 0.5088
for key in hist_list[0].keys() :
plt.figure(figsize = (16,12))
plt.grid(True)
plt.title('model with noise ' + key)
plt.ylabel(key)
plt.xlabel('epoch')
for hist in hist_list :
plt.plot(hist[key])
#file_neme = 'plots/' + key + ' noise.png'
#plt.savefig(fname=file_neme,format='png')
plt.show()