直接上代碼:
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
|
fig_acc2 = np.zeros([n_epoch])  # per-epoch validation accuracy, for plotting
for epoch in range(n_epoch):
    start_time = time.time()

    # --- training ---
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
        _, err, ac = sess.run([train_op, loss, acc],
                              feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err
        train_acc += ac
        n_batch += 1
        summary_str = sess.run(merged_summary_op,
                               feed_dict={x: x_train_a, y_: y_train_a})
        summary_writer.add_summary(summary_str, epoch)
    # train_loss/train_acc are plain scalar sums, so no np.sum() is needed
    # (the original wrapped them in np.sum(), a no-op on scalars).
    print("   train loss: %f" % (train_loss / n_batch))
    print("   train acc: %f" % (train_acc / n_batch))
    fig_loss[epoch] = train_loss / n_batch
    fig_acc1[epoch] = train_acc / n_batch

    # --- validation ---
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
        err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err
        val_acc += ac
        n_batch += 1
    print("   validation loss: %f" % (val_loss / n_batch))
    print("   validation acc: %f" % (val_acc / n_batch))
    fig_acc2[epoch] = val_acc / n_batch

# Training-loss curve
fig, ax1 = plt.subplots()
lns1 = ax1.plot(np.arange(n_epoch), fig_loss, label="train loss")
ax1.set_xlabel('iteration')
ax1.set_ylabel('training loss')

# Training and validation accuracy curves on one figure,
# with separate y-axes (ax3 is a twin of ax2).
fig2, ax2 = plt.subplots()
ax3 = ax2.twinx()
lns2 = ax2.plot(np.arange(n_epoch), fig_acc1, label="train acc")
lns3 = ax3.plot(np.arange(n_epoch), fig_acc2, label="val acc")
ax2.set_xlabel('iteration')
ax2.set_ylabel('training acc')
ax3.set_ylabel('val acc')

# Merge the legends of both axes.
# BUGFIX: the original used `lns = lns3 + lns2`, which paired the
# validation curve with the "train acc" label and vice versa.
lns = lns2 + lns3
labels = ["train acc", "val acc"]
plt.legend(lns, labels, loc=7)
plt.show()
結(jié)果:
補(bǔ)充知識(shí):tensorflow2.x實(shí)時(shí)繪制訓(xùn)練時(shí)的損失和準(zhǔn)確率
我就廢話不多說(shuō)了,大家還是直接看代碼吧!
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
|
# Build the SGD optimizer; the learning rate comes from the config list.
sgd = SGD(lr=float(model_value[3]), decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

# validation_split: a float in (0, 1) — the fraction of the training data
# held out as the validation set.
history = model.fit(
    self.x_train,
    self.y_train,
    batch_size=self.batch_size,
    epochs=self.epoch_size,
    class_weight='auto',
    validation_split=0.1,
)

# Plot training & validation accuracy per epoch.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Plot training & validation loss per epoch.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

print("savemodel---------------")
model.save(os.path.join(model_value[0], 'model3_3.h5'))

# Evaluate on the held-out test set (returns loss and accuracy).
score = model.evaluate(self.x_test, self.y_test, batch_size=self.batch_size)
以上這篇在tensorflow下利用plt畫論文中l(wèi)oss,acc等曲線圖實(shí)例就是小編分享給大家的全部?jī)?nèi)容了,希望能給大家一個(gè)參考,也希望大家多多支持服務(wù)器之家。
原文鏈接:https://blog.csdn.net/qq_40994943/article/details/86651941