

The full TensorFlow workflow: loading training data, building/training/saving a model, and loading and calling the model

1. Loading training data for a TensorFlow model

  • **Save the TensorFlow training data arrays (matrices) in the .npy data format; this makes the subsequent model training convenient. For example:**
import numpy as np
x=np.random.rand(100,7,9)  # x is the training data: 100 samples, each with 7*9 features
np.save(r"C:\结果\y_train_feature.npy",x)  # save the training data matrix x to a .npy file
  • Load the .npy training and test arrays (matrices). After loading, reshape the data to match the input and output shapes the model expects; otherwise the model cannot be trained. A quick shape check is sketched after the code below.
import numpy as np

'''Load the training and test data'''
y_train_feature=np.load(r'C:\结果\y_train_feature.npy')
y_test_feature=np.load(r'C:\结果\y_test_feature.npy')

'''Load the training and test labels'''
y_train=np.load(r'C:\结果\y_train.npy')
y_test=np.load(r'C:\结果\y_test.npy')
    
'''Reshape the data to the shapes the model's input and output require'''
y_test_feature=y_test_feature.reshape(-1,7,129)
y_train_feature=y_train_feature.reshape(-1,7,129)
y_test=y_test.reshape(-1,129)
y_train=y_train.reshape(-1,129)
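
A quick sanity check (a minimal sketch, assuming the 7-step, 129-feature layout used above) can confirm the reshaped arrays line up before training:

assert y_train_feature.shape[1:] == (7, 129)          # model input: (time steps, features)
assert y_train.shape[1:] == (129,)                    # model output: 129 values per sample
assert y_train_feature.shape[0] == y_train.shape[0]   # one label vector per training sample
print("train samples:", y_train_feature.shape[0], "test samples:", y_test_feature.shape[0])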
  

**2. Building, training, and saving a TensorFlow model**

  • The network can be built either with custom layers/models or with the TensorFlow API. Here the API is used as the example to build a multi-layer LSTM network for training (a subclassed custom-model sketch of the same stack follows the code below).
import tensorflow as tf
from tensorflow import keras
from keras.models import Model
from keras.layers import Input, MaxPooling1D, Dense, Conv1D, Conv2D, Dropout, Flatten,\
 BatchNormalization, Reshape, Activation, concatenate, SeparableConv2D, LSTM, GRU, Conv2DTranspose, SimpleRNN, MaxPooling2D
from keras.optimizers import Adam
from keras import callbacks

'''Model input'''
input_feature = Input(shape=y_train_feature.shape[1:], name='input_feature')

'''Build the LSTM stack'''
fc1=LSTM(512,return_sequences=True)(input_feature)
fc2=LSTM(512,return_sequences=True)(fc1)
fc3=LSTM(512,return_sequences=True)(fc2)
fc4=LSTM(512)(fc3)
'''Fully connected output layer'''
output_2 = Dense(129,activation='linear',kernel_initializer='random_uniform',\
                     bias_initializer='random_uniform',name='output1')(fc4)
"""Combine the whole network with Model"""
model = Model(inputs=input_feature, outputs=output_2)
model.summary()  # view the network architecture
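
For the custom-model route mentioned above, here is a minimal sketch of the same stack built by subclassing tf.keras.Model (the layer sizes simply mirror the API example; the class name StackedLSTM and the variable custom_model are illustrative, not part of the original script):

import tensorflow as tf

class StackedLSTM(tf.keras.Model):
    def __init__(self):
        super().__init__()
        # three LSTM layers that return full sequences, one that returns only the last step
        self.lstm1 = tf.keras.layers.LSTM(512, return_sequences=True)
        self.lstm2 = tf.keras.layers.LSTM(512, return_sequences=True)
        self.lstm3 = tf.keras.layers.LSTM(512, return_sequences=True)
        self.lstm4 = tf.keras.layers.LSTM(512)
        self.out = tf.keras.layers.Dense(129, activation='linear', name='output1')

    def call(self, inputs):
        x = self.lstm1(inputs)
        x = self.lstm2(x)
        x = self.lstm3(x)
        x = self.lstm4(x)
        return self.out(x)

custom_model = StackedLSTM()
custom_model.build(input_shape=(None, 7, 129))  # same (time steps, features) layout as above
custom_model.summary()

Such a subclassed model can then be compiled and fitted exactly like the functional-API model below.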
  • **Training the TensorFlow model: choosing the optimizer, setting the loss function, the batch size, the learning rate, the number of training epochs, saving the best model, and so on.**
adam=Adam(lr=0.0001,decay=1e-6)  # choose the optimizer and its learning rate
model.compile(optimizer=adam,
          loss='mae',       # mean absolute error loss
          metrics=['mae'])  # mean absolute error metric
'''Train the network'''

earlyStopping = callbacks.EarlyStopping(monitor='val_mae',patience=5,mode='min')

# file name pattern for saving model checkpoints
filepath = "weights-improvement-{epoch:02d}-{val_mae:.2f}.h5"

# save a checkpoint after each epoch
# period: interval between checkpoints, in epochs
checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_mae', verbose=1, save_best_only=True,
                            mode='min',save_weights_only=False,period=1)

print('Training------')
   
model.fit(y_train_feature,y_train,epochs=800,batch_size=128,verbose=1,validation_data=(y_test_feature,y_test),shuffle=True,callbacks=[earlyStopping,checkpoint])

model.save(r'new_models_ripleinputV7.h5')

**3. Loading and calling a TensorFlow model**

  • Once the model is trained and saved, it can be restored with the load_model() method.
from keras.models import load_model
import numpy as np

model = load_model(r"path_where_the_model_was_saved\LSTM.h5")
'''Feed input data to obtain the model's output'''
output=model.predict(input_feature)  # input_feature must match the model's expected input shape

'''Make sure the output is a plain NumPy array (predict already returns one)'''
output=np.array(output)
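
As a concrete illustration (the (1, 7, 129) input shape follows the LSTM model built in section 2, and the random values are only a stand-in for real feature data):

dummy_feature = np.random.rand(1, 7, 129)   # one sample: 7 time steps, 129 features per step
output = model.predict(dummy_feature)       # shape (1, 129), matching the Dense(129) output layer
print(output.shape)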

4. Complete code

  • A complete implementation of training the TensorFlow network (built with the API). A custom (subclassed) network model can be trained with the same procedure.
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 11 19:47:21 2021

@author: 茶墨先生
"""

'''Import the required packages'''
import tensorflow as tf
from tensorflow import  keras
from tensorflow.keras import  layers,Sequential,datasets
from keras import regularizers,callbacks 
tf.compat.v1.disable_eager_execution()
from keras import backend as K
from keras.datasets import mnist
from keras.optimizers import Adam
import numpy as np
from keras.models import Model
from keras.layers import Input, MaxPooling1D, Dense, Conv1D, Conv2D, Dropout, Flatten,\
 BatchNormalization, Reshape, Activation, concatenate, SeparableConv2D, LSTM, GRU, Conv2DTranspose, SimpleRNN, MaxPooling2D
  
# model definition and training
'''Training'''
def Train():
    '''Load the training data'''
    y_train_feature=np.load(r'C:\结果\y_train_speech_feature.npy')
    y_test_feature=np.load(r'C:\结果\y_test_speech_feature.npy')
    y_train=np.load(r'C:\结果\y_train.npy')
    y_test=np.load(r'C:\结果\y_test.npy')
    
    y_test_feature=y_test_feature.reshape(-1,7,129)
    y_train_feature=y_train_feature.reshape(-1,7,129)
    y_test=y_test.reshape(-1,129)
    y_train=y_train.reshape(-1,129)
    print("y_train: ",y_train.shape)
    print("y_train_speech_feature: ", y_train_feature.shape)
    
    
    '''Build the network model'''
    input_feature = Input(shape=y_train_feature.shape[1:], name='input_feature')
    fc1=LSTM(512,return_sequences=True)(input_feature)
    fc2=LSTM(512,return_sequences=True)(fc1)
    fc3=LSTM(512,return_sequences=True)(fc2)
    fc4=LSTM(512)(fc3)
    output_2 = Dense(129,activation='linear',kernel_initializer='random_uniform',\
                     bias_initializer='random_uniform',name='output1')(fc4)
    """以Model来组合整个网络"""
    model = Model(inputs= input_feature, outputs=output_2)
    model.summary()
    
    adam=Adam(lr=0.0001,decay=1e-6)  # optimizer and learning rate
    model.compile(optimizer=adam,
              loss='mae',       # mean absolute error loss
              metrics=['mae'])  # mean absolute error metric
    '''Train the network'''
    
    earlyStopping = callbacks.EarlyStopping(monitor='val_mae',patience=5,mode='min')
    
    # file name pattern for saving model checkpoints
    filepath = "weights-improvement-{epoch:02d}-{val_mae:.2f}.h5"
    
    
    # save a checkpoint after each epoch
    # period: interval between checkpoints, in epochs
    checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_mae', verbose=1, save_best_only=True,
                                mode='min',save_weights_only=False,period=1)
    
    print('Training------')
   
    model.fit(y_train_feature,y_train,epochs=800,batch_size=128,verbose=1,validation_data=(y_test_feature,y_test),shuffle=True,callbacks=[earlyStopping,checkpoint])
    
    model.save(r'new_models_ripleinputV7.h5')

'''Entry point'''
if __name__ =='__main__':
    
    '''Train'''
    Train()

    

Reading, living, traveling. Thanks for following and supporting!


Reprinted from: https://blog.csdn.net/qq_42719311/article/details/130337929
Copyright belongs to the original author, 茶墨先生. In case of infringement, please contact us for removal.
