用keras进行大数据训练,为了加快训练,需要提前制作训练集。
由于HDF5的特性,所有数据需要一次性读入到内存中,才能保存。
为此,我采用分批次分为2个以上HDF5进行存储。
1、先读取每个标签下的图片,并设置标签
# NOTE(review): fragment 1/3 of load_dataset as presented by the article —
# the function continues in a later snippet (split/append and HDF5-write parts).
def load_dataset(path_name, data_path):
    # Read every .jpg under each label directory of `path_name`, resizing to
    # IMAGE_SIZE x IMAGE_SIZE; each class directory gets the integer label `counter`.
    images = []          # images of the *current* class only (cleared per class later)
    labels = []          # parallel integer labels for `images`
    train_images = []    # global accumulators across classes
    valid_images = []
    train_labels = []
    valid_labels = []
    counter = 0          # numeric label assigned to the current class directory
    allpath = os.listdir(path_name)
    nb_classes = len(allpath)
    print("label_num: ", nb_classes)
    for child_dir in allpath:
        child_path = os.path.join(path_name, child_dir)
        for dir_image in os.listdir(child_path):
            if dir_image.endswith('.jpg'):
                img = cv2.imread(os.path.join(child_path, dir_image))
                # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3 —
                # confirm the target environment, or switch to cv2.resize (below).
                image = misc.imresize(img, (IMAGE_SIZE, IMAGE_SIZE), interp='bilinear')
                #resized_img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
                images.append(image)
                labels.append(counter)
2、该标签下的数据集分割为训练集(train images),验证集(val images),训练标签(train labels),验证标签
(val labels)
def split_dataset(images, labels):
    """Split images/labels into an 80% train and 20% validation set.

    Uses sklearn's train_test_split with a freshly drawn random seed,
    so each call produces a different split.
    Returns (train_images, valid_images, train_labels, valid_labels).
    """
    seed = random.randint(0, 100)
    split = train_test_split(images, labels, test_size=0.2, random_state=seed)
    train_images, valid_images, train_labels, valid_labels = split
    #print(train_images.shape[0], 'train samples')
    #print(valid_images.shape[0], 'valid samples')
    return train_images, valid_images, train_labels, valid_labels
3、分割后的数据分别添加到总的训练集,验证集,训练标签,验证标签。
其次,清空原有的图片集和标签集,目的是节省内存。假如一次性读入多个标签的数据集与标签集,进行数据分割后,会占用大于单纯进行上述操作两倍以上的内存。
# NOTE(review): fragment 2/3 of load_dataset — runs once per class directory,
# inside the outer `for child_dir` loop; indentation reconstructed from the
# full listing at the end of the article — confirm against it.
images = np.array(images)
# Split only the current class's images, then fold them into the global sets.
t_images, v_images, t_labels, v_labels = split_dataset(images, labels)
for i in range(len(t_images)):
    train_images.append(t_images[i])
    train_labels.append(t_labels[i])
for j in range(len(v_images)):
    valid_images.append(v_images[j])
    valid_labels.append(v_labels[j])
# Progress report every 50 classes.
if counter%50 == 49:
    print( counter+1 , "is read to the memory!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# Clear the per-class buffers to keep peak memory low (see article text).
images = []
labels = []
counter = counter + 1
print("train_images num: ", len(train_images), " ", "valid_images num: ", len(valid_images))
4、进行判断,直到读到自己分割的那个标签。
开始进行写入。写入之前,为了更好地训练模型,需要把对应的图片集和标签打乱顺序。
# NOTE(review): fragment 3/3 of load_dataset — flush the accumulated splits to a
# new HDF5 directory every 4316 classes, or when the last class has been read.
if ((counter % 4316 == 4315) or (counter == nb_classes - 1)):
    print("start write images and labels data...................................................................")
    # NOTE(review): divisor 5000 here does not match the 4316 chunk size above,
    # and `num - 1` can be -1 for the first chunk — looks suspicious; the full
    # listing at the end of the article uses the same arithmetic. Verify.
    num = counter // 5000
    dirs = data_path + "/" + "h5_" + str(num - 1)
    if not os.path.exists(dirs):
        os.makedirs(dirs)
    # NOTE(review): this snippet passes the *per-class* t_/v_ splits, while the
    # full listing passes the global train_/valid_ accumulators — confirm which
    # is intended before reuse.
    data2h5(dirs, t_images, v_images, t_labels, v_labels)
对应打乱顺序并写入到HDF5
def data2h5(dirs_path, train_images, valid_images, train_labels, valid_labels):
    """Shuffle each split in unison and write it to train.hdf5 / val.hdf5.

    Images and labels are shuffled with the *same* RNG state so each image
    stays paired with its label. Datasets are stored under keys
    "x_train"/"y_train" and "x_val"/"y_val" in `dirs_path`.
    """
    TRAIN_HDF5 = dirs_path + '/' + "train.hdf5"
    VAL_HDF5 = dirs_path + '/' + "val.hdf5"
    # Shuffle: capture the RNG state, shuffle images, restore the state,
    # shuffle labels — both sequences receive the identical permutation.
    state1 = np.random.get_state()
    np.random.shuffle(train_images)
    np.random.set_state(state1)
    np.random.shuffle(train_labels)
    state2 = np.random.get_state()
    np.random.shuffle(valid_images)
    np.random.set_state(state2)
    np.random.shuffle(valid_labels)
    datasets = [
        ("train", train_images, train_labels, TRAIN_HDF5),
        ("val", valid_images, valid_labels, VAL_HDF5)]
    for (dType, images, labels, outputPath) in datasets:
        # FIX: use a context manager so the HDF5 file is closed even if
        # create_dataset raises (the original leaked the handle on error).
        with h5py.File(outputPath, "w") as f:
            f.create_dataset("x_" + dType, data=images)
            f.create_dataset("y_" + dType, data=labels)
            #f.create_dataset("x_"+dType, data=images, compression="gzip", compression_opts=9)
            #f.create_dataset("y_"+dType, data=labels, compression="gzip", compression_opts=9)
5、判断文件全部读入
def read_dataset(dirs):
    """Print the sample counts stored in every HDF5 file directly under `dirs`.

    Each file's base name (e.g. "train" from "train.hdf5") is used to build
    the dataset keys "x_<name>" / "y_<name>".
    """
    files = os.listdir(dirs)
    print(files)
    for file in files:
        path = dirs + '/' + file
        # FIX: close the HDF5 handle via a context manager — the original
        # opened one file per iteration and never closed any of them.
        with h5py.File(path, "r") as dataset:
            file = file.split('.')
            set_x_orig = dataset["x_" + file[0]].shape[0]
            set_y_orig = dataset["y_" + file[0]].shape[0]
            print(set_x_orig)
            print(set_y_orig)
6、训练中,采用迭代器读入数据
def generator(self, datagen, mode):
    # Infinite batch generator for keras fit_generator: walks every HDF5
    # directory under self.data_path and yields batches of size self.BS.
    # `mode` selects "train" (train.hdf5) or "val" (val.hdf5);
    # `datagen` (truthy) enables on-the-fly augmentation via `aug`.
    # Yields ({'input_1': images}, {'softmax': labels}).
    passes=np.inf
    aug = ImageDataGenerator(
        featurewise_center = False,
        samplewise_center = False,
        featurewise_std_normalization = False,
        samplewise_std_normalization = False,
        zca_whitening = False,
        rotation_range = 20,
        width_shift_range = 0.2,
        height_shift_range = 0.2,
        horizontal_flip = True,
        vertical_flip = False)
    epochs = 0
    # Loops forever by default (passes is inf) — keras stops it externally.
    while epochs < passes:
        # Iterate over every per-chunk HDF5 directory.
        file_dir = os.listdir(self.data_path)
        for file in file_dir:
            #print(file)
            file_path = os.path.join(self.data_path, file)
            TRAIN_HDF5 = file_path + "/train.hdf5"
            VAL_HDF5 = file_path + "/val.hdf5"
            #TEST_HDF5 = file_path +"/test.hdf5"
            # NOTE(review): both files are opened regardless of `mode` and are
            # never closed — handles accumulate across epochs. Worth fixing.
            db_t = h5py.File(TRAIN_HDF5)
            numImages_t = db_t['y_train'].shape[0]
            db_v = h5py.File(VAL_HDF5)
            numImages_v = db_v['y_val'].shape[0]
            if mode == "train":
                # Slice batches straight from the HDF5 dataset (no full load).
                for i in np.arange(0, numImages_t, self.BS):
                    images = db_t['x_train'][i: i+self.BS]
                    labels = db_t['y_train'][i: i+self.BS]
                    if K.image_data_format() == 'channels_first':
                        images = images.reshape(images.shape[0], 3, IMAGE_SIZE,IMAGE_SIZE)
                    else:
                        images = images.reshape(images.shape[0], IMAGE_SIZE, IMAGE_SIZE, 3)
                    # Normalize pixel values to [0, 1].
                    images = images.astype('float32')
                    images = images/255
                    if datagen :
                        (images,labels) = next(aug.flow(images,labels,batch_size = self.BS))
                    # One-hot encode the labels.
                    if self.binarize:
                        labels = np_utils.to_categorical(labels,self.classes)
                    yield ({'input_1': images}, {'softmax': labels})
            elif mode == "val":
                for i in np.arange(0, numImages_v, self.BS):
                    images = db_v['x_val'][i: i+self.BS]
                    labels = db_v['y_val'][i: i+self.BS]
                    if K.image_data_format() == 'channels_first':
                        images = images.reshape(images.shape[0], 3, IMAGE_SIZE,IMAGE_SIZE)
                    else:
                        images = images.reshape(images.shape[0], IMAGE_SIZE, IMAGE_SIZE, 3)
                    images = images.astype('float32')
                    images = images/255
                    if datagen :
                        (images,labels) = next(aug.flow(images,labels,batch_size = self.BS))
                    # One-hot encode the labels.
                    if self.binarize:
                        labels = np_utils.to_categorical(labels,self.classes)
                    yield ({'input_1': images}, {'softmax': labels})
        epochs += 1
7、至此,就大功告成了
完整的代码:
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 20:46:12 2018

@author: william_yue

Build multiple train/val HDF5 chunks from a directory tree of labeled .jpg
images, so keras can train from several HDF5 files instead of one huge one.
"""
import os
import numpy as np
import cv2
import random
from scipy import misc
import h5py
from sklearn.model_selection import train_test_split
from keras import backend as K
K.clear_session()
from keras.utils import np_utils

IMAGE_SIZE = 128

# Split the data following the cross-validation convention.
def split_dataset(images, labels):
    # train_test_split from sklearn: 20% of the data goes to validation,
    # 80% to training, with a freshly drawn random seed each call.
    train_images, valid_images, train_labels, valid_labels = train_test_split(images, labels, test_size = 0.2, random_state = random.randint(0, 100))
    return train_images, valid_images, train_labels ,valid_labels

def data2h5(dirs_path, train_images, valid_images, train_labels ,valid_labels):
    # Shuffle each split (images and labels in unison) and write
    # train.hdf5 / val.hdf5 under dirs_path.
    #def data2h5(dirs_path, train_images, valid_images, test_images, train_labels ,valid_labels, test_labels):
    TRAIN_HDF5 = dirs_path + '/' + "train.hdf5"
    VAL_HDF5 = dirs_path + '/' + "val.hdf5"
    # Shuffle train and val sets while keeping images paired with labels:
    # restore the RNG state so both shuffles apply the same permutation.
    state1 = np.random.get_state()
    np.random.shuffle(train_images)
    np.random.set_state(state1)
    np.random.shuffle(train_labels)
    state2 = np.random.get_state()
    np.random.shuffle(valid_images)
    np.random.set_state(state2)
    np.random.shuffle(valid_labels)
    datasets = [
        ("train", train_images, train_labels, TRAIN_HDF5),
        ("val", valid_images, valid_labels, VAL_HDF5)]
    for (dType, images, labels, outputPath) in datasets:
        # Initialize the HDF5 write.
        # NOTE(review): f is not closed if create_dataset raises — consider
        # `with h5py.File(...)`.
        f = h5py.File(outputPath, "w")
        f.create_dataset("x_"+dType, data=images)
        f.create_dataset("y_"+dType, data=labels)
        #f.create_dataset("x_"+dType, data=images, compression="gzip", compression_opts=9)
        #f.create_dataset("y_"+dType, data=labels, compression="gzip", compression_opts=9)
        f.close()

def read_dataset(dirs):
    # Sanity-check pass: print sample counts of every HDF5 file in each
    # chunk subdirectory of `dirs`.
    files = os.listdir(dirs)
    print(files)
    for file in files:
        path = dirs + '/' + file
        file_read = os.listdir(path)
        for i in file_read:
            path_read = os.path.join(path, i)
            # NOTE(review): dataset handle is never closed — leaks one per file.
            dataset = h5py.File(path_read, "r")
            i = i.split('.')
            set_x_orig = dataset["x_"+i[0]].shape[0]
            set_y_orig = dataset["y_"+i[0]].shape[0]
            print(set_x_orig)
            print(set_y_orig)

# Loop over every image under each label's directory.
def load_dataset(path_name, data_path):
    images = []          # per-class image buffer (cleared after each class)
    labels = []          # per-class label buffer
    train_images = []    # global accumulators, flushed per HDF5 chunk
    valid_images = []
    train_labels = []
    valid_labels = []
    counter = 0          # integer label of the current class directory
    allpath = os.listdir(path_name)
    nb_classes = len(allpath)
    print("label_num: ", nb_classes)
    for child_dir in allpath:
        child_path = os.path.join(path_name, child_dir)
        for dir_image in os.listdir(child_path):
            if dir_image.endswith('.jpg'):
                img = cv2.imread(os.path.join(child_path, dir_image))
                # NOTE(review): misc.imresize was removed in SciPy >= 1.3.
                image = misc.imresize(img, (IMAGE_SIZE, IMAGE_SIZE), interp='bilinear')
                #resized_img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
                images.append(image)
                labels.append(counter)
        # Split this class's data, then fold it into the global accumulators.
        images = np.array(images)
        t_images, v_images, t_labels ,v_labels = split_dataset(images, labels)
        for i in range(len(t_images)):
            train_images.append(t_images[i])
            train_labels.append(t_labels[i])
        for j in range(len(v_images)):
            valid_images.append(v_images[j])
            valid_labels.append(v_labels[j])
        # Progress report every 50 classes.
        if counter%50== 49:
            print( counter+1 , "is read to the memory!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        # Clear per-class buffers to keep peak memory low.
        images = []
        labels = []
        # Flush a chunk to disk every 4316 classes, or at the last class.
        if ((counter % 4316 == 4315) or (counter == nb_classes - 1)):
            print("train_images num: ", len(train_images), " ", "valid_images num: ",len(valid_images))
            print("start write images and labels data...................................................................")
            # NOTE(review): divisor 5000 does not match the 4316 chunk size,
            # and num - 1 is -1 for the first chunk (dir "h5_-1") — verify.
            num = counter // 5000
            dirs = data_path + "/" + "h5_" + str(num - 1)
            if not os.path.exists(dirs):
                os.makedirs(dirs)
            data2h5(dirs, train_images, valid_images, train_labels ,valid_labels)
            #read_dataset(dirs)
            print("File HDF5_%d "%num, " id done!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            # Reset the global accumulators for the next chunk.
            train_images = []
            valid_images = []
            train_labels = []
            valid_labels = []
        counter = counter + 1
    print("All File HDF5 done!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    read_dataset(data_path)

# Read the training data folder names and return them as a list.
def read_name_list(path_name):
    name_list = []
    for child_dir in os.listdir(path_name):
        name_list.append(child_dir)
    return name_list

if __name__ == '__main__':
    path = "data"
    data_path = "data_hdf5_half"
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    load_dataset(path,data_path)
以上这篇完美解决keras 读取多个hdf5文件进行训练的问题就是小编分享给大家的全部内容了,希望能给大家一个参考,也希望大家多多支持。
免责声明:本站资源来自互联网收集,仅供用于学习和交流,请遵循相关法律法规,本站一切资源不代表本站立场,如有侵权、后门、不妥请联系本站删除!
《魔兽世界》大逃杀!60人新游玩模式《强袭风暴》3月21日上线
暴雪近日发布了《魔兽世界》10.2.6 更新内容,新游玩模式《强袭风暴》即将于3月21 日在亚服上线,届时玩家将前往阿拉希高地展开一场 60 人大逃杀对战。
艾泽拉斯的冒险者已经征服了艾泽拉斯的大地及遥远的彼岸。他们在对抗世界上最致命的敌人时展现出过人的手腕,并且成功阻止终结宇宙等级的威胁。当他们在为即将于《魔兽世界》资料片《地心之战》中来袭的萨拉塔斯势力做战斗准备时,他们还需要在熟悉的阿拉希高地面对一个全新的敌人──那就是彼此。在《巨龙崛起》10.2.6 更新的《强袭风暴》中,玩家将会进入一个全新的海盗主题大逃杀式限时活动,其中包含极高的风险和史诗级的奖励。
《强袭风暴》不是普通的战场,作为一个独立于主游戏之外的活动,玩家可以用大逃杀的风格来体验《魔兽世界》,不分职业、不分装备(除了你在赛局中捡到的),光是技巧和战略的强弱之分就能决定出谁才是能坚持到最后的赢家。本次活动将会开放单人和双人模式,玩家在加入海盗主题的预赛大厅区域前,可以从强袭风暴角色画面新增好友。游玩游戏将可以累计名望轨迹,《巨龙崛起》和《魔兽世界:巫妖王之怒 经典版》的玩家都可以获得奖励。
更新日志
- 凤飞飞《我们的主题曲》飞跃制作[正版原抓WAV+CUE]
- 刘嘉亮《亮情歌2》[WAV+CUE][1G]
- 红馆40·谭咏麟《歌者恋歌浓情30年演唱会》3CD[低速原抓WAV+CUE][1.8G]
- 刘纬武《睡眠宝宝竖琴童谣 吉卜力工作室 白噪音安抚》[320K/MP3][193.25MB]
- 【轻音乐】曼托凡尼乐团《精选辑》2CD.1998[FLAC+CUE整轨]
- 邝美云《心中有爱》1989年香港DMIJP版1MTO东芝首版[WAV+CUE]
- 群星《情叹-发烧女声DSD》天籁女声发烧碟[WAV+CUE]
- 刘纬武《睡眠宝宝竖琴童谣 吉卜力工作室 白噪音安抚》[FLAC/分轨][748.03MB]
- 理想混蛋《Origin Sessions》[320K/MP3][37.47MB]
- 公馆青少年《我其实一点都不酷》[320K/MP3][78.78MB]
- 群星《情叹-发烧男声DSD》最值得珍藏的完美男声[WAV+CUE]
- 群星《国韵飘香·贵妃醉酒HQCD黑胶王》2CD[WAV]
- 卫兰《DAUGHTER》【低速原抓WAV+CUE】
- 公馆青少年《我其实一点都不酷》[FLAC/分轨][398.22MB]
- ZWEI《迟暮的花 (Explicit)》[320K/MP3][57.16MB]