1. For classification models:
import numpy as np
import scipy.misc  # note: scipy.misc.imsave was removed in SciPy 1.2+; see the imageio sketch below
import os

# DF1
path = "/home/pi/工作/predict1/"      # directory containing the .npy files
npy_list = os.listdir(path)
save_path = "/home/pi/predict1_img/"  # directory to write the converted images
if not os.path.exists(save_path):
    os.mkdir(save_path)
for i in range(0, len(npy_list)):
    print(i)
    print(npy_list[i])
    npy_full_path = os.path.join(path, npy_list[i])
    img = np.load(npy_full_path)  # load the array from the .npy file
    save_full_path = os.path.join(save_path, npy_list[i][:-4])  # strip the ".npy" suffix
    scipy.misc.imsave(save_full_path, img)  # save as an image
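scipy.misc.imsave was deprecated in SciPy 1.0 and removed in 1.2, so the loop above fails on newer installations. Below is a minimal sketch of the same conversion using imageio instead; the .png output extension and the uint8 cast are assumptions (imageio picks the format from the file extension, and the arrays are assumed to hold 8-bit image data), and the paths are the same placeholders used above.

import os
import numpy as np
import imageio

path = "/home/pi/工作/predict1/"
save_path = "/home/pi/predict1_img/"
os.makedirs(save_path, exist_ok=True)

for name in os.listdir(path):
    img = np.load(os.path.join(path, name))  # load the stored array
    out_name = name[:-4] + ".png"            # ".npy" -> ".png" (assumed output format)
    imageio.imwrite(os.path.join(save_path, out_name), img.astype(np.uint8))  # assumes 8-bit pixel values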
2. For segmentation models
"""
将数据集随机分成训练集、测试集
传入参数:
ratio = 0.7 # 训练样本比例
path = "/home/pi/20190701_0705" # 数据路径
new_path = "/home/pi/20190701_0705_new2" # 保存路径
使用方法:
temp = Generate_Train_and_Test(path, new_path, ratio)
temp.splict_data()
"""
import random
import os
import cv2
def makeDir(path):
    """Create the directory if it does not exist.
    Returns 0 after creating it, 1 if the path already exists, -2 on error."""
    try:
        if not os.path.exists(path):
            if not os.path.isfile(path):
                # os.mkdir(path)
                os.makedirs(path)
            return 0
        else:
            return 1
    except Exception as e:
        print(str(e))
        return -2
class Generate_Train_and_Test:
    def __init__(self, path, new_path, ratio):
        if not os.path.exists(new_path):
            makeDir(new_path)
        self.path = path
        self.new_path = new_path
        self.ratio = ratio
        self.train_sample_path = os.path.join(new_path, "train")
        self.test_sample_path = os.path.join(new_path, "test")
        makeDir(self.train_sample_path)
        makeDir(self.test_sample_path)
    def splict_data(self):
        class_names = os.listdir(self.path)  # class folders, e.g. bg and ng10
        for name in class_names:
            print("process class name=%s" % name)
            tmp_class_name = os.path.join(self.path, name)
            save_train_class_name = os.path.join(self.train_sample_path, name)
            save_test_class_name = os.path.join(self.test_sample_path, name)
            makeDir(save_train_class_name)
            makeDir(save_test_class_name)
            if os.path.isdir(tmp_class_name):
                image_names = os.listdir(tmp_class_name)  # all images of this class
                image_names = [f for f in image_names if not f.endswith('_mask.png')]
                total = len(image_names)
                # 1. shuffle all images of the current class
                random.shuffle(image_names)
                # 2. take the first part of the shuffled list as training data
                train_temp = int(self.ratio * total)  # number of training samples
                for i in range(0, train_temp):
                    print(i, image_names[i])
                    temp_img_name = os.path.join(tmp_class_name, image_names[i])
                    train_image = cv2.imread(temp_img_name)
                    temp_label_name = os.path.join(tmp_class_name, image_names[i][:-4] + '_mask.png')
                    train_label = cv2.imread(temp_label_name)
                    save_train_img_name = os.path.join(save_train_class_name, image_names[i])
                    cv2.imwrite(save_train_img_name, train_image)
                    save_train_label_name = os.path.join(save_train_class_name, image_names[i][:-4] + '_mask.png')
                    cv2.imwrite(save_train_label_name, train_label)
                # 3. take the remaining images as test data
                for i in range(train_temp, total):
                    print(i, image_names[i])
                    test_img_name = os.path.join(tmp_class_name, image_names[i])
                    test_image = cv2.imread(test_img_name)
                    test_label_name = os.path.join(tmp_class_name, image_names[i][:-4] + '_mask.png')
                    test_label = cv2.imread(test_label_name)
                    save_test_img_name = os.path.join(save_test_class_name, image_names[i])
                    cv2.imwrite(save_test_img_name, test_image)
                    save_test_label_name = os.path.join(save_test_class_name, image_names[i][:-4] + '_mask.png')
                    cv2.imwrite(save_test_label_name, test_label)
ratio = 0.7  # fraction of samples used for training
path = "/home/pi/工作/20190712_splict"  # source data path
new_path = "/home/pi/工作/20190712_splict_new3"  # output path
temp = Generate_Train_and_Test(path, new_path, ratio)
temp.splict_data()
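After the split finishes, it can be worth checking that every copied image still has its matching mask and that the train/test ratio looks right. A minimal sketch of such a check, assuming the output layout produced above (new_path/train/<class>/ and new_path/test/<class>/ with '_mask.png' label files); check_split is a hypothetical helper, not part of the original script.

import os

def check_split(split_path):
    """Count image/mask pairs per class folder and report images with no mask."""
    for class_name in sorted(os.listdir(split_path)):
        class_dir = os.path.join(split_path, class_name)
        if not os.path.isdir(class_dir):
            continue
        files = os.listdir(class_dir)
        images = [f for f in files if not f.endswith('_mask.png')]
        missing = [f for f in images if f[:-4] + '_mask.png' not in files]
        print("%s/%s: %d images, %d without a mask" %
              (split_path, class_name, len(images), len(missing)))

check_split("/home/pi/工作/20190712_splict_new3/train")
check_split("/home/pi/工作/20190712_splict_new3/test")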
Extra note: converting a folder of images into a .npy file with Python
For a deep neural network I needed to turn a folder containing nothing but .png images into a single .npy file.
The idea is:
if the number of images in the folder is known, create a 3-D array whose first dimension is the number of images and whose last two dimensions are the image height and width;
then use np.save() to write that 3-D array to a .npy file.
import numpy as np
import imageio
import os

# switch the working directory to the image folder; mri_2d_test is my image folder
os.chdir('E:/RegistrationCode/papercode/datasets/mri_2d_test')
# np.ones() (np.zeros() works just as well) creates a 3-D array; each element a[i] holds one image
a = np.ones((190, 192, 160))
i = 0
for filename in os.listdir(r"E:/RegistrationCode/papercode/datasets/mri_2d_test"):  # list every image name in the folder
    im = imageio.imread(filename)
    a[i] = im
    i = i + 1
    if i == 190:  # 190 is the number of images in the folder
        break
np.save('path_and_name_of_the_output.npy', a)  # placeholder: path and name of the .npy file to save
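The hard-coded count of 190 and the fixed (192, 160) shape only work for that particular folder. Below is a minimal sketch of the same idea that infers the count and shape from the files themselves (assuming all images share one size) and then loads the result back to confirm it; the folder path and output name are placeholders.

import os
import numpy as np
import imageio

folder = 'E:/RegistrationCode/papercode/datasets/mri_2d_test'  # placeholder image folder
names = sorted(f for f in os.listdir(folder) if f.endswith('.png'))

# stack all images into one array; shape becomes (num_images, height, width[, channels])
a = np.stack([imageio.imread(os.path.join(folder, n)) for n in names])
np.save('images.npy', a)  # placeholder output name

b = np.load('images.npy')  # load it back to confirm the shape
print(b.shape, b.dtype)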
That is all for this example of converting .npy files to images and saving them; hopefully it serves as a useful reference.