首页 >> 大全

人工智能机器学习大作业

2024-01-01 大全 33 作者:考证青年

人工智能机器学习大作业

实验内容

1 理解人脸图像特征提取的各种方法(至少包括HoG、Dlib和卷积神经网络特征);2. 掌握笑脸数据集(genki4k)正负样本的划分、模型训练和测试的过程(至少包括SVM、CNN),输出模型训练精度和测试精度(F1-score和ROC);3. 完成一个摄像头采集自己人脸、并对表情(笑脸和非笑脸)的实时分类判读(输出分类文字)的程序;4. 将笑脸数据集换成口罩数据集,完成对口罩佩戴与否的模型训练,采取合适的特征提取方法,重新做上述2-3步。

2 完成实验报告和技术报告,技术报告写入博客,提交博客地址到学习通,和代码发邮件。实验报告按照最初的doc实验报告模版格式撰写(实验类型写“综合性、创新性”),也提交至邮箱。

笑脸数据集(genki4k)正负样本的划分、模型训练和测试的过程

训练数据集

import keras
keras.__version__
import os, shutil

# Path to the directory where the original GENKI-4K dataset was uncompressed.
# NOTE(review): fixed typo — the scraped original read "riginal_dataset_dir",
# which would leave "original_dataset_dir" undefined for later use.
original_dataset_dir = 'C:\\Users\\Desktop\\genki4k'

# The directory where we will store our smaller train/validation/test dataset.
base_dir = 'C:\\Users\\Desktop\\genki4k\\smile_and_unsmile'
# makedirs(exist_ok=True) instead of mkdir: re-running the script must not
# crash if the directories were already created on a previous run.
os.makedirs(base_dir, exist_ok=True)

# Directories for our training, validation and test splits.
train_dir = os.path.join(base_dir, 'train')
os.makedirs(train_dir, exist_ok=True)
validation_dir = os.path.join(base_dir, 'validation')
os.makedirs(validation_dir, exist_ok=True)
test_dir = os.path.join(base_dir, 'test')
os.makedirs(test_dir, exist_ok=True)

# Directory with our training smile pictures.
train_smile_dir = os.path.join(train_dir, 'smile')
os.makedirs(train_smile_dir, exist_ok=True)
# Directory with our training unsmile pictures.
train_unsmile_dir = os.path.join(train_dir, 'unsmile')
os.makedirs(train_unsmile_dir, exist_ok=True)

# Directory with our validation smile pictures.
validation_smile_dir = os.path.join(validation_dir, 'smile')
os.makedirs(validation_smile_dir, exist_ok=True)
# Directory with our validation unsmile pictures.
validation_unsmile_dir = os.path.join(validation_dir, 'unsmile')
os.makedirs(validation_unsmile_dir, exist_ok=True)

# Directory with our test smile pictures.
test_smile_dir = os.path.join(test_dir, 'smile')
os.makedirs(test_smile_dir, exist_ok=True)
# Directory with our test unsmile pictures.
test_unsmile_dir = os.path.join(test_dir, 'unsmile')
os.makedirs(test_unsmile_dir, exist_ok=True)

**构建小型卷积神经网络**

人工智能机器学习大作业_人工智能机器学习大作业_

from keras import layers
from keras import models

# Small CNN for binary smile / non-smile classification on 150x150 RGB inputs:
# four Conv+MaxPool stages with a widening filter schedule, then a dense head.
model = models.Sequential()
conv_kwargs = {'input_shape': (150, 150, 3)}  # only the first layer needs it
for n_filters in (32, 64, 128, 128):
    model.add(layers.Conv2D(n_filters, (3, 3), activation='relu', **conv_kwargs))
    model.add(layers.MaxPooling2D((2, 2)))
    conv_kwargs = {}
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
# Single sigmoid unit: outputs a probability, to be paired with
# binary_crossentropy loss and binary class labels.
model.add(layers.Dense(1, activation='sigmoid'))

数据预处理

# NOTE(review): the scraped original split this import, the "# All images..."
# comment and the validation_generator assignment across lines, producing
# syntax errors; rejoined here.
from keras.preprocessing.image import ImageDataGenerator

# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    # This is the target directory
    train_dir,
    # All images will be resized to 150x150
    target_size=(150, 150),
    batch_size=20,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

摄像头笑脸识别

import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
import dlib
from PIL import Image

model = load_model('smile_and_unsmile_2.h5')
detector = dlib.get_frontal_face_detector()
video = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX


def rec(img):
    """Detect faces in a BGR frame, classify each as smile/unsmile and
    annotate the frame in place, then display it."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # detector() always returns a (possibly empty) rectangles sequence,
    # so iterating directly is safe — the original "is not None" check
    # was always true.
    dets = detector(gray, 1)
    for face in dets:
        # Clamp the dlib rectangle to the frame: a face partially outside
        # the image would otherwise yield an empty crop and crash cv2.resize.
        left = max(face.left(), 0)
        top = max(face.top(), 0)
        right = min(face.right(), img.shape[1])
        bottom = min(face.bottom(), img.shape[0])
        cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
        face_img = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))
        face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
        # Same preprocessing as training: RGB, scaled to [0, 1].
        img_tensor = (np.array(face_img) / 255.).reshape(-1, 150, 150, 3)
        prediction = model.predict(img_tensor)
        print(prediction)
        # Sigmoid output > 0.5 -> class 1 ('unsmile' — presumably because
        # flow_from_directory orders class folders alphabetically; verify
        # against the training directory layout).
        if prediction[0][0] > 0.5:
            result = 'unsmile'
        else:
            result = 'smile'
        cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2,
                    cv2.LINE_AA)
    cv2.imshow('Video', img)


# Main capture loop: read frames until the camera closes or 'q' is pressed.
while video.isOpened():
    res, img_rd = video.read()
    if not res:
        break
    rec(img_rd)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()

佩戴口罩人脸识别

import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
import dlib
from PIL import Image

model = load_model('C:\\Users\\UHS\\Desktop\\test\\smile_and_nosmile_1.h5')
detector = dlib.get_frontal_face_detector()
video = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX


def rec(img):
    """Detect faces in a BGR frame, classify each as mask/nomask and
    annotate the frame in place, then display it."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # detector() always returns a (possibly empty) rectangles sequence,
    # so iterating directly is safe — the original "is not None" check
    # was always true.
    dets = detector(gray, 1)
    for face in dets:
        # Clamp the dlib rectangle to the frame: a face partially outside
        # the image would otherwise yield an empty crop and crash cv2.resize.
        left = max(face.left(), 0)
        top = max(face.top(), 0)
        right = min(face.right(), img.shape[1])
        bottom = min(face.bottom(), img.shape[0])
        cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
        # NOTE(review): the scraped original was missing the closing paren
        # on this resize call — fixed.
        face_img = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))
        face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
        # Same preprocessing as training: RGB, scaled to [0, 1].
        img_tensor = (np.array(face_img) / 255.).reshape(-1, 150, 150, 3)
        prediction = model.predict(img_tensor)
        print(prediction)
        # Sigmoid output > 0.5 -> class 1 ('mask' — presumably because
        # flow_from_directory orders class folders alphabetically; verify
        # against the training directory layout).
        if prediction[0][0] > 0.5:
            result = 'mask'
        else:
            result = 'nomask'
        # NOTE(review): fixed typo "v2.putText" -> "cv2.putText" (NameError).
        cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2,
                    cv2.LINE_AA)
    cv2.imshow('maskdetector', img)


# Main capture loop: read frames until the camera closes or 'q' is pressed.
while video.isOpened():
    res, img_rd = video.read()
    if not res:
        break
    rec(img_rd)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()

人工智能机器学习大作业_人工智能机器学习大作业_

口罩识别结果

实验体会

(1)在我进行这个项目的时候我没有把虚拟机的环境配好,所以是在本机环境下完成的,但是不管在哪里运行这个代码,都需要提前安装好Keras,不然程序是运行不了的。

(2)我的电脑太卡了,运行了好半天笑脸识别还是不行,所以我这个报告中缺少笑脸识别部分的运行结果;

(3)代码运行的时候一定要提前写清楚文件的路径,我的模型训练花费了很久的时间才发现是我的训练集的路径不对,所以要提前检查好;

参考文献

关于我们

最火推荐

小编推荐

联系我们


版权声明:本站内容由互联网用户自发贡献,该文观点仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌抄袭侵权/违法违规的内容, 请发送邮件至 88@qq.com 举报,一经查实,本站将立刻删除。备案号:桂ICP备2021009421号
Powered By Z-BlogPHP.
复制成功
微信号:
我知道了