# Daftar Lampiran Coding Python Recognize
# (Appendix listing: Python face-recognition code. The header line above was
# repeated three times by the document extraction; collapsed to a comment so
# the file stays parseable.)
import sys
from contextlib import contextmanager
from os import path, mkdir, listdir
from typing import Optional
import RPi.GPIO as GPIO
import time
import cv2
import numpy as np
from PyQt5.QtCore import QSize, QTimer, QStringListModel, Qt, \
QItemSelectionModel
from PyQt5.QtGui import QImage, QPixmap, QKeySequence
from PyQt5.QtWidgets import QWidget, QLabel, QApplication, QHBoxLayout, \
QShortcut, QVBoxLayout, QListView, QPushButton, QLineEdit, QGroupBox, \
QStyledItemDelegate
import data_provider
from model import PCALDAClassifier
class NoFacesError(Exception):
    """Raised when an action needs a detected face but none is present."""
class MultipleFacesError(Exception):
    """Raised when exactly one face is required but several were detected."""
class CapitalizeDelegate(QStyledItemDelegate):
    """Item delegate that shows list entries with a leading capital letter.

    Only the rendered text changes; the underlying model data is untouched.
    """

    def displayText(self, value, locale):
        return super().displayText(value, locale).capitalize()
class MainApp(QWidget):
    """Main window: live camera feed with face detection and recognition."""

    # Presumably a distance threshold above which a prediction counts as a
    # stranger (a distance is returned alongside labels below) -- TODO confirm.
    STRANGER_DANGER = 350
    # Size every face image is normalized to -- presumably (width, height);
    # verify against the data provider.
    IMAGE_SIZE = (100, 100)

    1

    # NOTE(review): the bare "1" above is a page-number artifact from the
    # document extraction. The `def __init__(self, fps, parent=None):` header
    # was lost with it -- the statements below are the constructor body
    # (they reference `fps` and `parent`, and the type comment matches).
    # type: (int, Optional[QWidget]) -> None
    super().__init__(parent=parent)
    # Package root: two directories above this file.
    self.pkg_path = path.dirname(path.dirname(path.abspath(__file__)))
    self.training_data_dir = path.join(self.pkg_path, 'train')
    self.models_dir = path.join(self.pkg_path, 'models')
    self.model_fname = 'fisherfaces.p'
    try:
        # load_model evidently signals a missing/invalid file via
        # AssertionError; treat that as "no trained model yet".
        self.model = data_provider.load_model(
            path.join(self.models_dir, self.model_fname))
    except AssertionError:
        self.model = None
    # Labels mirror the training-data folder names.
    self.existing_labels = QStringListModel(self.get_existing_labels())
    self.fps = fps
    self.video_size = QSize(640, 480)
    self.gray_image = None
    self.detected_faces = []
    # Setup the UI
    self.main_layout = QHBoxLayout()
    self.setLayout(self.main_layout)
    self.control_layout = QVBoxLayout()
    self.control_layout.setSpacing(8)
    self.main_layout.addItem(self.control_layout)
    # Text box for typing a new label; Enter adds it, then clears the box.
    self.new_label_txt = QLineEdit(self)
    self.new_label_txt.returnPressed.connect(self.add_new_label)
    self.new_label_txt.returnPressed.connect(self.new_label_txt.clear)
    self.control_layout.addWidget(self.new_label_txt)

    2

    # NOTE(review): artifact "2" above; the setup of `self.labels_view`
    # and `self.capture` (both used by other methods) is presumably
    # missing here -- lost in extraction.
    # Setup the training area
    train_box = QGroupBox('Train', self)
    train_box_layout = QVBoxLayout()
    train_box.setLayout(train_box_layout)
    self.control_layout.addWidget(train_box)
    self.train_btn = QPushButton('Train', self)
    self.train_btn.clicked.connect(self.train)
    train_box_layout.addWidget(self.train_btn)
    self.control_layout.addStretch(0)
    # Repaint the video widget self.fps times per second.
    self.timer = QTimer()
    self.timer.timeout.connect(self.display_video_stream)
    self.timer.start(int(1000 / self.fps))
    # NOTE(review): the two lines below are the tail of a lost method
    # (most likely `classify_face`): they map a predicted label index back
    # to its string and return it with the classification distance.
    labels = self.existing_labels.stringList()
    return labels[label_idx], distance

    3
def get_training_data(self):
    """Load the training images from disk.

    Delegates to the data provider, which returns an ``(X, y, mapping)``
    triple with X an n*(w*h) matrix of flattened images.
    """
    source_dir = self.training_data_dir
    return data_provider.get_image_data_from_directory(source_dir)
def train(self):
    """Fit a PCA+LDA (Fisherfaces) classifier on the on-disk training set."""
    X, y, mapping = self.get_training_data()
    # Inspect scree plot to determine appropriate number of PCA components
    classifier = PCALDAClassifier(
        n_components=2, pca_components=200, metric='euclidean',
    ).fit(X, y)
    # NOTE(review): `classifier` is never stored or persisted here; the
    # original presumably assigned it to self.model and/or saved it under
    # self.models_dir -- those lines appear lost in extraction.
def add_new_label(self):
    """Take the text-box content and add it as a new (lower-cased) label."""
    new_label = self.new_label_txt.text()
    # Labels are normalized to lower case; presumably CapitalizeDelegate
    # capitalizes them for display -- confirm where the view is set up.
    new_label = new_label.lower()
    string_list = self.existing_labels.stringList()
    # NOTE(review): the method ends without inserting `new_label` into the
    # model -- the rest of the listing appears truncated in extraction.
def display_video_stream(self):
    """Read frame from camera and repaint QLabel widget."""
    # NOTE(review): self.capture is not initialized anywhere visible in this
    # listing -- presumably created in the lost part of __init__.
    _, frame = self.capture.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Mirror horizontally so the feed behaves like a mirror.
    frame = cv2.flip(frame, 1)

    4

    # NOTE(review): artifact "4" above; face-detection code (e.g.
    # detectMultiScale producing `face` / the (x, y, w, h) boxes used
    # below) appears to be missing here.
    # Use the Viola-Jones face detector to detect faces to classify
    face_cascade = cv2.CascadeClassifier(path.join(
        self.pkg_path, 'resources', 'haarcascade_frontalface_default.xml'))
    # NOTE(review): `frame` was already converted to RGB above, yet the
    # BGR2GRAY code is used here -- likely a red/blue channel mix-up; the
    # grayscale weighting differs. Confirm against the original listing.
    self.gray_image = gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    result = self.classify_face(face)
    # If a model is loaded, we can predict
    if result:
        predicted, distance = self.classify_face(face)
        # Recognizing the person "zenzen" drives GPIO pin 12 (door lock /
        # indicator -- presumably; verify against the hardware wiring).
        if predicted.lower() == "zenzen":
            print("Zenzen Detect")
            GPIO.setmode(GPIO.BOARD)
            GPIO.setwarnings(False)
            GPIO.setup(12,GPIO.OUT)
            # NOTE(review): the pin is configured but never driven --
            # a GPIO.output(12, ...) call before the sleep is presumably
            # missing from the extracted listing.
            time.sleep(5)
            GPIO.cleanup()

    5

    # NOTE(review): artifact "5" above; the branch that marks unknown
    # faces is cut off -- `x, y, w, h` and `self.stranger_color` are
    # defined in code lost from this listing.
    cv2.rectangle(frame, (x, y), (x + w, y + h),
                  self.stranger_color, 2)
    cv2.putText(frame, 'Stranger danger!', (x, y + h + 15),
                cv2.FONT_HERSHEY_TRIPLEX, 0.5, self.stranger_color)
@contextmanager
def stop_camera_feed(self):
    """Suspend the frame timer (and thus face detection) for the block.

    The feed is restarted on exit even if the body raises.
    """
    try:
        self.timer.stop()
        yield
    finally:
        interval_ms = int(1000 / self.fps)
        self.timer.start(interval_ms)
def take_picture(self):
    """Save the single currently-detected face under the selected label."""
    # Notify the user there were no faces detected
    if self.detected_faces is None or len(self.detected_faces) < 1:
        return
        # NOTE(review): unreachable -- dead code after `return`. Either the
        # `return` or the `raise` is the intended behavior; as extracted,
        # NoFacesError is never raised.
        raise NoFacesError()
    if len(self.detected_faces) > 1:
        return
        # NOTE(review): unreachable for the same reason as above.
        raise MultipleFacesError()
    with self.stop_camera_feed():
        x, y, w, h = self.detected_faces[0]
        if not self.selected_label:
            return
        # NOTE(review): `denoised_image` is undefined in this listing --
        # the crop/resize/denoise steps between the unpack above and this
        # call were presumably lost in extraction. `save_image` itself is
        # not visible either (the orphan fragment near the end of the file
        # looks like part of it).
        self.save_image(denoised_image, self.selected_label)
@property
def selected_label(self):
    """The label currently highlighted in the labels list view.

    Returns:
        The display string of the first selected index, or ``None`` when
        nothing is selected.
    """
    # NOTE(review): self.labels_view is not initialized anywhere visible in
    # this listing -- presumably set up in the lost part of __init__.
    indexes = self.labels_view.selectedIndexes()
    if len(indexes) < 1:
        return None
    # Only a single selection is meaningful here; use the first index.
    # (A stray page-number artifact "6" that sat here as a dead expression
    # statement has been removed.)
    return self.existing_labels.data(indexes[0], Qt.DisplayRole)
def get_existing_labels(self):
    """List the labels that already have a folder in the training directory."""
    labels = data_provider.get_folder_names(self.training_data_dir)
    return labels
# NOTE(review): orphaned fragment -- the enclosing `def` header (most likely
# a `save_image(image, label)` method, given the call in take_picture) was
# lost in extraction, along with whatever defines `label_path`. The code
# computes the highest numeric filename already in the label's folder so the
# next image can be numbered after it.
existing_files = listdir(label_path)
# Strip extensions, leaving the numeric stems.
existing_files = map(lambda p: path.splitext(p)[0], existing_files)
existing_files = list(map(int, existing_files))
# Highest existing number, or 0 for an empty folder.
last_fname = sorted(existing_files)[-1] if len(existing_files) else 0
if __name__ == "__main__":
    # Build the Qt application, show the main window, and exit the process
    # with the event loop's return status.
    qt_app = QApplication(sys.argv)
    main_window = MainApp()
    main_window.show()
    sys.exit(qt_app.exec_())