def run(self):
    # Initialise OpenCV: webcam capture plus an MJPG writer for recorded clips.
    self.Capture = cv2.VideoCapture(0)
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    out = cv2.VideoWriter(
        f'{basedir}/tmp/video/output_{datetime.now().strftime("%A_%d_%B_%Y_%I_%M_%S")}.avi',
        fourcc, 20, (640, 480))
    # Initialise the reference frame for motion detection.
    ret, prevFrame = self.Capture.read()
    self.prevFrame = self.grayScale(prevFrame)
    motionFrames = 0
    noMotionFrames = 0
    self.recording = False
    # Tell the GUI the capture size ([width, height]).
    self.VideoSize.emit([self.Capture.get(cv2.CAP_PROP_FRAME_WIDTH),
                         self.Capture.get(cv2.CAP_PROP_FRAME_HEIGHT)])
    while self.is_running:
        self.ret, self.frame = self.Capture.read()
        if not self.ret:
            # The camera returned no frame; skip this iteration instead of crashing.
            continue
        self.gray = self.grayScale(self.frame)
        blurred = cv2.GaussianBlur(self.gray, (21, 21), 0)
        if self.firstFrame is None:
            # First pass: store a reference frame, then wait for the next one.
            self.firstFrame = blurred
            continue
        # Difference against the previous frame, then threshold and dilate to get motion blobs.
        frameDelta = cv2.absdiff(self.prevFrame, blurred)
        thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
        self.prevFrame = blurred
        thresh = cv2.dilate(thresh, None, iterations=3)
        contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        self.motionStatus = "Not Moving"
        for c in contours:
            if cv2.contourArea(c) < detect_value:  # detect_value sets the minimum blob size counted as motion
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(self.frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
            self.motionStatus = "Moving"
        # Overlay the motion status and a timestamp on the frame.
        cv2.putText(self.frame, "Ferret Status: {}".format(self.motionStatus), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        cv2.putText(self.frame, datetime.now().strftime("%A %d %B %Y %I:%M:%S"),
                    (10, self.frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        # Start recording after 10 consecutive "moving" frames, stop after 10 still ones.
        if self.motionStatus == "Moving":
            motionFrames += 1
            noMotionFrames = 0
            if motionFrames > 10:
                self.recording = True
        else:
            noMotionFrames += 1
            motionFrames = 0
            if noMotionFrames > 10:
                self.recording = False
        if self.recording:
            out.write(self.frame)
        # Convert the BGR frame to a QImage (with explicit stride) and send it to the GUI.
        Image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
        FlippedImage = cv2.flip(Image, 1)
        ConvertToQtFormat = QImage(FlippedImage.data, FlippedImage.shape[1], FlippedImage.shape[0],
                                   FlippedImage.strides[0], QImage.Format.Format_RGB888)
        Pic = ConvertToQtFormat.scaled(640, 480, Qt.AspectRatioMode.KeepAspectRatio)
        self.ImageUpdate.emit(Pic)
    # Release the camera and finalise the video file once the loop stops.
    self.Capture.release()
    out.release()
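
This run() method relies on names that are not shown in the snippet: the module-level basedir and detect_value, the grayScale() helper, the is_running and firstFrame attributes, and the ImageUpdate / VideoSize signals. Below is a minimal scaffolding sketch, assuming PyQt6 and a QThread subclass; the class name CameraThread and everything other than those referenced names is an assumption, not the original author's code.

    # Minimal scaffolding sketch (assumed, not from the original post).
    import os
    from datetime import datetime

    import cv2
    from PyQt6.QtCore import Qt, QThread, pyqtSignal
    from PyQt6.QtGui import QImage

    basedir = os.path.dirname(os.path.abspath(__file__))      # assumed: project root holding tmp/video/
    detect_value = 500                                         # assumed: minimum contour area (px) counted as motion
    os.makedirs(f'{basedir}/tmp/video', exist_ok=True)         # assumed: make sure the output folder exists


    class CameraThread(QThread):          # hypothetical class name
        ImageUpdate = pyqtSignal(QImage)  # emits each processed frame for the GUI
        VideoSize = pyqtSignal(list)      # emits [width, height] of the capture

        def __init__(self, parent=None):
            super().__init__(parent)
            self.is_running = True
            self.firstFrame = None        # run() checks this at the top of the loop

        def grayScale(self, frame):
            # Assumed helper: plain BGR -> grayscale conversion used by run().
            return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        def stop(self):
            self.is_running = False
            self.wait()

        # run() as shown above goes here.

From the GUI side the thread would typically be wired up with thread = CameraThread(), thread.ImageUpdate.connect(some_slot) to update a QLabel pixmap, thread.VideoSize.connect(resize_slot), then thread.start(), and stopped with thread.stop() when the window closes.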