I'm working on an image-processing project using OpenCV 3.4.3 with the contrib modules, Python 3.6, and a PyQt5 GUI. The problem I'm stuck on is how to count the objects the camera detects. It runs in real time from a webcam. The project counts chili peppers and sorts them by color: for example, if a frame contains 5 red chilies, the GUI I built should show "Red count : 5". At the moment it can only detect multiple colors using HSV values; it can't count them yet. So I'd like to ask anyone knowledgeable or experienced in this area roughly how I should write this. The code I have so far, which handles the detection, is below >>
import sys
import cv2
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QDialog, QApplication
from PyQt5.uic import loadUi
import numpy as np
def cnt_count(args):
    pass
class Life2Coding(QDialog):
    def __init__(self):
        super(Life2Coding, self).__init__()
        loadUi('life2coding.ui', self)
        self.image = None
        self.startButton.clicked.connect(self.start_webcam)
        self.stopButton.clicked.connect(self.stop_webcam)
        self.trackButton.setCheckable(True)
        self.trackButton.toggled.connect(self.track_webcam_color)
        self.track_Enabled = False
        self.color1_Button.clicked.connect(self.setColor1)
        self.color2_Button.clicked.connect(self.setColor2)
        self.color3_Button.clicked.connect(self.setColor3)

    def track_webcam_color(self, status):
        if status:
            self.track_Enabled = True
            self.trackButton.setText('Stop Tracking')
        else:
            self.track_Enabled = False
            self.trackButton.setText('Track Color')

    def setColor1(self):
        self.color1_lower = np.array([self.hMin_2.value(), self.sMin_2.value(), self.vMin_2.value()], np.uint8)
        self.color1_upper = np.array([self.hMax_2.value(), self.sMax_2.value(), self.vMax_2.value()], np.uint8)
        self.color1_Label.setText('Min :' + str(self.color1_lower) + ' Max :' + str(self.color1_upper))

    def setColor2(self):
        self.color2_lower = np.array([self.hMin_2.value(), self.sMin_2.value(), self.vMin_2.value()], np.uint8)
        self.color2_upper = np.array([self.hMax_2.value(), self.sMax_2.value(), self.vMax_2.value()], np.uint8)
        self.color2_Label.setText('Min :' + str(self.color2_lower) + ' Max :' + str(self.color2_upper))

    def setColor3(self):
        self.color3_lower = np.array([self.hMin_2.value(), self.sMin_2.value(), self.vMin_2.value()], np.uint8)
        self.color3_upper = np.array([self.hMax_2.value(), self.sMax_2.value(), self.vMax_2.value()], np.uint8)
        self.color3_Label.setText('Min :' + str(self.color3_lower) + ' Max :' + str(self.color3_upper))

    def start_webcam(self):
        self.capture = cv2.VideoCapture(0)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 481)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 541)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(5)

    def update_frame(self):
        ret, self.image = self.capture.read()
        self.image = cv2.flip(self.image, 1)
        self.displayImage(self.image, 1)
        hsv = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)
        color_lower = np.array([self.hMin_2.value(), self.sMin_2.value(), self.vMin_2.value()], np.uint8)
        color_upper = np.array([self.hMax_2.value(), self.sMax_2.value(), self.vMax_2.value()], np.uint8)
        color_mask = cv2.inRange(hsv, color_lower, color_upper)
        self.displayImage(color_mask, 2)
        if (self.track_Enabled and (self.color1_Check.isChecked() or self.color2_Check.isChecked() or self.color3_Check.isChecked())):
            trackedImage = self.track_colored_object(self.image.copy())
            self.displayImage(trackedImage, 1)
        else:
            self.displayImage(self.image, 1)

    def track_colored_object(self, img):
        blur = cv2.blur(img, (3, 3))
        hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
        if self.color1_Check.isChecked():
            color_mask = cv2.inRange(hsv, self.color1_lower, self.color1_upper)
            erode = cv2.erode(color_mask, None, iterations=2)
            dilate = cv2.dilate(erode, None, iterations=10)
            kernelOpen = np.ones((5, 5))
            kernelClose = np.ones((20, 20))
            maskOpen = cv2.morphologyEx(dilate, cv2.MORPH_OPEN, kernelOpen)
            maskclose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
            print('Count :', cnt_count)
            (_, contours, hierarchy) = cv2.findContours(maskclose, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                area = cv2.contourArea(cnt)
                if area > 5000:
                    x, y, w, h = cv2.boundingRect(cnt)
                    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    cv2.putText(img, 'Color 1 Detected', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                    str_count = 'UP: ' + str(cnt_count)
        if self.color2_Check.isChecked():
            color_mask = cv2.inRange(hsv, self.color2_lower, self.color2_upper)
            erode = cv2.erode(color_mask, None, iterations=2)
            dilate = cv2.dilate(erode, None, iterations=10)
            kernelOpen = np.ones((5, 5))
            kernelClose = np.ones((20, 20))
            maskOpen = cv2.morphologyEx(dilate, cv2.MORPH_OPEN, kernelOpen)
            maskclose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
            (_, contours, hierarchy) = cv2.findContours(maskclose, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                area = cv2.contourArea(cnt)
                if area > 5000:
                    x, y, w, h = cv2.boundingRect(cnt)
                    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    cv2.putText(img, 'Color 2 Detected', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        if self.color3_Check.isChecked():
            color_mask = cv2.inRange(hsv, self.color3_lower, self.color3_upper)
            erode = cv2.erode(color_mask, None, iterations=2)
            dilate = cv2.dilate(erode, None, iterations=10)
            kernelOpen = np.ones((5, 5))
            kernelClose = np.ones((20, 20))
            maskOpen = cv2.morphologyEx(dilate, cv2.MORPH_OPEN, kernelOpen)
            maskclose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
            (_, contours, hierarchy) = cv2.findContours(maskclose, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                area = cv2.contourArea(cnt)
                if area > 5000:
                    x, y, w, h = cv2.boundingRect(cnt)
                    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    cv2.putText(img, 'Color 3 Detected', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
        return img

    def stop_webcam(self):
        self.timer.stop()

    def displayImage(self, img, window=1):
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:  # [0]=rows, [1]=cols, [2]=channels
            if img.shape[2] == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        # BGR >> RGB
        outImage = outImage.rgbSwapped()
        if window == 1:
            self.imgLabel.setPixmap(QPixmap.fromImage(outImage))
            self.imgLabel.setScaledContents(True)
        if window == 2:
            self.processedLabel.setPixmap(QPixmap.fromImage(outImage))
            self.processedLabel.setScaledContents(True)
if __name__=='__main__':
    app = QApplication(sys.argv)
    window = Life2Coding()
    window.setWindowTitle('Chilli Tracking')
    window.show()
    sys.exit(app.exec_())
So, to anyone who knows OpenCV / Python / image processing: what do I need to write so that the program counts the number of objects it detects?
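One way to look at it: the contours that pass the area filter in track_colored_object are exactly the detections being boxed, so the count for a color is simply the length of that filtered list. Below is a minimal sketch, not a drop-in fix, that factors the mask-and-count step into one helper. It reuses the same inRange / erode / dilate / area-filter idea as the code above, and assumes OpenCV 3.x (matching the poster's 3.4.3) for the findContours return value.

import cv2
import numpy as np

def count_colored_objects(frame_bgr, hsv_lower, hsv_upper, min_area=5000):
    """Return (annotated frame, number of blobs) for one HSV color range."""
    blur = cv2.blur(frame_bgr, (3, 3))
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, hsv_lower, hsv_upper)

    # Clean the mask before looking for blobs, as in track_colored_object.
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # OpenCV 3.x returns (image, contours, hierarchy); on 4.x drop the first value.
    _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Every contour larger than min_area counts as one chili.
    chilies = [c for c in contours if cv2.contourArea(c) > min_area]
    for c in chilies:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(frame_bgr, (x, y), (x + w, y + h), (0, 0, 255), 2)

    return frame_bgr, len(chilies)

Inside track_colored_object the color-1 branch could then be reduced to something like the two lines below; redCount_Label is a made-up name and stands for whatever QLabel you add to the .ui file for the count. With that in place, the cnt_count stub and the print('Count :', cnt_count) line (which prints the function object, not a number) can be removed.

            img, red_count = count_colored_objects(img, self.color1_lower, self.color1_upper)
            self.redCount_Label.setText('Red count : ' + str(red_count))  # hypothetical QLabel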
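One more note on the color side, since the target is red chilies: in OpenCV's HSV, hue runs 0-179 and red sits at both ends of that range, so a single slider range passed to inRange can miss part of the reds. A common workaround, sketched below with example thresholds that would still need tuning to your lighting, is to build two masks and OR them together:

import cv2
import numpy as np

def red_mask(hsv):
    # Red wraps around hue 0/179, so threshold both ends and combine.
    # The S/V minimums (100, 100) are example values only.
    m1 = cv2.inRange(hsv, np.array([0, 100, 100], np.uint8), np.array([10, 255, 255], np.uint8))
    m2 = cv2.inRange(hsv, np.array([170, 100, 100], np.uint8), np.array([179, 255, 255], np.uint8))
    return cv2.bitwise_or(m1, m2)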