Yes.
Download the zip at the end of this guide: https://core-electronics.com.au/guides/ ... pberry-pi/
and copy the unzipped Object_Detection_Files directory to the Desktop.
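If you want to double-check the copy before running the script, a minimal sketch like the one below (assuming the same /home/<user>/Desktop/Object_Detection_Files layout the script builds with os.getlogin()) will confirm the three files it needs are in place:

Code:
# Optional sanity check: confirm the files the detection script expects
# are under ~/Desktop/Object_Detection_Files before running it.
import os

base = "/home/" + os.getlogin() + "/Desktop/Object_Detection_Files"
needed = [
    "coco.names",
    "ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt",
    "frozen_inference_graph.pb",
]

for name in needed:
    path = os.path.join(base, name)
    print(path, "OK" if os.path.isfile(path) else "MISSING")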
Code:
#Import the Open-CV extra functionalities
import cv2
import os

# find user
users = []
users.append(os.getlogin())

#This is to pull the information about what each object is called
classNames = []
classFile = "/home/" + users[0] + "/Desktop/Object_Detection_Files/coco.names"
with open(classFile, "rt") as f:
    classNames = f.read().rstrip("\n").split("\n")

#This is to pull the information about what each object should look like
configPath = "/home/" + users[0] + "/Desktop/Object_Detection_Files/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"
weightsPath = "/home/" + users[0] + "/Desktop/Object_Detection_Files/frozen_inference_graph.pb"

#These are some setup values to get good results
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)

#This sets up the drawn box size/colour and the font/size/colour of the name tag and confidence label
def getObjects(img, thres, nms, draw=True, objects=[]):
    classIds, confs, bbox = net.detect(img, confThreshold=thres, nmsThreshold=nms)
    #Uncomment the line below if you want to print each sighting of an object to the console
    #print(classIds, bbox)
    if len(objects) == 0:
        objects = classNames
    objectInfo = []
    if len(classIds) != 0:
        for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
            className = classNames[classId - 1]
            if className in objects:
                objectInfo.append([box, className])
                if draw:
                    cv2.rectangle(img, box, color=(0, 255, 0), thickness=2)
                    cv2.putText(img, classNames[classId - 1].upper(), (box[0] + 10, box[1] + 30),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                    cv2.putText(img, str(round(confidence * 100, 2)), (box[0] + 200, box[1] + 30),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

    return img, objectInfo

# find USB camera: probe /dev/video0..41 with v4l2-ctl and pick the first device that reports both control menus
cam1 = -1
x = 0
while cam1 == -1 and x < 42:
    txt = "v4l2-ctl -d " + str(x) + " --list-ctrls > cam_ctrls.txt"
    os.system(txt)
    ctrls = []
    with open("cam_ctrls.txt", "r") as file:
        line = file.readline()
        while line:
            ctrls.append(line)
            line = file.readline()
    if 'User Controls\n' in ctrls and ('Camera Controls\n' in ctrls):
        cam1 = x
    else:
        x += 1

if cam1 == -1:
    print(" No USB camera found !!")

#Below determines the size of the live feed window that will be displayed on the Raspberry Pi OS
if __name__ == "__main__":
    # start USB camera
    vid1 = cv2.VideoCapture(cam1)
    vid1.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    vid1.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    #Below is the never-ending loop that determines what happens when an object is identified.
    while True:
        # GET AN IMAGE from the USB camera
        ret, img = vid1.read()
        #Below provides a huge amount of control: 0.45 is the confidence threshold, 0.2 is the NMS threshold
        result, objectInfo = getObjects(img, 0.45, 0.2)
        #print(objectInfo)
        cv2.imshow("Output", img)

        k = cv2.waitKey(200)
        if k == 27:  # Esc key to stop
            # EXIT
            cv2.destroyAllWindows()
            break
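If you only care about particular classes, getObjects() already accepts an objects list; as a rough sketch (the names "person" and "cup" are just example entries and must match names in coco.names exactly), the call inside the main loop could become:

Code:
# Only report people and cups; everything else in the frame is ignored.
result, objectInfo = getObjects(img, 0.45, 0.2, objects=["person", "cup"])
for box, className in objectInfo:
    print(className, "at", box)

With draw left at its default of True the boxes are still drawn on the frame; pass draw=False if you only want the console output.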
Statistics: Posted by gordon77 — Mon Jan 22, 2024 9:12 am