Every line of the 'opencv videocapture python' code snippets below is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.
import cv2


def main():
    capture = cv2.VideoCapture(0)

    if capture.isOpened():
        flag, frame = capture.read()
    else:
        flag = False

    while flag:
        flag, frame = capture.read()
        if not flag:
            break

        # Invert the frame to produce a photographic negative.
        frame = abs(255 - frame)

        cv2.imshow("Video Camera", frame)

        # Exit when ESC is pressed.
        if cv2.waitKey(1) & 0xFF == 27:
            break

    capture.release()
    cv2.destroyAllWindows()
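The abs(255 - frame) line above inverts each 8-bit pixel. OpenCV also ships a built-in that does the same thing; a minimal, self-contained sketch of that alternative (device index 0 and the window name are arbitrary choices):

import cv2

capture = cv2.VideoCapture(0)
ok, frame = capture.read()
if ok:
    # cv2.bitwise_not flips every bit, which for an 8-bit image equals 255 - pixel.
    inverted = cv2.bitwise_not(frame)
    cv2.imshow("Inverted", inverted)
    cv2.waitKey(0)
capture.release()
cv2.destroyAllWindows()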
import cv2


def main():
    capture = cv2.VideoCapture(0)
    eye_path = "../classifier/haarcascade_eye.xml"
    face_path = "../classifier/haarcascade_frontalface_default.xml"

    eye_cascade = cv2.CascadeClassifier(eye_path)
    face_cascade = cv2.CascadeClassifier(face_path)

    while True:
        _, frame = capture.read()

        # Haar cascades operate on grayscale images.
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        eyes = eye_cascade.detectMultiScale(gray_frame, scaleFactor=1.05, minNeighbors=5, minSize=(10, 10))
        faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.05, minNeighbors=5, minSize=(40, 40))

        print("Number of eyes : " + str(len(eyes)))
        print("Number of faces : " + str(len(faces)))

        # Eyes in red, faces in green.
        for (x, y, w, h) in eyes:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        cv2.imshow("Live Capture", frame)

        # Exit when ESC is pressed.
        if cv2.waitKey(1) == 27:
            break

    cv2.destroyAllWindows()
    capture.release()
import os

import cv2


def video(src=0):
    # `args` and `Eyeliner` are defined elsewhere in the original project.
    cap = cv2.VideoCapture(src)

    if args['save']:
        if os.path.isfile(args['save'] + '.avi'):
            os.remove(args['save'] + '.avi')
        # cap.get(3) / cap.get(4) are the capture's frame width and height.
        out = cv2.VideoWriter(args['save'] + '.avi',
                              cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                              30, (int(cap.get(3)), int(cap.get(4))))

    while cap.isOpened():
        _, frame = cap.read()
        output_frame, op_crop = Eyeliner(frame)

        if args['save']:
            out.write(output_frame)

        cv2.imshow("Artificial Eyeliner", cv2.resize(output_frame, (600, 600)))
        cv2.imshow('Eye Region', cv2.resize(op_crop, (400, 200)))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if args['save']:
        out.release()

    cap.release()
    cv2.destroyAllWindows()
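The args dictionary used above comes from the project's command-line parsing, which is not shown. A minimal sketch of the assumed setup, with --save as the only option (the argument name is taken from the snippet; everything else here is an assumption):

import argparse

parser = argparse.ArgumentParser(description="Artificial eyeliner demo")
# Base name for the output video; the snippet above appends '.avi' itself.
parser.add_argument('--save', default=None,
                    help="base name of the .avi file to write; omit to disable saving")
args = vars(parser.parse_args())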
def init_camera(self):
    # create the device
    self._device = hg.cvCreateCameraCapture(self._index)

    # Set preferred resolution
    cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_WIDTH,
                          self.resolution[0])
    cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_HEIGHT,
                          self.resolution[1])

    # and get frame to check if it's ok
    frame = hg.cvQueryFrame(self._device)
    # Just set the resolution to the frame we just got, but don't use
    # self.resolution for that as that would cause an infinite recursion
    # with self.init_camera (but slowly as we'd have to always get a frame).
    self._resolution = (int(frame.width), int(frame.height))

    # get fps
    self.fps = cv.GetCaptureProperty(self._device, cv.CV_CAP_PROP_FPS)
    if self.fps <= 0:
        self.fps = 1 / 30.

    if not self.stopped:
        self.start()
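The snippet above uses the legacy OpenCV 1.x Python bindings (cv and the highgui module imported as hg). A rough sketch of the same capture setup with the modern cv2 API follows; the requested resolution and the FPS fallback value are arbitrary choices here, not values from the original.

import cv2

cap = cv2.VideoCapture(0)

# Request a preferred resolution; the driver may silently pick the closest supported mode.
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Read one frame to confirm the device works and to learn the actual frame size.
ok, frame = cap.read()
if ok:
    height, width = frame.shape[:2]

# Some backends report 0 for FPS; fall back to a default in that case.
fps = cap.get(cv2.CAP_PROP_FPS)
if fps <= 0:
    fps = 30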
def startCamera(self, source_name):
    # Accept either a numeric device index or a device name.
    try:
        source = int(source_name)
    except:
        IkaUtils.dprint('%s: Looking up device name %s' %
                        (self, source_name))
        try:
            source_name = source_name.encode('utf-8')
        except:
            pass

        try:
            source = self.enumerateInputSources().index(source_name)
        except:
            IkaUtils.dprint("%s: Input '%s' not found" %
                            (self, source_name))
            return False

    IkaUtils.dprint('%s: initializing capture device %s' % (self, source))
    self.realtime = True
    if self.isWindows():
        # Adding 700 selects the DirectShow backend (cv2.CAP_DSHOW == 700).
        self.initCapture(700 + source)
    else:
        self.initCapture(0 + source)
import logging
import time

import cv2


def main():
    # Open a camera, allowing us to read frames. Note that the frames are BGR,
    # not RGB, since that's how OpenCV stores them. The index 0 is usually the
    # integrated webcam on your laptop. If this fails, try different indices, or
    # get a webcam.
    camera = cv2.VideoCapture(0)
    # FaceDetector is defined elsewhere in the original project; see the
    # stand-in sketch after this snippet.
    detector = FaceDetector()

    # In every loop, we read from the camera, process the frame, and then
    # display the processed frame. The process function is where all of the
    # interesting logic happens.
    while True:
        # Timing code to get diagnostics on per frame performance
        start = time.time()
        _, frame = camera.read()
        end = time.time()
        logging.debug("Took %2.2f ms to read from camera",
                      round((end - start) * 1000, 2))

        start = time.time()
        detector.process(frame)  # Process
        end = time.time()
        logging.info("Took %2.2f ms to process frame",
                     round((end - start) * 1000, 2))

        # Standard display code, showing the resultant frame in a window titled
        # "Video". Close the window with 'q'.
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Clean up after ourselves.
    camera.release()
    cv2.destroyAllWindows()
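The FaceDetector class used above is part of the original project and not shown here. A minimal stand-in, assuming a Haar-cascade detector and the opencv-python wheel (which bundles the cascade XML files under cv2.data.haarcascades), could look like this sketch:

import cv2


class FaceDetector:
    """Minimal stand-in detector: Haar cascade face detection, drawn in place."""

    def __init__(self):
        # Adjust the path if the cascade files live somewhere else.
        self.cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

    def process(self, frame):
        # Detect on a grayscale copy, then draw rectangles directly on the BGR
        # frame so the caller's cv2.imshow displays the annotated image.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        for (x, y, w, h) in self.cascade.detectMultiScale(gray, 1.3, 5):
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)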
import cv2


def detect():
    face = cv2.CascadeClassifier("data/haarcascade_frontalface_default.xml")
    eye = cv2.CascadeClassifier("data/haarcascade_eye.xml")

    camera = cv2.VideoCapture(0)  # 0 selects the first camera

    while True:
        ret, frame = camera.read()  # ret: True if the frame was read successfully; frame is the image itself
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # face detection works on grayscale images

        faces = face.detectMultiScale(gray, 1.3, 5)
        # face.detectMultiScale(gray, scaleFactor, minNeighbors)
        # scaleFactor: how much the image is shrunk at each detection scale
        # minNeighbors: minimum number of neighboring detections a face rectangle needs to be kept

        for x, y, w, h in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (250, 0, 0), 2)

            # Search for eyes only inside the detected face region.
            eye_area = gray[y:y + h, x:x + w]
            eyes = eye.detectMultiScale(eye_area, 1.03, 5, 0, (40, 40))
            # The (40, 40) argument sets the minimum eye size to 40x40 pixels,
            # which helps suppress false positives.
            for ex, ey, ew, eh in eyes:
                cv2.rectangle(frame, (x + ex, y + ey), (x + ex + ew, y + ey + eh), (0, 255, 0), 2)

        cv2.imshow("face", frame)
        if cv2.waitKey(1000 // 12) & 0xff == ord("q"):
            break
    camera.release()
    cv2.destroyAllWindows()
def __init__(self, video_path):
    '''Camera class gets images from a video device and transforms them
    in order to detect objects in the image.
    '''
    self.lock = threading.Lock()
    video_path = path.expanduser(video_path)

    if not path.isfile(video_path):
        raise SystemExit('%s does not exist. Please check the path.' % (video_path))

    self.cam = cv2.VideoCapture(video_path)
    if not self.cam.isOpened():
        print("%s is not a valid video file path." % (video_path))
        raise SystemExit("Please check the video file: %s" % (video_path))

    # Property ids 3 and 4 are CAP_PROP_FRAME_WIDTH and CAP_PROP_FRAME_HEIGHT.
    self.im_width = self.cam.get(3)
    self.im_height = self.cam.get(4)
import cv2


def detection(video, stream, fps, scale, details):
    # ManagerCV2 and face_detector come from elsewhere in the original project.
    manager_cv2 = ManagerCV2(cv2.VideoCapture(video), is_stream=stream, fps_limit=fps)
    manager_cv2.add_keystroke(27, 1, exit=True)  # ESC exits
    manager_cv2.add_keystroke(ord('d'), 1, 'detect')
    manager_cv2.key_manager.detect = True  # I want to start detecting
    selector = None
    frames_tracking = 0

    for frame in manager_cv2:
        if manager_cv2.key_manager.detect:
            # I'm using the frame counter from the manager,
            # so I recalculate the detection every 20 frames.
            # For the other frames I keep using the same selector (so it will not change).
            # NOTE: It is == 1 (and not == 0) because in the first iteration we
            # get the first frame, so count_frames = 1.
            # This is how I make sure it is True on the first loop,
            # so selector exists and doesn't raise an error.
            if manager_cv2.count_frames % 20 == 1:
                new_selector = face_detector(frame, scale, details)
                if not selector or frames_tracking >= 30 or len(new_selector.zones) >= len(selector.zones):
                    selector = new_selector
                    frames_tracking = 0
                    manager_cv2.set_tracking(selector, frame)
            else:
                # For the other frames I get the tracking of the last detections.
                selector = manager_cv2.get_tracking(frame)
                frames_tracking += 1
            frame = selector.draw(frame)

        cv2.imshow('Face detection example', frame)

    print('FPS: {}'.format(manager_cv2.get_fps()))
    cv2.destroyAllWindows()
42 def setupUsbCam(self): 43 # initialize the camera and grab a reference to the raw camera capture 44 self.rawCapture = cv2.VideoCapture(self.cameraNum) 45 46 # wait for camera to warm up 47 time.sleep(0.1)
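Besides a fixed sleep, a common warm-up pattern is to read and discard a few frames so auto-exposure and white balance settle before frames are actually used. This is an assumption about the hardware, not part of the original class; a standalone sketch:

import cv2

cap = cv2.VideoCapture(0)

# Read and throw away a handful of frames; many webcams need a few
# captures before exposure stabilizes.
for _ in range(5):
    cap.read()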