Every line of these "videocapture cv2" code snippets is scanned for vulnerabilities by our machine-learning engine, which has combed millions of open-source libraries, helping to ensure that your Python code is secure.
def video(src=0):
    """Run the eyeliner filter on a video stream and optionally record it.

    Parameters
    ----------
    src : int or str, default 0
        Camera index or video file path passed to ``cv2.VideoCapture``.

    Relies on the module-level ``args`` mapping (expects a ``'save'`` key)
    and the ``Eyeliner`` function defined elsewhere in this file.
    """
    cap = cv2.VideoCapture(src)

    writer = None
    if args['save']:
        out_path = args['save'] + '.avi'
        # Overwrite any previous recording with the same name.
        if os.path.isfile(out_path):
            os.remove(out_path)
        writer = cv2.VideoWriter(
            out_path,
            cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
            30,
            (int(cap.get(3)), int(cap.get(4))),  # capture width / height
        )

    # BUG FIX: the original tested `cap.isOpened` (the bound method object,
    # always truthy) instead of calling it, and ignored the success flag
    # from read(), so a failed read passed None to Eyeliner.
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:  # stream ended or camera failed
            break

        output_frame, op_crop = Eyeliner(frame)

        if writer is not None:
            writer.write(output_frame)

        cv2.imshow("Artificial Eyeliner", cv2.resize(output_frame, (600, 600)))
        cv2.imshow('Eye Region', cv2.resize(op_crop, (400, 200)))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if writer is not None:
        writer.release()

    cap.release()
    cv2.destroyAllWindows()
def detect():
    """Detect faces (blue boxes) and eyes (green boxes) on the webcam feed."""
    face = cv2.CascadeClassifier("data/haarcascade_frontalface_default.xml")
    eye = cv2.CascadeClassifier("data/haarcascade_eye.xml")

    camera = cv2.VideoCapture(0)  # 0 selects the first camera

    while True:
        ret, frame = camera.read()  # ret: True when a frame was grabbed
        # BUG FIX: the original ignored `ret` and crashed in cvtColor when
        # the camera read failed.
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # cascades work on grayscale

        # detectMultiScale(gray, scaleFactor, minNeighbors):
        #   scaleFactor=1.3: image shrink ratio between pyramid levels
        #   minNeighbors=5: minimum neighbor rectangles required to keep a face
        faces = face.detectMultiScale(gray, 1.3, 5)

        for x, y, w, h in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (250, 0, 0), 2)

            # Search for eyes only inside the detected face region.
            eye_area = gray[y:y + h, x:x + w]
            # minSize=(40, 40) suppresses false positives by ignoring
            # candidate eyes smaller than 40x40 pixels.
            eyes = eye.detectMultiScale(eye_area, 1.03, 5, 0, (40, 40))
            for ex, ey, ew, eh in eyes:
                # Eye coordinates are relative to the face crop; offset by (x, y).
                cv2.rectangle(frame, (x + ex, y + ey), (x + ex + ew, y + ey + eh), (0, 255, 0), 2)

        cv2.imshow("face", frame)
        if cv2.waitKey(1000 // 12) & 0xff == ord("q"):  # ~12 fps; 'q' quits
            break
    camera.release()
    cv2.destroyAllWindows()
def main():
    """Show an inverted (photographic-negative) webcam feed until Esc is pressed."""
    capture = cv2.VideoCapture(0)

    # Grab an initial frame so the loop condition reflects camera health.
    if capture.isOpened():
        flag, frame = capture.read()
    else:
        flag = False

    while flag:
        flag, frame = capture.read()
        # BUG FIX: the original kept using `frame` (None) after a failed
        # re-read, crashing inside imshow.
        if not flag:
            break

        # Invert the 8-bit image: 255 - pixel value.
        frame = abs(255 - frame)

        cv2.imshow("Video Camera", frame)

        if cv2.waitKey(1) & 0xFF == 27:  # Esc
            break

    capture.release()  # BUG FIX: the original never released the camera
    cv2.destroyAllWindows()
def main():
    """Draw live rectangles around detected faces (green) and eyes (red).

    Also prints per-frame detection counts. Expects the Haar cascade XML
    files in ``../classifier/`` relative to the working directory.
    """
    capture = cv2.VideoCapture(0)
    eye_path = "../classifier/haarcascade_eye.xml"
    face_path = "../classifier/haarcascade_frontalface_default.xml"

    eye_cascade = cv2.CascadeClassifier(eye_path)
    face_cascade = cv2.CascadeClassifier(face_path)

    while True:
        ret, frame = capture.read()
        # BUG FIX: the original ignored `ret` and crashed in cvtColor when
        # the camera read failed.
        if not ret:
            break

        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        eyes = eye_cascade.detectMultiScale(gray_frame, scaleFactor=1.05, minNeighbors=5, minSize=(10, 10))
        faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.05, minNeighbors=5, minSize=(40, 40))

        print("Number of eyes : " + str(len(eyes)))
        print("Number of faces : " + str(len(faces)))

        for (x, y, w, h) in eyes:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)  # red (BGR)

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)  # green

        cv2.imshow("Live Capture", frame)

        if cv2.waitKey(1) == 27:  # Esc
            break

    cv2.destroyAllWindows()
    capture.release()
def detection(video, stream, fps, scale, details):
    """Face detection with periodic re-detection and tracking in between.

    Parameters:
        video: source handed to cv2.VideoCapture (camera index or path).
        stream: is_stream flag forwarded to ManagerCV2.
        fps: FPS limit forwarded to ManagerCV2.
        scale, details: forwarded to face_detector().
    """
    manager_cv2 = ManagerCV2(cv2.VideoCapture(video), is_stream=stream, fps_limit=fps)
    manager_cv2.add_keystroke(27, 1, exit=True)  # Esc exits the loop
    manager_cv2.add_keystroke(ord('d'), 1, 'detect')  # 'd' toggles detection on/off
    manager_cv2.key_manager.detect = True  # I want to start detecting
    selector = None
    frames_tracking = 0

    for frame in manager_cv2:
        if manager_cv2.key_manager.detect:
            # I'm using the frame counter from the manager, so the detection
            # is recalculated every 20 frames; the frames in between reuse
            # the tracker (so the selector does not change).
            # NOTE: It is == 1 (and not == 0) because in the first iteration
            # we get the first frame, so count_frames = 1. This guarantees
            # the branch is True on the first loop, so `selector` exists and
            # doesn't raise an error below.
            if manager_cv2.count_frames % 20 == 1:
                new_selector = face_detector(frame, scale, details)
                # Adopt the fresh detection when there is no selector yet,
                # the tracker has run too long (>= 30 frames), or the new
                # detection found at least as many zones as the current one.
                if not selector or frames_tracking >= 30 or len(new_selector.zones) >= len(selector.zones):
                    selector = new_selector
                    frames_tracking = 0
                    manager_cv2.set_tracking(selector, frame)
            else:
                # For the other frames I will get the tracking of the last detections.
                selector = manager_cv2.get_tracking(frame)
                frames_tracking += 1
            frame = selector.draw(frame)

        cv2.imshow('Face detection example', frame)

    print('FPS: {}'.format(manager_cv2.get_fps()))
    cv2.destroyAllWindows()
def load_video():
    """Replay 'walking.mp4' at 3 FPS, drawing the selections stored in 'salida.json'."""
    manager = ManagerCV2(cv2.VideoCapture('walking.mp4'), is_stream=False, fps_limit=3)
    # Esc prints a goodbye message and terminates the playback loop.
    manager.add_keystroke(27, 1, print, 'Pressed esc. Exiting', exit=True)
    storage = StorageCV2(path='salida.json')

    # Pair every decoded frame with its stored selector.
    for raw_frame, stored_selector in zip(manager, storage):
        mirrored = cv2.flip(raw_frame, 1)  # horizontal mirror
        annotated = stored_selector.draw(mirrored)
        cv2.imshow('Example face_recognition', annotated)
    print('FPS: {}'.format(manager.get_fps()))
    cv2.destroyAllWindows()
def recognize_from_video(video, detector):
    """Run object detection over a video source and display annotated frames.

    Parameters:
        video: '0' to use the webcam, otherwise a path to a video file.
        detector: detection model forwarded to detect_objects().
    """
    if video == '0':
        print('[INFO] Webcam mode is activated')
        capture = cv2.VideoCapture(0)
        if not capture.isOpened():
            print("[ERROR] webcamera not found")
            sys.exit(1)
    else:
        # BUG FIX: the original left `capture` undefined (NameError in the
        # loop below) when the file did not exist; fail fast instead.
        if not pathlib.Path(video).exists():
            print("[ERROR] video file not found")
            sys.exit(1)
        capture = cv2.VideoCapture(video)

    while True:
        ret, img = capture.read()
        # press q to end video capture
        # BUG FIX: the original called cv2.waitKey twice per iteration (a
        # duplicated check block), which could swallow the 'q' keypress.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if not ret:  # skip dropped frames
            continue

        boxes, scores, cls_inds = detect_objects(img, detector)
        img = draw_detection(img, boxes, scores, cls_inds)
        cv2.imshow('frame', img)

    capture.release()
    cv2.destroyAllWindows()
    print('Script finished successfully.')
def recognize_from_video():
    """Run the ailia detection model on a webcam or video file and display results.

    Reads the source from module-level ``args.video`` ('0' means webcam) and
    the model files from ``MODEL_PATH`` / ``WEIGHT_PATH``.
    """
    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)

    if args.video == '0':
        print('[INFO] Webcam mode is activated')
        capture = cv2.VideoCapture(0)
        if not capture.isOpened():
            print("[ERROR] webcamera not found")
            sys.exit(1)
    else:
        # BUG FIX: the original left `capture` undefined (NameError in the
        # loop below) when check_file_existance returned falsy; exit instead.
        if not check_file_existance(args.video):
            sys.exit(1)
        capture = cv2.VideoCapture(args.video)

    while True:
        ret, frame = capture.read()
        if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to end video capture
            break
        if not ret:  # skip dropped frames
            continue

        input_image, input_data = preprocess_frame(
            frame, IMAGE_HEIGHT, IMAGE_WIDTH, normalize_type='127.5'
        )

        # inference
        input_blobs = net.get_input_blob_list()
        net.set_input_blob_data(input_data, input_blobs[0])
        net.update()
        preds_ailia = net.get_results()

        # postprocessing
        detections = postprocess(preds_ailia)
        show_result(input_image, detections)
        cv2.imshow('frame', input_image)

    capture.release()
    cv2.destroyAllWindows()
    print('Script finished successfully.')