python - How to specify video dimensions in a video with OpenCV
This program determines when there is a dramatic change in the pixels of a video, and prints out the frame number and the millisecond at which the change occurred. Using that millisecond value, the program saves an image of that instant in the video, and I analyze the image to determine the height of a person in the video. The problem is that the video resolution is small: although the video spans the whole screen lengthwise when played in a media player, its height is only about 3 inches. When I save an image based on the millisecond value, the image is pitch black, because the sampled region falls above or below the small video frame. If I change the video size so that the video is seen across the entire screen, then the image I save based on the millisecond value is not black. This is a crucial part of my project — please help me. Thank you very much.
I am getting this error: TypeError: expected CvCapture for argument 'capture'
This is how I have gone about changing the height and width of the video:
width = cv.setcaptureproperty(capfile, cv.cv_cap_prop_frame_width, 1280)
height = cv.setcaptureproperty(capfile,cv.cv_cap_prop_frame_height, 720) import sys import cv2 import cv import numpy np # advanced scene detection parameters intensity_threshold = 16 # pixel intensity threshold (0-255), default 16 minimum_percent = 95 # minimum amount of pixels allowed below threshold. block_size = 32 # number of rows sum per iteration. def main(): capfile = 'camera20.h264' cap = cv2.videocapture() cap.open(capfile) if not cap.isopened(): print "fatal error - not open video %s." % capfile return else: print "parsing video %s..." % capfile # stuff cap here. width = cv.setcaptureproperty(capfile, cv.cv_cap_prop_frame_width, 1280) height = cv.setcaptureproperty(capfile,cv.cv_cap_prop_frame_height, 720) print "video resolution: %d x %d" % (width, height) # allow threshold passed optional, second argument script. threshold = 50 print "detecting scenes threshold = %d" % threshold print "min. pixels under threshold = %d %%" % minimum_percent print "block/row size = %d" % block_size print "" min_percent = minimum_percent / 100.0 num_rows = block_size last_amt = 0 # number of pixel values above threshold in last frame. start_time = cv2.gettickcount() # used statistics after loop. while true: # next frame video. (rv, im) = cap.read() if not rv: # im valid image if , if rv true break # compute # of pixel values , minimum amount trigger fade. num_pixel_vals = float(im.shape[0] * im.shape[1] * im.shape[2]) min_pixels = int(num_pixel_vals * (1.0 - min_percent)) # loop through frame block-by-block, updating current sum. frame_amt = 0 curr_row = 0 while curr_row < im.shape[0]: # add # of pixel values in current block above threshold. frame_amt += np.sum( im[curr_row : curr_row + num_rows,:,:] > threshold ) if frame_amt > min_pixels: # can avoid checking rest of break # frame since crossed boundary. curr_row += num_rows # detect fade in black. if frame_amt >= min_pixels , last_amt < min_pixels: print "detected fade in @ %dms (frame %d)." 
% ( cap.get(cv2.cv.cv_cap_prop_pos_msec), cap.get(cv2.cv.cv_cap_prop_pos_frames) ) # detect fade out black. elif frame_amt < min_pixels , last_amt >= min_pixels: print "detected fade out @ %dms (frame %d)." % ( cap.get(cv2.cv.cv_cap_prop_pos_msec), cap.get(cv2.cv.cv_cap_prop_pos_frames) ) last_amt = frame_amt # store current mean compare in next iteration. # # of frames in video based on position of last frame read. frame_count = cap.get(cv2.cv.cv_cap_prop_pos_frames) # compute runtime , average framerate total_runtime = float(cv2.gettickcount() - start_time) / cv2.gettickfrequency() avg_framerate = float(frame_count) / total_runtime print "read %d frames video in %4.2f seconds (avg. %4.1f fps)." % ( frame_count, total_runtime, avg_framerate) cap.release() if __name__ == "__main__": main()
It seems you're mixing the new and the old syntax. Try using this:
cv2.VideoCapture.set(propId, value) → retval
instead of this:
cv.SetCaptureProperty(capture, property_id, value) → retval
Comments
Post a Comment