Intermediate
Video Processing with OpenCV
Capture, process, and analyze video streams in real-time using background subtraction, optical flow, and object tracking.
Video Capture and Display
Python — Basic video capture
import cv2

# Open the default webcam (index 0); swap in a filename to read a file
cap = cv2.VideoCapture(0)
# cap = cv2.VideoCapture("video.mp4")

if not cap.isOpened():
    raise IOError("Cannot open video source")

# Query the stream's properties for a quick sanity log
fps = cap.get(cv2.CAP_PROP_FPS)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(f"Resolution: {width}x{height} @ {fps} FPS")

# Grab-process-display loop: convert each frame to grayscale and show it
while True:
    ok, frame = cap.read()
    if not ok:
        # Stream ended or the camera stopped delivering frames
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("Video", gray)
    # waitKey pumps the GUI event loop; 'q' exits early
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Writing Video
Python — Save processed video
import cv2

# Open the source first so the writer can be configured to match it.
cap = cv2.VideoCapture("input.mp4")
if not cap.isOpened():
    raise IOError("Cannot open input.mp4")

# VideoWriter silently drops any frame whose size differs from the size
# it was created with, so derive fps and dimensions from the input
# instead of hard-coding 30.0 and (640, 480).
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # some containers report 0 FPS
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('output.mp4', fourcc, fps, (width, height))

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # Apply processing (blur keeps the frame size unchanged)
    processed = cv2.GaussianBlur(frame, (15, 15), 0)
    out.write(processed)

cap.release()
out.release()
Background Subtraction
Python — Motion detection
import cv2

# Create background subtractor; MOG2 adapts to gradual lighting changes
bg_sub = cv2.createBackgroundSubtractorMOG2(
    history=500, varThreshold=50, detectShadows=True
)

# The structuring element is loop-invariant — build it once, not per frame.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

cap = cv2.VideoCapture("traffic.mp4")
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # Apply background subtraction to isolate moving pixels
    fg_mask = bg_sub.apply(frame)
    # Morphological opening removes small noise blobs from the mask
    fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_OPEN, kernel)
    # Find moving objects and box the ones above the area threshold
    contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) > 1000:  # skip tiny/noisy detections
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    cv2.imshow("Motion Detection", frame)
    # imshow only renders when waitKey pumps the event loop; 'q' quits
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Optical Flow
Python — Lucas-Kanade optical flow
import cv2

cap = cv2.VideoCapture("video.mp4")
ret, old_frame = cap.read()
if not ret:
    raise IOError("Cannot read first frame")
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

# Detect initial feature points (shared params so we can re-detect later)
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7)
p0 = cv2.goodFeaturesToTrack(old_gray, **feature_params)

lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS |
                           cv2.TERM_CRITERIA_COUNT, 10, 0.03))

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # All tracks can be lost (p0 empty or None); calcOpticalFlowPyrLK
    # raises on an empty point set, so re-detect features and continue.
    if p0 is None or len(p0) == 0:
        p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
        old_gray = frame_gray.copy()
        continue

    p1, status, err = cv2.calcOpticalFlowPyrLK(
        old_gray, frame_gray, p0, None, **lk_params
    )

    # Keep only points that were successfully tracked, then draw tracks
    good_new = p1[status == 1]
    good_old = p0[status == 1]
    for new, old in zip(good_new, good_old):
        a, b = new.ravel().astype(int)
        c, d = old.ravel().astype(int)
        cv2.line(frame, (a, b), (c, d), (0, 255, 0), 2)
        cv2.circle(frame, (a, b), 5, (0, 0, 255), -1)

    # Current frame becomes the reference for the next iteration
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)

cap.release()
Performance tip: For real-time applications, resize frames before processing — a 640x480 frame processes much faster than a 1920x1080 one. Call cv2.resize(frame, (640, 480)) at the start of your processing loop.
Lilly Tech Systems