import cv2
import time
import numpy as np
# Load pre-trained vehicle detection model
vehicle_cascade = cv2.CascadeClassifier('cars.xml')
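# Optional sanity check (not in the original listing): CascadeClassifier does not
# raise if 'cars.xml' is missing, so empty() can be used to confirm the model loaded.
if vehicle_cascade.empty():
    raise IOError("Could not load Haar cascade 'cars.xml'")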
# Video capture
cap1 = cv2.VideoCapture('traffic_video.mp4')
cap2 = cv2.VideoCapture('traffic_video2.mp4')
cap3 = cv2.VideoCapture('traffic_video3.mp4')
cap4 = cv2.VideoCapture('traffic_video1.mp4')
# Video dimensions
frame_width = int(cap1.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap1.get(cv2.CAP_PROP_FRAME_HEIGHT))
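# Note: the display grid uses the first video's dimensions; every frame is resized
# to (frame_width, frame_height) below, so the four feeds need not match natively.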
# Initialize signal timing parameters (in seconds)
total_time = 60 # Total time for one cycle of signals
green_time = 20 # Default green time for each signal
yellow_time = 5 # Default yellow time for each signal
# Function to calculate signal times based on vehicle density
def calculate_signal_times(vehicle_density):
    # Adjust signal times based on vehicle density
    # Example: reduce green time and increase yellow time for higher vehicle density
    adjusted_green_time = max(5, green_time - vehicle_density)
    adjusted_yellow_time = min(10, yellow_time + vehicle_density)
    return adjusted_green_time, adjusted_yellow_time
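# Worked example (a sketch using the default values above): with green_time = 20,
# yellow_time = 5 and an average density of 8 vehicles, the function returns
# max(5, 20 - 8) = 12 s of green and min(10, 5 + 8) = 10 s of yellow. The clamps
# keep green from dropping below 5 s and yellow from exceeding 10 s.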
# Initialize signal times for each quadrant
signal_times = [total_time] * 4
# Start time for signal timing
start_time = time.time()
# Create window to display videos
cv2.namedWindow("Multi-Window Display", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Multi-Window Display", frame_width * 2, frame_height * 2)
# Middle line position for vehicle counting
middle_line_y = frame_height // 2
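# A detection is counted when its bounding box spans this row; since every
# quadrant is resized to frame_height, the same line applies to all four feeds.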
while True:
    # Read frames from all four videos
    ret1, frame1 = cap1.read()
    ret2, frame2 = cap2.read()
    ret3, frame3 = cap3.read()
    ret4, frame4 = cap4.read()
    if not (ret1 and ret2 and ret3 and ret4):
        break

    # Resize frames
    frame1 = cv2.resize(frame1, (frame_width, frame_height))
    frame2 = cv2.resize(frame2, (frame_width, frame_height))
    frame3 = cv2.resize(frame3, (frame_width, frame_height))
    frame4 = cv2.resize(frame4, (frame_width, frame_height))

    # Combine frames into a single window
    combined_frame = np.zeros((frame_height * 2, frame_width * 2, 3), dtype=np.uint8)

    # Assign each frame to its corresponding quadrant in the combined frame
    combined_frame[0:frame_height, 0:frame_width] = frame1
    combined_frame[0:frame_height, frame_width:frame_width * 2] = frame2
    combined_frame[frame_height:frame_height * 2, 0:frame_width] = frame3
    combined_frame[frame_height:frame_height * 2, frame_width:frame_width * 2] = frame4

    # Detect vehicles and count them in each quadrant
    total_vehicles = 0
    for idx, quadrant in enumerate([frame1, frame2, frame3, frame4], start=1):
        gray = cv2.cvtColor(quadrant, cv2.COLOR_BGR2GRAY)
        vehicles = vehicle_cascade.detectMultiScale(gray, 1.1, 3)

        # Count vehicles whose bounding box crosses the middle line in this quadrant
        quadrant_vehicles = 0
        for (x, y, w, h) in vehicles:
            if y <= middle_line_y <= y + h:
                quadrant_vehicles += 1
        total_vehicles += quadrant_vehicles
        # Display vehicle count and signal information
        if signal_times[idx - 1] > 0:
            signal_color = "Green"
            time_allotted = signal_times[idx - 1]
        else:
            signal_color = "Red"
            time_allotted = 0

        # Draw black background rectangle for the overlay text
        text = (f'Quadrant {idx} - Vehicles: {quadrant_vehicles} | '
                f'Signal: {signal_color} | Time Allotted: {time_allotted} sec')
        text_size = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)[0]
        text_x = 10 + ((idx - 1) % 2) * frame_width      # left/right column of the quadrant
        text_y = frame_height * ((idx - 1) // 2 + 1)     # bottom edge of the quadrant's row
        cv2.rectangle(combined_frame, (text_x, text_y - 30),
                      (text_x + text_size[0], text_y), (0, 0, 0), -1)

        # Display text in white color
        cv2.putText(combined_frame, text, (text_x, text_y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

    # Display the combined frame
    cv2.imshow('Multi-Window Display', combined_frame)

    # Calculate average vehicle density across all quadrants
    avg_vehicle_density = total_vehicles / 4

    # Calculate signal times based on vehicle density
    green_time, yellow_time = calculate_signal_times(avg_vehicle_density)

    # Update signal times for each quadrant
    signal_times = [green_time + yellow_time] * 4

    # Check if the allotted time for the current signal has elapsed
    elapsed_time = time.time() - start_time
    if elapsed_time >= total_time:
        start_time = time.time()  # Reset start time

        # Log signal times for the next cycle
        print("Signal Times for Next Cycle:")
        for idx, time_allotted in enumerate(signal_times, start=1):
            print(f"Quadrant {idx}: Green Time: {green_time} sec, "
                  f"Yellow Time: {yellow_time} sec")
        print("")

    # Check for user input to exit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release video capture objects and close windows
cap1.release()
cap2.release()
cap3.release()
cap4.release()
cv2.destroyAllWindows()