Showing posts with label opencv. Show all posts

Thursday, April 27, 2023

Handwritten Digit Recognition using OpenCV and Python

This code loads a pre-trained CNN model, captures video from the webcam, and analyzes each frame in real time to recognize handwritten digits. OpenCV is used to preprocess the frames and extract the digit regions, and the recognized digits are drawn onto the video frames as they are displayed.

 

import cv2

import numpy as np

from keras.models import load_model


# Load the pre-trained CNN model

model = load_model('model.h5')


# Define the size of the image to be analyzed

IMG_SIZE = 28


# Define the function to preprocess the image

def preprocess_image(img):

    # Convert to grayscale only if the input still has 3 colour channels
    if len(img.shape) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))

    img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

    img = img.astype('float32') / 255.0

    img = np.reshape(img, (1, IMG_SIZE, IMG_SIZE, 1))

    return img


# Define the function to recognize the digit

def recognize_digit(img):

    img_processed = preprocess_image(img)

    # predict_classes() was removed from newer Keras versions, so take the argmax of predict()
    digit = np.argmax(model.predict(img_processed, verbose=0), axis=1)[0]

    return digit


# Capture the video from the webcam

cap = cv2.VideoCapture(0)


while True:

    # Read a frame from the video stream

    ret, frame = cap.read()

    # Stop if the webcam did not return a frame
    if not ret:
        break

    

    # Convert the frame to grayscale

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    

    # Threshold the grayscale image

    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

    

    # Find the contours in the thresholded image

    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    

    # Loop through all the contours

    for contour in contours:

        # Find the bounding rectangle of the contour

        x, y, w, h = cv2.boundingRect(contour)

        

        # Ignore contours that are too small

        if w < 10 or h < 10:

            continue

        

        # Draw the rectangle around the contour

        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        

        # Extract the digit from the image

        digit_img = gray[y:y+h, x:x+w]

        

        # Recognize the digit

        digit = recognize_digit(digit_img)

        

        # Print the recognized digit on the frame

        cv2.putText(frame, str(digit), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

    

    # Display the video stream

    cv2.imshow('Handwritten Digit Recognition', frame)

    

    # Wait for a key press

    key = cv2.waitKey(1)

    

    # If the 'q' key is pressed, exit the loop

    if key == ord('q'):

        break


# Release the resources

cap.release()

cv2.destroyAllWindows()
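The script above assumes a model.h5 file that was trained separately; the post does not show how that model was produced. A minimal sketch of training a small CNN on the MNIST digits and saving it under that file name (assuming Keras with a TensorFlow backend) could look like this:

import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.utils import to_categorical

# Load MNIST and bring it into the shape the recognizer expects:
# 28x28 grayscale images scaled to [0, 1] with a single channel dimension
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train.astype('float32') / 255.0, -1)
x_test = np.expand_dims(x_test.astype('float32') / 255.0, -1)
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# A small CNN: two convolution/pooling stages followed by a dense classifier
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax'),
])

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, batch_size=128, validation_data=(x_test, y_test))

# Save under the file name the recognition script loads
model.save('model.h5')

MNIST digits are white strokes on a black background, which is roughly what the thresholded, inverted crops from preprocess_image() produce, although padding each crop so the digit stays centred usually improves recognition accuracy.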

 

Motion Detection using OpenCV and Python

import cv2


# Set up video capture device

cap = cv2.VideoCapture(0)


# Initialize variables

previous_frame = None


while True:

    # Capture current frame

    ret, current_frame = cap.read()

    # Stop if the camera did not return a frame
    if not ret:
        break


    # Convert to grayscale

    current_frame_gray = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)


    # Check if previous frame exists

    if previous_frame is not None:

        # Compute absolute difference between current and previous frame

        frame_diff = cv2.absdiff(current_frame_gray, previous_frame)


        # Apply thresholding to remove noise

        thresh = cv2.threshold(frame_diff, 25, 255, cv2.THRESH_BINARY)[1]


        # Find contours of objects in thresholded image

        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)


        # Draw bounding box around each contour

        for contour in contours:

            (x, y, w, h) = cv2.boundingRect(contour)

            cv2.rectangle(current_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)


    # Update previous frame

    previous_frame = current_frame_gray


    # Display current frame

    cv2.imshow("Motion Detection", current_frame)


    # Exit on 'q' key press

    if cv2.waitKey(1) & 0xFF == ord('q'):

        break


# Release video capture device and destroy all windows

cap.release()

cv2.destroyAllWindows()

In this code, we capture frames from the default video capture device using cv2.VideoCapture(0). We then convert the current frame to grayscale using cv2.cvtColor(), and compute the absolute difference between the current and previous frames using cv2.absdiff(). We apply thresholding to the difference image to remove noise using cv2.threshold(), and find the contours of objects in the thresholded image using cv2.findContours(). Finally, we draw bounding boxes around each contour using cv2.rectangle().

To run this code, save it in a Python file (e.g., motion_detection.py) and run it using the command python motion_detection.py in a terminal or command prompt. Make sure you have OpenCV installed before running the code.
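One common refinement, which is not part of the snippet above, is to ignore very small contours so that sensor noise does not produce boxes. A sketch of such a filter, written as a drop-in replacement for the contour loop above (the 500-pixel threshold is an arbitrary value to tune):

MIN_AREA = 500  # arbitrary; tune for your camera and scene

for contour in contours:
    # Skip tiny contours that are most likely noise rather than real motion
    if cv2.contourArea(contour) < MIN_AREA:
        continue
    (x, y, w, h) = cv2.boundingRect(contour)
    cv2.rectangle(current_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)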

Wednesday, January 6, 2021

How can I remove the background from images using OpenCV in Python?

import cv2

import numpy as np


#== Parameters =======================================================================

BLUR = 21

CANNY_THRESH_1 = 10

CANNY_THRESH_2 = 200

MASK_DILATE_ITER = 10

MASK_ERODE_ITER = 10

MASK_COLOR = (0.0,0.0,1.0) # In BGR format



#== Processing =======================================================================


#-- Read image -----------------------------------------------------------------------

img = cv2.imread('C:/Temp/person.jpg')

gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)


#-- Edge detection -------------------------------------------------------------------

edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)

edges = cv2.dilate(edges, None)

edges = cv2.erode(edges, None)


#-- Find contours in edges, sort by area ---------------------------------------------

contour_info = []

contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

# Note: OpenCV 3.x returns three values from findContours, so on that version use:

#  _, contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

for c in contours:

    contour_info.append((

        c,

        cv2.isContourConvex(c),

        cv2.contourArea(c),

    ))

contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)

max_contour = contour_info[0]


#-- Create empty mask, draw filled polygon on it corresponding to largest contour ----

# Mask is black, polygon is white

mask = np.zeros(edges.shape)

cv2.fillConvexPoly(mask, max_contour[0], (255))


#-- Smooth mask, then blur it --------------------------------------------------------

mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)

mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)

mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)

mask_stack = np.dstack([mask]*3)    # Create 3-channel alpha mask


#-- Blend masked img into MASK_COLOR background --------------------------------------

mask_stack  = mask_stack.astype('float32') / 255.0          # Use float matrices, 

img         = img.astype('float32') / 255.0                 #  for easy blending


masked = (mask_stack * img) + ((1-mask_stack) * MASK_COLOR) # Blend

masked = (masked * 255).astype('uint8')                     # Convert back to 8-bit 


cv2.imshow('img', masked)                                   # Display

cv2.waitKey()


#cv2.imwrite('C:/Temp/person-masked.jpg', masked)           # Save
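The idea of the script is to run Canny edge detection on the grayscale image, close small gaps in the edges with a dilate/erode pass, keep the largest contour as a rough foreground outline, fill it into a mask, soften the mask with dilation, erosion and a Gaussian blur, and finally blend the original image against the solid MASK_COLOR background using that mask.

If a transparent result is preferred instead of a coloured background, the same smoothed mask can be attached as an alpha channel and written out as a PNG. A sketch that reuses the img and mask variables from the script above (the output path is only an example):

# Reuses `img` (float32 BGR in [0, 1]) and `mask` (float values in 0-255) from above
alpha = np.clip(mask, 0, 255).astype('uint8')         # single-channel alpha mask
bgr = (img * 255).astype('uint8')                     # back to 8-bit BGR
bgra = np.dstack([bgr, alpha])                        # append alpha as a 4th channel
cv2.imwrite('C:/Temp/person-transparent.png', bgra)   # PNG preserves the alpha channel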

How to blur the background using OpenCV in Python

import cv2

import numpy as np

img = cv2.imread("C:/my_pics/rahul.png")
blurred_img = cv2.GaussianBlur(img, (21, 21), 0)

# Build the mask with the same shape as the image so np.where can compare them directly;
# adjust the circle centre and radius to the region that should stay sharp
mask = np.zeros(img.shape, dtype=np.uint8)
mask = cv2.circle(mask, (258, 258), 100, (255, 255, 255), -1)

out = np.where(mask == np.array([255, 255, 255]), img, blurred_img)

cv2.imwrite("./out.png", out)
