1. Perform image transformations, including geometric and morphological transformations.
import cv2
import numpy as np
image = cv2.imread(r"D:\JAS\CV lab\rose.jpeg")
cv2.imshow('Original Image', image)
cv2.waitKey(0)
#Resizing
resized_image = cv2.resize(image, (300, 300))
cv2.imshow('Resized Image', resized_image)
cv2.waitKey(0)
#Rotating
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, 45, 1.0)
rotated_image = cv2.warpAffine(image, M, (w, h))
cv2.imshow('Rotated Image', rotated_image)
cv2.waitKey(0)
#Cropping
cropped_image = image[50:200, 50:200]
cv2.imshow('Cropped Image', cropped_image)
cv2.waitKey(0)
# Gray Scale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('Grayscale Image', gray_image)
cv2.waitKey(0)
#Blurring
blurred_image = cv2.GaussianBlur(image, (15, 15), 0)
cv2.imshow('Blurred Image', blurred_image)
cv2.waitKey(0)
# Edge Detection
edges = cv2.Canny(image, 100, 200)
cv2.imshow('Edge Detected Image', edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Morphological Operations
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, binary_image = cv2.threshold(gray_image, 127, 255, cv2.THRESH_BINARY)
cv2.imshow('Binary Image', binary_image)
cv2.waitKey(0)
#Erosion
kernel = np.ones((5, 5), np.uint8)
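# The 5x5 all-ones array above is a rectangular structuring element shared by
# the morphological operations below; a larger kernel gives stronger erosion/dilation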
erosion = cv2.erode(binary_image, kernel, iterations=1)
cv2.imshow('Erosion', erosion)
cv2.waitKey(0)
#Dilation
dilation = cv2.dilate(binary_image, kernel, iterations=1)
cv2.imshow('Dilation', dilation)
cv2.waitKey(0)
#Opening
opening = cv2.morphologyEx(binary_image, cv2.MORPH_OPEN, kernel)
cv2.imshow('Opening', opening)
cv2.waitKey(0)
#Closing
closing = cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, kernel)
cv2.imshow('Closing', closing)
cv2.waitKey(0)
#Affine Transformation
rows, cols = image.shape[:2]
pts1 = np.float32([[50, 50], [200, 50], [50, 200]])
pts2 = np.float32([[10, 100], [200, 50], [100, 250]])
M_affine = cv2.getAffineTransform(pts1, pts2)
affine_transformed = cv2.warpAffine(image, M_affine, (cols, rows))
cv2.imshow('Affine Transformation', affine_transformed)
cv2.waitKey(0)
# Perspective Transformation
pts1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
M_perspective = cv2.getPerspectiveTransform(pts1, pts2)
perspective_transformed = cv2.warpPerspective(image, M_perspective, (300, 300))
cv2.imshow('Perspective Transformation', perspective_transformed)
cv2.waitKey(0)
cv2.destroyAllWindows()
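A translation (shift) is another basic geometric transformation; the sketch below, which assumes the same image file as above, shifts the image with a 2x3 matrix passed to cv2.warpAffine (the 100 px / 50 px offsets are illustrative):
import cv2
import numpy as np
image = cv2.imread(r"D:\JAS\CV lab\rose.jpeg")
# Shift the image 100 px to the right and 50 px down
(h, w) = image.shape[:2]
M_translate = np.float32([[1, 0, 100], [0, 1, 50]])
translated_image = cv2.warpAffine(image, M_translate, (w, h))
cv2.imshow('Translated Image', translated_image)
cv2.waitKey(0)
cv2.destroyAllWindows()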
2. Perform image enhancement by applying Contrast Limited Adaptive Histogram Equalization (CLAHE).
import cv2
image = cv2.imread(r"D:\JAS\CV lab\rose.jpeg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Create a CLAHE (Contrast Limited Adaptive Histogram Equalization) object and apply it
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
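# clipLimit caps the per-tile contrast amplification (limits noise boosting);
# tileGridSize=(8, 8) equalizes the image over an 8x8 grid of local tiles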
enhanced_image = clahe.apply(gray)
#Display the images
cv2.imshow('Original Image', image)
cv2.imshow('Enhanced Image', enhanced_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
#cv2.imwrite('enhanced_image.jpg', enhanced_image)
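For comparison, global histogram equalization with cv2.equalizeHist can be applied to the same image; a minimal sketch, assuming the gray variable from the listing above, that shows the difference between global and contrast-limited adaptive equalization:
# Global equalization uses one histogram for the whole image, so it can
# over-amplify noise in flat regions; CLAHE works per tile with a clip limit
global_eq = cv2.equalizeHist(gray)
cv2.imshow('Global Histogram Equalization', global_eq)
cv2.waitKey(0)
cv2.destroyAllWindows()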
3. Perform contour and region-based segmentation on images.
import cv2
import numpy as np
image = cv2.imread(r'D:\JAS\CV lab\rose.jpeg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#Apply binary thresholding
_, binary_image = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
#Detect contours in the binary image
contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
image_with_contours = image.copy()
cv2.drawContours(image_with_contours, contours, -1, (0, 255, 0), 2)
#Display original and contour-detected images
cv2.imshow('Original Image', image)
cv2.imshow('Contours Detected', image_with_contours)
# Perform region-based segmentation
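# connectedComponents labels the background 0 and each foreground blob 1..N;
# adding 1 keeps all labelled regions positive, and resetting the original
# background pixels to 0 marks them as 'unknown' for watershed to resolve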
_, markers = cv2.connectedComponents(binary_image)
markers = markers + 1
markers[binary_image == 0] = 0
image_with_regions = image.copy()
cv2.watershed(image_with_regions, markers)
image_with_regions[markers == -1] = [0, 0, 255]
#Display segmented regions
cv2.imshow('Region-based Segmentation', image_with_regions)
cv2.waitKey(0)
cv2.destroyAllWindows()
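Individual contours can also be measured; a minimal sketch, assuming the contours list computed above, that reports the area and bounding box of each contour:
# Print the area and bounding rectangle of every detected contour
for i, cnt in enumerate(contours):
    area = cv2.contourArea(cnt)
    x, y, w, h = cv2.boundingRect(cnt)
    print(f'Contour {i}: area = {area:.1f}, bounding box = ({x}, {y}, {w}, {h})')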
4. Perform the wavelet transform on an image using PyWavelets.
import numpy as np
import matplotlib.pyplot as plt
import pywt
import pywt.data
# Load the standard "camera" test image (512x512, grayscale)
original = pywt.data.camera()
# Perform 2D Discrete Wavelet Transform (DWT)
coeffs2 = pywt.dwt2(original, 'bior1.3')
LL, (LH, HL, HH) = coeffs2
titles = ['Approximation (LL)', 'Horizontal detail (LH)', 'Vertical detail (HL)', 'Diagonal detail (HH)']
# Plot all sub-bands
fig = plt.figure(figsize=(12, 3))
for i, a in enumerate([LL, LH, HL, HH]):
    ax = fig.add_subplot(1, 4, i + 1)
    ax.imshow(a, interpolation="nearest", cmap=plt.cm.gray)
    ax.set_title(titles[i], fontsize=10)
    ax.set_xticks([])
    ax.set_yticks([])
fig.tight_layout()
plt.show()
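The DWT is invertible; a minimal sketch, continuing from the coeffs2 computed above, that reconstructs the image with pywt.idwt2 and reports the round-trip error:
# Inverse 2D DWT; the error should be near zero (floating-point precision)
reconstructed = pywt.idwt2(coeffs2, 'bior1.3')
print('Reconstructed shape:', reconstructed.shape)
print('Max round-trip error:', np.abs(original.astype(float) - reconstructed).max())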
5. Perform K-Means clustering for image segmentation using the cv2 library.
import numpy as np
import cv2
import matplotlib.pyplot as plt
original_image = cv2.imread(r"D:\JAS\CV lab\rose.jpeg")
img = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
# Reshape image for K-Means clustering
vectorized = img.reshape((-1, 3))
# Convert to float32 (required for cv2.kmeans)
vectorized = np.float32(vectorized)
# Define K-Means parameters
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
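# Stop after 10 iterations or once the cluster centers move by less than epsilon = 1.0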
K = 3
attempts = 10
# Apply K-Means Clustering
ret, label, center = cv2.kmeans(vectorized, K, None, criteria, attempts, cv2.KMEANS_PP_CENTERS)
center = np.uint8(center)
res = center[label.flatten()]
result_image = res.reshape(img.shape)
# Apply Canny Edge Detection
figure_size = 15
edges = cv2.Canny(img, 150, 200)
# Display Original vs Segmented
plt.figure(figsize=(figure_size, figure_size))
plt.subplot(1, 2, 1), plt.imshow(img)
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 2, 2), plt.imshow(result_image)
plt.title('Segmented Image when K = %i' % K), plt.xticks([]), plt.yticks([])
# Display Original vs Edges
plt.figure(figsize=(figure_size, figure_size))
plt.subplot(1, 2, 1), plt.imshow(img)
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 2, 2), plt.imshow(edges, cmap='gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.show()
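The choice of K controls how coarse the segmentation is; a minimal sketch, reusing the vectorized, criteria, and attempts variables from above (the K values are illustrative), that re-runs the clustering for several cluster counts:
# Re-run K-Means for a few K values and display each segmentation
for k in (2, 4, 6):
    _, labels_k, centers_k = cv2.kmeans(vectorized, k, None, criteria, attempts, cv2.KMEANS_PP_CENTERS)
    centers_k = np.uint8(centers_k)
    segmented_k = centers_k[labels_k.flatten()].reshape(img.shape)
    plt.figure(figsize=(5, 5))
    plt.imshow(segmented_k)
    plt.title('Segmented Image when K = %i' % k)
    plt.xticks([]), plt.yticks([])
plt.show()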