KEMBAR78
Ada Practical File | PDF | Algorithms And Data Structures | Algorithms
0% found this document useful (0 votes)
25 views36 pages

Ada Practical File

The document outlines various sorting algorithms implemented using arrays, including Merge Sort, Quick Sort, Bubble Sort, Radix Sort, Selection Sort, Heap Sort, Bucket Sort, and Shell Sort. Each algorithm is described with its steps, time complexity, and includes source code examples in C++. The document serves as a comprehensive guide for understanding and analyzing these sorting techniques.

Uploaded by

ojasbhatia13
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
25 views36 pages

Ada Practical File

The document outlines various sorting algorithms implemented using arrays, including Merge Sort, Quick Sort, Bubble Sort, Radix Sort, Selection Sort, Heap Sort, Bucket Sort, and Shell Sort. Each algorithm is described with its steps, time complexity, and includes source code examples in C++. The document serves as a comprehensive guide for understanding and analyzing these sorting techniques.

Uploaded by

ojasbhatia13
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 36

Index:

S.NO. NAME OF EXPERIMENT DATE PAGE NO. SIGNATURE


Experiment-1:
Aim:
To implement following algorithm using array as a data structure and analyse its time
complexity:
a. Merge Sort
b. Quick Sort
c. Bubble Sort
d. Radix Sort
e. Selection Sort
f. Heap Sort
g. Bucket Sort
h. Shell Sort
1-A (Merge sort):
Algorithm:
Step 1: Sort the left half of the array.
Step 2: Sort the right half of the array.
Step 3: Merge both the arrays.
The base case of recursion is array of size zero or one, which are in order by definition, so
they never need to be sorted.
Time Analysis:
Best Case: O(n logn)
Average Case: O(n logn)
Worst Case: O(n logn)
Space Complexity: O(n)

Source Code:
#include <bits/stdc++.h>
using namespace std;
// Print an elapsed tick count (clock_t) as seconds.
// NOTE(fix): the original definition ended with a trailing backslash, which
// spliced the following "#define askinp" line into this macro's replacement
// text and broke compilation.
#define printTime(x) \
    cout << "TIME: " << (float)(x) / CLOCKS_PER_SEC << " sec\n"
// Prompt the user with message x followed by a "$ " marker.
#define askinp(x) cout << x << "\n$ ";
// Print every element of a range-iterable array/container, space separated.
#define printArr(arr)     \
    for (int i : arr)     \
        cout << i << " "; \
    cout << endl;
// Merge Sort
// Merge Sort
// Merge the two sorted runs arr[left..mid] and arr[mid+1..right] in place.
// Uses O(right - left + 1) scratch space.
// NOTE(fix): the original used variable-length arrays (a non-standard GCC
// extension) for the scratch buffers, and compared with '<', which makes the
// sort unstable; '<=' keeps equal elements in their original order.
void merge(int arr[], int left, int mid, int right)
{
    std::vector<int> firstArr(arr + left, arr + mid + 1);
    std::vector<int> secondArr(arr + mid + 1, arr + right + 1);
    const int sizeFirst = (int)firstArr.size();
    const int sizeSecond = (int)secondArr.size();

    int i = 0, j = 0, k = left;
    // Repeatedly copy the smaller head element back into arr.
    while (i < sizeFirst && j < sizeSecond)
    {
        if (firstArr[i] <= secondArr[j]) // '<=' preserves stability
            arr[k++] = firstArr[i++];
        else
            arr[k++] = secondArr[j++];
    }
    // Drain whichever run still has elements.
    while (i < sizeFirst)
        arr[k++] = firstArr[i++];
    while (j < sizeSecond)
        arr[k++] = secondArr[j++];
}
// Recursively merge-sort arr[left..right] (inclusive bounds).
void applyms(int arr[], int left, int right)
{
    // Ranges of size zero or one are already sorted.
    if (right <= left)
        return;

    // Overflow-safe midpoint.
    const int mid = left + (right - left) / 2;

    applyms(arr, left, mid);      // sort the left half
    applyms(arr, mid + 1, right); // sort the right half
    merge(arr, left, mid, right); // combine the two sorted halves
}

// Public entry point: merge-sort the first n elements of arr.
void mergesort(int arr[], int n)
{
    const int lastIndex = n - 1;
    applyms(arr, 0, lastIndex);
}
// Driver: read a length, fill an array with random values in [0, 200),
// merge-sort it, and report the elapsed CPU time.
// NOTE(review): 'int arr[n]' is a variable-length array -- a GCC extension,
// not standard C++. rand() is never seeded, so runs are repeatable.
int main(void)
{
clock_t _t;
int n;
askinp("Enter Array Length");
cin >> n;
int arr[n];
// Fill with pseudo-random values.
for (int i = 0; i < n; i++)
{
arr[i] = rand() % 200;
}
cout << "Before Sorting: ";
printArr(arr);
_t = clock();
mergesort(arr, n);
_t = clock() - _t; // elapsed ticks spent sorting
cout << "After Sorting: ";
printArr(arr);

printTime(_t);
return 0;
}
Output:
1-B (Quick Sort):
Algorithm:
Step 1: Pick an element, called a pivot, from the array.
Step 2: Partitioning: reorder the array so that all elements with values less than the pivot
come before the pivot, while all elements with values greater than the pivot come after it.
After this, the pivot is in its final position. This is called the partition operation.
Step 3: Recursively apply the above steps to the sub-array of elements with smaller values
and separately to the sub-array of elements with greater value.
Time Complexity:
Best Case: O(n logn)
Average Case: O(n logn)
Worst Case: O(n^2)
Space Complexity: O(n)

Source Code:
#include <bits/stdc++.h>
using namespace std;
// Print an elapsed tick count (clock_t) as seconds.
// NOTE(fix): the original definition ended with a trailing backslash, which
// spliced the following "#define askinp" line into this macro's replacement
// text and broke compilation.
#define printTime(x) \
    cout << "TIME: " << (float)(x) / CLOCKS_PER_SEC << " sec\n"
// Prompt the user with message x followed by a "$ " marker.
#define askinp(x) cout << x << "\n$ ";
// Print every element of a range-iterable array/container, space separated.
#define printArr(arr)     \
    for (int i : arr)     \
        cout << i << " "; \
    cout << endl;
// Quick Sort

// Exchange the values held by a and b (pass-by-reference).
void swap(int &a, int &b)
{
    a = std::exchange(b, a);
}

// Lomuto partition: take arr[high] as the pivot, rearrange arr[low..high] so
// every element smaller than the pivot precedes it, and return the pivot's
// final index.
int partition(int arr[], int low, int high)
{
    const int pivot = arr[high];
    int boundary = low; // first slot not yet known to hold a small element

    for (int scan = low; scan < high; scan++)
    {
        if (arr[scan] < pivot)
        {
            std::swap(arr[boundary], arr[scan]);
            boundary++;
        }
    }
    // Drop the pivot into its final slot.
    std::swap(arr[boundary], arr[high]);
    return boundary;
}

// Recursively quick-sort arr[low..high] (inclusive bounds).
void quickSort(int arr[], int low, int high)
{
    if (low < high)
    {
        // Place one pivot element in its final position...
        const int pivotIndex = partition(arr, low, high);
        // ...then sort the elements on each side of it.
        quickSort(arr, low, pivotIndex - 1);
        quickSort(arr, pivotIndex + 1, high);
    }
}

// Driver: read a length, fill an array with random values in [0, 200),
// quick-sort it, and report the elapsed CPU time.
// NOTE(review): 'int arr[n]' is a variable-length array -- a GCC extension,
// not standard C++. rand() is never seeded, so runs are repeatable.
int main(void)
{
clock_t _t;
int n;
askinp("Enter Array Length");
cin >> n;
int arr[n];
for (int i = 0; i < n; i++)
{
arr[i] = rand() % 200;
}
cout << "Before Sorting: ";
printArr(arr);
_t = clock();
quickSort(arr, 0, n - 1);
_t = clock() - _t; // elapsed ticks spent sorting
cout << "After Sorting: ";
printArr(arr);
printTime(_t);
return 0;
}
Output:

1-C (Bubble Sort):


Algorithm:
Step 1: Begin Bubble Sort(arr)
Step 2: For all array elements
Step 3: If arr[i] > arr[i+1]
Step 4: swap (arr[i], arr[i+1])
Step 5: end if
Step 6: end for
Step 7: return arr
Step 8: end bubble sort
Time Complexity:
Best Case: O(n)
Average Case: O(n^2)
Worst Case: O(n^2)

Source Code:
#include <iostream>
#include <time.h>
using namespace std;
// Bubble sort a[0..n-1] in ascending order.
// Fix: added an early-exit flag so an already-sorted array is detected after
// one pass, giving the O(n) best case the write-up above claims; the
// original always ran all n-1 passes.
void bubbleSort(int a[], int n) {
    for (int i = 0; i < n - 1; i++) {
        bool swapped = false;
        // After pass i, the last i elements are already in final position.
        for (int j = 0; j < n - i - 1; j++) {
            if (a[j] > a[j+1]) {
                int temp = a[j];
                a[j] = a[j+1];
                a[j+1] = temp;
                swapped = true;
            }
        }
        if (!swapped)
            break; // no swaps => array is sorted
    }
}
// Print the first n elements of a, space separated, ending with a newline.
void printArray(int a[], int n) {
    int idx = 0;
    while (idx < n) {
        std::cout << a[idx] << " ";
        ++idx;
    }
    std::cout << std::endl;
}
// Driver: bubble-sort a fixed 10-element array and print the clock ticks used.
int main() {
int a[10] = {5,9,2,6,11,10,3,21,69,31};
cout<<"Original Array: ";
printArray(a, 10);
clock_t startTime = clock();
bubbleSort(a,10);
clock_t endTime = clock();
cout<<"Sorted Array: ";
printArray(a,10);
// NOTE(review): the difference below is in clock_t ticks; it equals
// microseconds only where CLOCKS_PER_SEC is 1,000,000 -- confirm on target.
cout<<"Time Taken: "<<endTime - startTime<<"μs";

return 0;
}

Output:

1-D (Radix Sort)


Algorithm:
Step 1: It is a non-comparative sorting algorithm. It avoids comparison by creating and
distributing elements into buckets according to their radix.
Step 2: Find the largest element in the array.
Step 3: Now, go through each significant place one by one. Use stable sorting technique to
sort the digits at each significant place. We have used counting sort for this.
Step 4: Now, sort the elements based on digits at tens place.
Step 5: Finally, sort the elements based on the digits at hundreds place.
Time Complexity:
Best Case: O(d*(n+b)), where d is the number of digits and b is the base (10 here)
Average Case: O(d*(n+b))
Worst Case: O(d*(n+b))
Space Complexity: O(n+b)
Source Code:
#include <iostream>
#include <vector>
using namespace std;
// Return the largest value among a[0..n-1] (assumes n >= 1).
int getMax(int a[], int n) {
    int best = a[0];
    for (int idx = n - 1; idx > 0; idx--) {
        if (best < a[idx])
            best = a[idx];
    }
    return best;
}
// Stable counting sort of a[0..n-1] by the digit selected by 'place'
// (1 = ones, 10 = tens, ...). Used as the per-digit pass of radix sort.
// Fix: the original used a variable-length array (int output[n + 1]), a
// non-standard compiler extension; std::vector is portable C++.
void countingSort(int a[], int n, int place) {
    std::vector<int> output(n);
    int count[10] = {0};
    // Histogram of the digit at 'place'.
    for (int i = 0; i < n; i++)
        count[(a[i] / place) % 10]++;
    // Prefix sums: count[d] becomes one past the last slot for digit d.
    for (int i = 1; i < 10; i++)
        count[i] += count[i - 1];
    // Walk backwards so equal digits keep their relative order (stability).
    for (int i = n - 1; i >= 0; i--) {
        output[count[(a[i] / place) % 10] - 1] = a[i];
        count[(a[i] / place) % 10]--;
    }
    for (int i = 0; i < n; i++)
        a[i] = output[i];
}
// LSD radix sort for non-negative integers: one stable counting-sort pass
// per decimal digit, least significant first.
// Fix: guard against n <= 0, which previously read a[0] inside getMax.
// NOTE(review): negative values are not supported (digit extraction assumes
// non-negative input), same as the original.
void radixsort(int a[], int n) {
    if (n <= 0)
        return;
    int max = getMax(a, n);
    for (int place = 1; max / place > 0; place *= 10)
        countingSort(a, n, place);
}

// Print the first n elements of a, separated by single spaces (no newline).
void printArray(int a[], int n) {
    int i = 0;
    while (i < n) {
        std::cout << a[i] << " ";
        ++i;
    }
}
// Driver: radix-sort a fixed array of 3-digit numbers and print before/after.
int main() {
int a[] = {171, 279, 380, 111, 135, 726, 504, 878, 112};
int n = sizeof(a) / sizeof(a[0]); // element count derived from the array
cout<<"Before sorting array elements are - \n";
printArray(a,n);
radixsort(a, n);
cout<<"\n\nAfter applying Radix sort, the array elements are - \n";
printArray(a, n);
return 0;
}

Output:

1-E (Selection Sort):


Algorithm:
Step 1: Set the first element as minimum.
Step 2: Compare minimum with the second element. If the second element is smaller than
minimum, assign the second element as minimum.
Compare minimum with the third element. Again, if the third element is smaller, then assign
minimum to the third element otherwise do nothing. The process goes on until the last
element.
Step 3: After each iteration, minimum is placed in the front of the unsorted list.
Step 4: For each iteration, indexing starts from the first unsorted element. Step 1 to 3 are
repeated until all the elements are placed at their correct positions.
Time Complexity:
Best Case: Ω(n)
Average Case: O(n^2)
Worst Case: O(n^2)
Source Code (note: the program below actually implements Insertion Sort, not Selection Sort):
#include <iostream>
#include <time.h>
using namespace std;
// Insertion sort: grow a sorted prefix one element at a time by shifting
// larger elements right and dropping the saved key into the gap.
void insertionSort(int arr[], int n) {
    for (int pos = 1; pos < n; pos++) {
        const int key = arr[pos];
        int hole = pos;
        // Shift prefix elements that exceed key one step to the right.
        while (hole > 0 && arr[hole - 1] > key) {
            arr[hole] = arr[hole - 1];
            hole--;
        }
        arr[hole] = key;
    }
}

// Print the first n elements of a, space separated, ending with a newline.
void printArray(int a[], int n) {
    int idx = 0;
    while (idx < n) {
        std::cout << a[idx] << " ";
        ++idx;
    }
    std::cout << std::endl;
}
// Driver: sort a fixed 10-element array and print the clock ticks used.
// NOTE(review): this experiment is titled "Selection Sort" but the function
// invoked below implements insertion sort.
int main() {
int a[10] = {5,9,2,6,11,10,3,21,69,31};
cout<<"Original Array: ";
printArray(a, 10);

clock_t startTime = clock();


insertionSort(a,10);
clock_t endTime = clock();
cout<<"Sorted Array: ";
printArray(a,10);
// NOTE(review): clock_t ticks, not necessarily microseconds (depends on
// CLOCKS_PER_SEC).
cout<<"Time Taken: "<<endTime - startTime<<"μs";
return 0;
}

Output:

1-F (Heap Sort):


Algorithm:
Step 1: The heap sort algorithm to arrange a list of elements in ascending order is performed.
Step 2: Construct a binary tree with given list of elements.
Step 3: Build a max heap from the list using the heapify method.
Step 4: Swap the root (the maximum element) with the last element of the heap and reduce the heap size by one.
Step 5: Heapify the root so the remaining elements again form a max heap; the removed element now occupies its sorted position.
Step 6: Repeat the same until the heap becomes empty.
Step 7: Display the List.
Time Complexity:
Best Case: O(n logn)
Average Case: O(n logn)
Worst Case: O(n logn)
Space Complexity: O(1)
Source Code:
#include <iostream>
using namespace std;
/* function to heapify a subtree. Here 'i' is the
index of root node in array a[], and 'n' is the size of heap. */
/* Sift-down step for a max-heap stored in a[0..n-1]: assuming the subtrees
   below index i already satisfy the heap property, move a[i] down until the
   subtree rooted at i is a valid max-heap. */
void heapify(int a[], int n, int i)
{
    const int left = 2 * i + 1;  // left child index
    const int right = 2 * i + 2; // right child index
    int largest = i;
    // Pick the largest of the root and its (up to two) children.
    if (left < n && a[largest] < a[left])
        largest = left;
    if (right < n && a[largest] < a[right])
        largest = right;
    // If a child won, swap it up and continue sifting down.
    if (largest != i) {
        const int temp = a[i];
        a[i] = a[largest];
        a[largest] = temp;
        heapify(a, n, largest);
    }
}
// Heap sort: build a max-heap, then repeatedly move the maximum to the end
// of the shrinking unsorted region.
void heapSort(int a[], int n)
{
    // Heapify every internal node, bottom-up.
    for (int node = n / 2 - 1; node >= 0; node--)
        heapify(a, n, node);
    // Extract the maximum n times.
    for (int end = n - 1; end >= 0; end--)
    {
        const int temp = a[0];
        a[0] = a[end];
        a[end] = temp;
        heapify(a, end, 0); // restore the heap on the prefix a[0..end-1]
    }
}
// Print the first n elements of a, space separated (no trailing newline).
void printArr(int a[], int n)
{
    int i = 0;
    while (i < n)
    {
        std::cout << a[i] << " ";
        ++i;
    }
}
// Driver: heap-sort a fixed array and print it before and after.
int main()
{
int a[] = {47, 9, 22, 42, 27, 25, 0};
int n = sizeof(a) / sizeof(a[0]); // element count derived from the array
cout<<"Before sorting array elements are - \n";
printArr(a, n);
heapSort(a, n);
cout<<"\nAfter sorting array elements are - \n";
printArr(a, n);
return 0;
}

Output:
1-G (Bucket Sort):
Algorithm:
Step 1: Create n empty buckets (Or lists).
Step 2: Insert arr[i] into bucket[n*arr[i]]
Step 3: Sort individual buckets using insertion sort.
Step 4: Concatenate all sorted buckets.
Time Complexity:
Best Case: O(n + k)
Average Case: O(n +k)
Worst Case: O(n^2)
Source Code:
#include <algorithm>
#include <iostream>
#include <vector>
using namespace std;
// Bucket sort for n floats assumed to lie in [0, 1): scatter into n buckets
// by value, sort each bucket, then concatenate.
// Fixes: (1) the original indexed b[n * arr[i]], which is out of bounds when
// arr[i] == 1.0 (or negative) -- the bucket index is now clamped to
// [0, n-1]; (2) 'vector<float> b[n]' was a variable-length array, a
// non-standard extension -- replaced with a vector of vectors.
void bucketSort(float arr[], int n)
{
    if (n <= 0)
        return;
    std::vector<std::vector<float> > b(n);
    for (int i = 0; i < n; i++) {
        int bi = (int)(n * arr[i]); // target bucket for arr[i]
        if (bi < 0)
            bi = 0;
        else if (bi >= n)
            bi = n - 1;
        b[bi].push_back(arr[i]);
    }

    // Sort each bucket individually.
    for (int i = 0; i < n; i++)
        std::sort(b[i].begin(), b[i].end());

    // Concatenate the buckets back into arr.
    int index = 0;
    for (int i = 0; i < n; i++)
        for (size_t j = 0; j < b[i].size(); j++)
            arr[index++] = b[i][j];
}

// Driver: bucket-sort a fixed array of floats in [0, 1) and print the result.
int main()
{
float arr[]
= { 0.897, 0.565, 0.656, 0.1234, 0.665, 0.3434 };
int n = sizeof(arr) / sizeof(arr[0]);
bucketSort(arr, n);

cout << "Sorted array is \n";


for (int i = 0; i < n; i++)
cout << arr[i] << " ";
return 0;
}
Output:

1-H (Shell Sort):


Algorithm:
Step 1: Start
Step 2: Initialize the value of gap size. Example: h
Step 3: Divide the list into smaller sub-part. Each must have equal intervals to h
Step 4: Sort these sub-lists using insertion sort
Step 5: Repeat this step 2 until the list is sorted.
Step 6: Print a sorted list.
Step 7: Stop.
Time Complexity:
Best Case: O(n logn)
Average Case: O(n logn)
Worst Case: O(n^2)
Source Code:
#include <iostream>
using namespace std;
// Shell sort with the classic gap sequence n/2, n/4, ..., 1: each pass is a
// gap-strided insertion sort. Always returns 0 (kept for interface
// compatibility).
int shellSort(int arr[], int n)
{
    int gap = n / 2;
    while (gap > 0)
    {
        // Gapped insertion sort: arr[0..gap-1] is trivially gap-sorted.
        for (int i = gap; i < n; i++)
        {
            const int temp = arr[i];
            int j = i;
            while (j >= gap && arr[j - gap] > temp)
            {
                arr[j] = arr[j - gap];
                j -= gap;
            }
            arr[j] = temp;
        }
        gap /= 2;
    }
    return 0;
}
// Print the first n elements of arr, space separated (no trailing newline).
void printArray(int arr[], int n)
{
    int i = 0;
    while (i < n)
    {
        std::cout << arr[i] << " ";
        ++i;
    }
}

// Driver: shell-sort a fixed 5-element array and print it before and after.
int main()
{
int arr[] = {50, 20, 30, 10, 40}, i; // NOTE(review): 'i' is unused
int n = sizeof(arr)/sizeof(arr[0]);

cout << "Array before sorting: \n";


printArray(arr, n);

shellSort(arr, n);

cout << "\nArray after sorting: \n";


printArray(arr, n);

return 0;
}
Output:
Experiment-2:
Aim:
To implement Linear Search and Binary Search and analyze their time complexities.
2-A (Linear Search):
Introduction:
Linear Search is a method for finding an element within a list. It sequentially checks elements
of the list until a match is found.
A linear search makes at most n comparisons, where n is the length of the list.
Algorithm:
Step 1: Set i = 0
Step 2: If i>n, the goto step 7.
Step 3: If A[i] = X, then goto step 6.
Step 4: Set i  i+1
Step 5: Goto step 2.
Step 6: Print element X found at index i and goto step 8.
Step 7: Print element not found.
Step 8: Exit.
Time Complexity:
Best Case: O(1)
Average Case: O(n)
Worst Case: O(n)
Source Code:
#include <iostream>
#include <time.h>
using namespace std;
// Linear search demo: scan a fixed 7-element array for a user-supplied value
// and report whether it was found, plus the elapsed CPU time.
// Fix: the original loop ran i < 10 over a 7-element array, reading past the
// end of arr (undefined behavior); the bound is now derived from the array.
int main()
{
    clock_t start, end;
    double cpu_time_used;
    start = clock();
    int arr[7] = {10,55,20,30,90,5,2};
    const int n = sizeof(arr) / sizeof(arr[0]);
    int x;
    cout << "Enter element to be searched: ";
    cin >> x;
    int flag = 0;
    for (int i = 0; i < n; i++)
    {
        if (arr[i] == x)
        {
            flag = 1; // found: stop scanning
            break;
        }
    }
    if (flag == 1)
        cout << "\nElement found!";
    else
        cout << "\nElement not found!";

    end = clock();
    cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("\nTime taken %f seconds to execute linear search",
           cpu_time_used);
    return 0;
}

Output:

2-B (Binary Search):


Introduction:
Search a sorted array by repeatedly dividing the search interval in half. Begin with an interval
converting the whole array of value of the search key is less than the item in the middle of
interval, narrow the interval to the lower half otherwise check until the value is found or the
interval is empty.
Algorithm:
Step 1: Let l = 0; r= n-1, where n is array size.
Step 2: While (l<=r), repeat step 3, 4, 5, 6.
Step 3: Set mid = (l+r)/2.
Step 4: If a[mid] == s, print “Found” and return.
Step 5: If s>a[mid], set l = mid + 1.
Step 6: Else set r = mid -1.
Step 7: Print “Not Found”.
Time Complexity:
Best Case: O(1)
Average Case: O(logn)
Worst Case: O(logn)
Source Code:
#include <iostream>
#include <time.h>
using namespace std;
// Binary search demo over a fixed sorted 7-element array.
// Fix: the original initialized the right bound j to 9 on a 7-element array,
// so searching for a missing large value read past the end of arr (undefined
// behavior); the bound is now derived from the array size. The unreachable
// trailing 'else' (the three comparisons were exhaustive) was removed.
int main()
{
    clock_t start, end;
    double cpu_time_used;
    start = clock();
    int arr[] = {10,20,30,50,60,100,120}; //sorted array
    const int n = sizeof(arr) / sizeof(arr[0]);
    int x;
    cout << "Enter element to be searched: ";
    cin >> x;
    int i = 0, j = n - 1;
    int flag = 0;
    while (i <= j)
    {
        int mid = i + (j - i) / 2; // overflow-safe midpoint
        if (arr[mid] < x)
            i = mid + 1;
        else if (arr[mid] > x)
            j = mid - 1;
        else
        {
            flag = 1; // arr[mid] == x
            break;
        }
    }
    if (flag == 1)
        cout << "\nElement found!";
    else
        cout << "\nElement not found!";

    end = clock();
    cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("\nTime taken %f seconds to execute binary search", cpu_time_used);
    return 0;
}
Output:
Experiment-3:
Aim:
To implement matrix multiplication and analyze its time complexity.
Introduction:
Matrix chain multiplication is an optimization problem that can be solved using dynamic
programming. Given a sequence of matrices, the goal is to find the most efficient way to
multiply these matrices.
Algorithm:
Step 1: Start the Program.

Step 2: Enter the row and column of the first (a) matrix.

Step 3: Enter the row and column of the second (b) matrix.

Step 4: Enter the elements of the first (a) matrix.

Step 5: Enter the elements of the second (b) matrix.

Step 6: Print the elements of the first (a) matrix in matrix form.

Step 7: Print the elements of the second (b) matrix in matrix form.

Step 8: Set a loop up to row.

Step 9: Set an inner loop up to the column.

Step 10: Set another inner loop up to the column.

Step 11: Multiply the first(a) and second(b) matrix and store the element in the third

matrix(c)

Step 12: Print the final matrix.

Step 13: Stop the Program.

Source Code:
#include <iostream>
using namespace std;
// Read two r x c matrices and print their matrix product.
// NOTE(review): the inner k-loop runs over c and indexes b[k][j], so the
// result is only a valid matrix product when r == c (both matrices square);
// for r != c it reads rows of b that were never filled in -- confirm the
// intended usage.
int main()
{
int a[10][10], b[10][10], mul[10][10], r, c, i, j, k;
cout << "enter the number of row=";
cin >> r;
cout << "enter the number of column=";
cin >> c;
// NOTE(review): r and c are not validated against the fixed 10x10 storage.
cout << "enter the first matrix element=\n";
for (i = 0; i < r; i++)
{
for (j = 0; j < c; j++)
{
cin >> a[i][j];
}
}
cout << "enter the second matrix element=\n";
for (i = 0; i < r; i++)
{
for (j = 0; j < c; j++)
{
cin >> b[i][j];
}
}
cout << "multiply of the matrix=\n";
for (i = 0; i < r; i++)
{
for (j = 0; j < c; j++)
{
mul[i][j] = 0;
// Dot product of row i of a with column j of b.
for (k = 0; k < c; k++)
{
mul[i][j] += a[i][k] * b[k][j];
}
}
}
// for printing result
for (i = 0; i < r; i++)
{
for (j = 0; j < c; j++)
{
cout << mul[i][j] << " ";
}
cout << "\n";
}
return 0;
}
Output:
Experiment-4:
Aim:
To implement Longest Common Subsequence Problem and analyse its time complexity.
Introduction:
The longest common subsequence (LCS) is defined as the longest subsequence that is
common to all the given sequences, provided that the elements of the subsequence are not
required to occupy consecutive positions within the original sequences.

If S1 and S2 are the two given sequences then, Z is the common subsequence of S1 and S2 if
Z is a subsequence of both S1 and S2. Furthermore, Z must be a strictly increasing sequence
of the indices of both S1 and S2.

In a strictly increasing sequence, the indices of the elements chosen from the original
sequences must be in ascending order in Z.
Algorithm:
Step 1: Create a table of dimension n+1*m+1 where n and m are the lengths of X and Y
respectively. The first row and the first column are filled with zeros.
Step 2: Fill each cell of the table using the following logic.
Step 3: If the characters corresponding to the current row and current column match,
then fill the current cell by adding one to the diagonal element. Point an arrow to the diagonal
cell.
Step 4: Else take the maximum value from the previous column and previous row element
for filling the current cell. Point an arrow to the cell with maximum value. If they are equal,
point to any of them.
Step 5: Step 2 is repeated until the table is filled.
Step 6: The value in the last row and the last column is the length of the longest common
subsequence.
Step 7: In order to find the longest common subsequence, start from the last element and
follow the direction of the arrow. The elements corresponding to () symbol form the longest
common subsequence.
Time Complexity:
O(m*n)
Source Code:
#include <bits/stdc++.h>
using namespace std;
// Length of the longest common subsequence of X[0..m-1] and Y[0..n-1].
// Fix: replaced the plain recursion, which is O(2^n) in the worst case, with
// the bottom-up dynamic-programming table, giving the O(m*n) time the
// write-up above states. Interface and results are unchanged.
int lcs( char *X, char *Y, int m, int n )
{
    // dp[i][j] = LCS length of the first i chars of X and first j of Y.
    // Row 0 and column 0 stay 0 (LCS with an empty prefix is empty).
    std::vector<std::vector<int> > dp(m + 1, std::vector<int>(n + 1, 0));
    for (int i = 1; i <= m; i++)
    {
        for (int j = 1; j <= n; j++)
        {
            if (X[i - 1] == Y[j - 1])
                dp[i][j] = 1 + dp[i - 1][j - 1]; // characters match: extend
            else
                dp[i][j] = std::max(dp[i - 1][j], dp[i][j - 1]);
        }
    }
    return dp[m][n];
}
// Driver: compute the LCS length of two fixed strings (expected output: 4).
int main()
{
char X[] = "AGGTAB";
char Y[] = "GXTXAYB";

int m = strlen(X);
int n = strlen(Y);

cout<<"Length of LCS is "<< lcs( X, Y, m, n ) ;

return 0;
}
Output:
Experiment-5:
Aim:
To implement Optimal Binary Search Tree problem and analyse its time complexity.
Introduction:
A set of integers are given in the sorted order and another array freq to frequency count. Our
task is to create a binary search tree with those data to find the minimum cost for all
searches.
An auxiliary array cost[n, n] is created to solve and store the solution of subproblems. Cost
matrix will hold the data to solve the problem in a bottom-up manner.
Algorithm:
Step 1: Check if tree is NULL, if the tree is not NULL then follow the following steps.
Step 2: Compare the key to be searched with the root of the BST.
Step 3: If the key is lesser than the root then search in the left subtree.
Step 4: If the key is greater than the root then search in the right subtree.
Step 5: If the key is equal to root then, return and print search successful.
Step 6: Repeat step 3, 4 or 5 for the obtained subtree.
Time Complexity:
Best Case: Ω(n^3)
Average Case: Θ(n^3)
Worst Case: O(n^3)
Source Code:
#include <bits/stdc++.h>
using namespace std;
int sum(int freq[], int i, int j);
// Expected search cost of an optimal BST built over keys i..j (inclusive),
// given their access frequencies: try every key as the root, recurse on the
// two sides, and add the total frequency of the range (each node in the
// range gains one level of depth under the chosen root).
int optCost(int freq[], int i, int j)
{
    // Empty range costs nothing; a single key costs its own frequency.
    if (j < i)
        return 0;
    if (j == i)
        return freq[i];

    const int fsum = sum(freq, i, j);
    int best = INT_MAX;
    // Root the subtree at every candidate r and keep the cheapest split.
    for (int r = i; r <= j; ++r)
    {
        const int cost = optCost(freq, i, r - 1) +
                         optCost(freq, r + 1, j);
        if (cost < best)
            best = cost;
    }
    return best + fsum;
}
// Public entry: optimal-BST cost over all n keys. The keys array itself is
// not used by the cost computation (only the frequencies matter).
int optimalSearchTree(int keys[],
int freq[], int n)
{
    (void)keys; // kept for the interface; silences unused-parameter warnings
    return optCost(freq, 0, n - 1);
}
// Total of freq[i..j] inclusive; returns 0 for an empty range (j < i).
int sum(int freq[], int i, int j)
{
    if (j < i)
        return 0;
    return std::accumulate(freq + i, freq + j + 1, 0);
}
// Driver: compute the optimal BST cost for three keys with given frequencies.
int main()
{
int keys[] = {10, 12, 20};
int freq[] = {34, 8, 50};
int n = sizeof(keys) / sizeof(keys[0]);
cout << "Cost of Optimal BST is "
<< optimalSearchTree(keys, freq, n);
return 0;
}
Output:
Experiment-6:
Aim:
To implement Huffman Coding and analyze its time complexity.
Introduction:
Huffman Coding is a technique of compressing data to reduce its size without losing any of
the details. It was first developed by David Huffman. Huffman Coding is generally useful to
compress the data in which there are frequently occurring characters. Using the Huffman
Coding technique, we can compress the string to a smaller size. Huffman coding first creates
a tree using the frequencies of the character and then generates code for each character.
Algorithm:
Step 1: Calculate the frequency of each character in the string.
Step 2: Sort the characters in increasing order of the frequency. These are stored in a priority
queue Q.
Step 3: Make each unique character as a leaf node.
Step 4: Create an empty node z. Assign the minimum frequency to the left child of z and
assign the second minimum frequency to the right child of z. Set the value of the z as the sum
of the above two minimum frequencies.
Step 5: Remove these two minimum frequencies from Q and add the sum into the list of
frequencies.
Step 6: Insert node z into the tree.
Step 7: Repeat steps 3 to 5 for all the characters and for each non-leaf node, assign 0 to the
left edge and 1 to the right edge.
Time Complexity:
O(n logn)
Source Code:
#include<iostream>
#include<queue>
#include<string>
using namespace std;
// Huffman-tree node. Leaves carry a character and its frequency; internal
// nodes carry the summed frequency of their two children.
struct node{
int freq; // occurrence count (subtree total for internal nodes)
char data; // the character; 0 for internal nodes
const node *child0, *child1; // children; both NULL for a leaf
// Leaf constructor.
node(char d, int f = -1){
data = d;
freq = f;
child0 = NULL;
child1 = NULL;
}
// Internal-node constructor: merges two subtrees.
node(const node *c0, const node *c1){
data = 0;
freq = c0->freq + c1->freq;
child0=c0;
child1=c1;
}
// Reversed comparison so std::priority_queue (a max-heap) yields the
// LOWEST-frequency node first.
bool operator<( const node &a ) const {
return freq >a.freq;
}
// Depth-first walk printing each leaf's character, frequency, and the 0/1
// code accumulated along the root-to-leaf path (0 = left, 1 = right).
void traverse(string code = "")const{
if(child0!=NULL){
child0->traverse(code+'0');
child1->traverse(code+'1');
}else{
cout << "Data: " << data<< ", Frequency: "<<freq << ", Code: " << code<<endl;
}
}
};
void huffmanCoding(string str){
priority_queue<node> qu;
int frequency[256];
for(int i = 0; i<256; i++)
frequency[i] = 0;
for(int i = 0; i<str.size(); i++){
frequency[int(str[i])]++;
}
for(int i = 0; i<256; i++){
if(frequency[i]){
qu.push(node(i, frequency[i]));
}
}
while(qu.size() >1){
node *c0 = new node(qu.top());
qu.pop();
node *c1 = new node(qu.top());
qu.pop();
qu.push(node(c0, c1));
}
cout << "The Huffman Code: "<<endl;
qu.top().traverse();
}
// Driver: print the Huffman codes for a sample string.
// Fix: 'main()' with no return type is not valid C++; declared as int main.
int main(){
    string str = "ACCEBFFFFAAXXBLKE";
    huffmanCoding(str);
    return 0;
}
Output:
Experiment-7:
Aim:
To implement Dijkstra’s algorithm and analyze its time complexity.
Introduction:
Dijkstra's algorithm allows us to find the shortest path between any two vertices of a graph. It
differs from the minimum spanning tree because the shortest distance between two vertices
might not include all the vertices of the graph. Dijkstra used this property in the opposite
direction i.e. we overestimate the distance of each vertex from the starting vertex. Then we
visit each node and its neighbors to find the shortest sub-path to those neighbors. The
algorithm uses a greedy approach in the sense that we find the next best solution hoping that
the end result is the best solution for the whole problem.
Algorithm:
Step 1: Start with a weighted graph.
Step 2: Choose a starting vertex and assign infinity path values to all other devices.
Step 3: Go to each vertex and update its path length.
Step 4: If the path length of the adjacent vertex is lesser than new path length, don't update it.
Step 5: Avoid updating path lengths of already visited vertices.
Step 6: After each iteration, we pick the unvisited vertex with the least path length. So we
choose 5 before 7.
Step 7: Notice how the rightmost vertex has its path length updated twice.
Step 8: Repeat until all the vertices have been visited.
Time Complexity: O (E logV)
Space Complexity: O (V)
Source Code:
#include<iostream>
#include<stdio.h>
using namespace std;
#define INFINITY 9999
#define max 5
void dijkstra(int G[max][max],int n,int startnode);
// Driver: run Dijkstra from node 0 on a fixed 5-node weighted adjacency
// matrix (0 entries mean "no edge").
int main() {
int G[max][max]={{0,1,0,3,10},{1,0,5,0,0},{0,5,0,2,1},{3,0,2,0,6},{10,0,1,6,0}};
int n=5;
int u=0; // source vertex
dijkstra(G,n,u);
return 0;
}
// Single-source shortest paths (Dijkstra) on the n x n adjacency matrix G,
// where 0 means "no edge". Prints the distance and path from startnode to
// every other node.
// Fix: on a disconnected graph no unvisited node is reachable in some
// iteration, and 'nextnode' was then read uninitialized (undefined
// behavior); the loop now stops early when no reachable unvisited node
// remains.
void dijkstra(int G[max][max],int n,int startnode) {
    int cost[max][max],distance[max],pred[max];
    int visited[max],count,mindistance,nextnode,i,j;
    // Build the cost matrix: treat missing edges (0) as "infinite" cost.
    for(i=0;i<n;i++)
        for(j=0;j<n;j++)
            if(G[i][j]==0)
                cost[i][j]=INFINITY;
            else
                cost[i][j]=G[i][j];
    // Initialize tentative distances with the direct edges from startnode.
    for(i=0;i<n;i++) {
        distance[i]=cost[startnode][i];
        pred[i]=startnode;
        visited[i]=0;
    }
    distance[startnode]=0;
    visited[startnode]=1;
    count=1;
    // Visit nodes in order of increasing tentative distance. n-1 rounds
    // suffice: the final unvisited node's distance can no longer improve.
    while(count<n-1) {
        mindistance=INFINITY;
        nextnode=-1;
        // Pick the closest unvisited node.
        for(i=0;i<n;i++)
            if(distance[i]<mindistance&&!visited[i]) {
                mindistance=distance[i];
                nextnode=i;
            }
        if(nextnode==-1)
            break; // remaining nodes are unreachable
        visited[nextnode]=1;
        // Relax every edge leaving the chosen node.
        for(i=0;i<n;i++)
            if(!visited[i])
                if(mindistance+cost[nextnode][i]<distance[i]) {
                    distance[i]=mindistance+cost[nextnode][i];
                    pred[i]=nextnode;
                }
        count++;
    }
    // Report distance and reconstructed path for every non-source node.
    for(i=0;i<n;i++)
        if(i!=startnode) {
            cout<<"\nDistance of node"<<i<<"="<<distance[i];
            cout<<"\nPath="<<i;
            j=i;
            do {
                j=pred[j];
                cout<<"<-"<<j;
            }while(j!=startnode);
        }
}
Output:
Experiment-8:
Aim:
To implement Bellman Ford Algorithm and analyze its time complexity.
Introduction:
Bellman Ford algorithm helps us find the shortest path from a vertex to all other vertices of a
weighted graph. It is similar to Dijkstra's algorithm but it can work with graphs in which
edges can have negative weights.
Bellman Ford algorithm works by overestimating the length of the path from the starting
vertex to all other vertices. Then it iteratively relaxes those estimates by finding new paths
that are shorter than the previously overestimated paths.
Algorithm:
Step 1: Start with the weighted graph.
Step 2: Choose a starting vertex and assign infinity path values to all other vertices.
Step 3: Visit each edge and relax the path distances if they are inaccurate.
Step 4: We need to do this V times because in the worst case, a vertex’s path length might
need to be readjusted V times.
Step 5: Notice how the vertex at the top right corner had its path length adjusted.
Step 6: After all the vertices have their path lengths, we check if a negative cycle is present.
Time Complexity:
Best Case: O(E)
Average Case: O(VE)
Worst Case: O(VE)
Space Complexity: O(V)
Source Code:
#include <bits/stdc++.h>
// A single directed, weighted edge.
struct Edge {
int src, dest, weight;
};
// Edge-list graph representation: V vertices and E edges.
struct Graph {
int V, E;
struct Edge* edge; // array of E edges, allocated by createGraph
};
// Allocate a Graph with capacity for V vertices and E edges. The caller
// owns the returned graph and its edge array (neither is freed here).
struct Graph* createGraph(int V, int E) {
    struct Graph* g = new Graph;
    g->edge = new Edge[E];
    g->V = V;
    g->E = E;
    return g;
}
// Bellman-Ford single-source shortest paths from src: relax every edge V-1
// times, then scan once more to detect a reachable negative-weight cycle.
// NOTE(review): 'int dist[V]' is a variable-length array -- a compiler
// extension, not standard C++.
void BellmanFord(struct Graph* graph, int src) {
int V = graph->V;
int E = graph->E;
int dist[V];
// Every vertex starts "infinitely" far away, except the source.
for (int i = 0; i < V; i++)
dist[i] = INT_MAX;
dist[src] = 0;
// V-1 relaxation rounds: any shortest path has at most V-1 edges.
for (int i = 1; i <= V - 1; i++) {
for (int j = 0; j < E; j++) {
int u = graph->edge[j].src;
int v = graph->edge[j].dest;
int weight = graph->edge[j].weight;
// The INT_MAX check avoids overflowing when u is still unreachable.
if (dist[u] != INT_MAX && dist[u] + weight < dist[v])
dist[v] = dist[u] + weight;
}
}
// If any edge can still be relaxed, a negative cycle is reachable.
for (int i = 0; i < E; i++) {
int u = graph->edge[i].src;
int v = graph->edge[i].dest;
int weight = graph->edge[i].weight;
if (dist[u] != INT_MAX && dist[u] + weight < dist[v]) {
printf("Graph contains negative weight cycle");
return;
}
}
printf("Vertex :\t\t");
for (int i = 0; i < V; ++i)
printf("%d \t", i);
printf("\nDistance From Source : ");
for (int i = 0; i < V; ++i)
printf("%d \t",dist[i]);
return;
}
// Driver: build a fixed 5-vertex, 8-edge graph (with negative weights) and
// run Bellman-Ford from vertex 0.
int main() {
    int V = 5;
    int E = 8;
    struct Graph* graph = createGraph(V, E);
    // {src, dest, weight} for each of the 8 edges.
    const int edgeData[8][3] = {
        {0, 1, -1}, {0, 2, 4}, {1, 2, 3}, {1, 3, 2},
        {1, 4, 2},  {3, 2, 5}, {3, 1, 1}, {4, 3, -3}
    };
    for (int i = 0; i < E; i++) {
        graph->edge[i].src = edgeData[i][0];
        graph->edge[i].dest = edgeData[i][1];
        graph->edge[i].weight = edgeData[i][2];
    }
    BellmanFord(graph, 0);
    return 0;
}

Output:

You might also like