Snippets Collections
import React, { useState } from "react";
import {
  BackHomeButton,
  CommandPalletteButton,
  MinimalPage,
  PageHeading,
} from "ui";
import { BugReportButton, CommandInterface, Navigation } from "@/components";
import ManagerAndCoatingForm from "@/components/forms/managerAndCoatingForm";
import { UpdateCoating, UpdateManager } from "@/types/vessel";

const MenteithUpdater: React.FC = () => {
  const [managerData, setManagerData] = useState<UpdateManager[]>([]);
  const [coatingData, setCoatingData] = useState<UpdateCoating[]>([]);

  return (
    <MinimalPage
      pageTitle={"Update Vessel Manager | Vessel Interface"}
      pageDescription={"Vessel Interface | Update Vessel Manager"}
    >
      <div className="flex w-full flex-row justify-between pl-1 pt-1">
        <div>
          <BackHomeButton />
        </div>
        <Navigation />
        <div className="flex flex-row gap-4">
          <BugReportButton />
          <CommandPalletteButton />
          <CommandInterface />
        </div>
      </div>

      <PageHeading text="Update Vessel Manager" />

      <ManagerAndCoatingForm
        onManagerDataUpdate={setManagerData}
        onCoatingDataUpdate={setCoatingData}
      />
    </MinimalPage>
  );
};

export default MenteithUpdater;
import java.util.*;

public class Knapsack {

    public static double greedyKnapSack(ItemValue[] items, int capacity) {
        Arrays.sort(items, (a, b) -> Double.compare((double) b.profit / b.weight, (double) a.profit / a.weight));

        double totalProfit = 0;
        for (ItemValue item : items) {
            if (capacity >= item.weight) {
                capacity -= item.weight;
                totalProfit += item.profit;
            } else {
                totalProfit += (double) capacity / item.weight * item.profit;
                break;
            }
        }
        return totalProfit;
    }

    public static void main(String[] args) {
        Scanner sc = new Scanner(System.in);
        System.out.print("Enter number of items: ");
        int n = sc.nextInt();
        ItemValue[] items = new ItemValue[n];
        
        System.out.println("Enter weight and profit of each item:");
        for (int i = 0; i < n; i++) {
            items[i] = new ItemValue(sc.nextInt(), sc.nextInt());
        }

        System.out.print("Enter capacity: ");
        int capacity = sc.nextInt();
        
        System.out.println("Maximum profit: " + greedyKnapSack(items, capacity));
        sc.close();
    }
}

class ItemValue {
    int weight, profit;
    ItemValue(int weight, int profit) {
        this.weight = weight;
        this.profit = profit;
    }
}




function greedyKnapSack(items, n, W):
    sort items in descending order of (profit/weight)

    totalProfit = 0
    remainingCapacity = W

    for each item in items:
        if remainingCapacity >= item's weight:
            totalProfit += item's profit
            remainingCapacity -= item's weight
        else:
            fraction = remainingCapacity / item's weight
            totalProfit += fraction * item's profit
            break

    return totalProfit





Procedure GREEDY_KNAPSACK (P, W, M, X, n):

P(1:n) and W(1:n) contain the profits and weights respectively of the n objects, arranged so that P(i)/W(i) ≥ P(i+1)/W(i+1).

M is the knapsack size, and X(1:n) is the solution vector.


real P(1:n), W(1:n), X(1:n), M, cu;  
integer i, n;  

X ← 0  // Initialize solution to zero  
cu ← M  // cu = remaining knapsack capacity  

for i ← 1 to n do  
    if W(i) > cu then exit endif  
    X(i) ← 1  
    cu ← cu - W(i)  
repeat  

if i ≤ n then X(i) ← cu/W(i) endif  

end GREEDY_KNAPSACK



OUTPUT:

Enter number of items: 3
Enter weight and profit of each item:
10 60
20 100
30 120
Enter capacity: 50
Maximum profit: 240.0
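
For reference, a minimal Python sketch of the same greedy procedure (function and variable names here are illustrative, not taken from the programs above):

def greedy_knapsack(items, capacity):
    """Fractional knapsack: items is a list of (weight, profit) pairs."""
    # Sort by profit/weight ratio, descending
    items = sorted(items, key=lambda it: it[1] / it[0], reverse=True)

    total_profit = 0.0
    remaining = capacity
    for weight, profit in items:
        if remaining >= weight:
            # Take the whole item
            remaining -= weight
            total_profit += profit
        else:
            # Take the fraction that still fits, then stop
            total_profit += profit * remaining / weight
            break
    return total_profit

# Same data as the sample run above; prints 240.0
print(greedy_knapsack([(10, 60), (20, 100), (30, 120)], 50))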
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

data = pd.read_csv('Pune_rent.csv')

print(data.head())
print(data.info())

X = data.drop(columns=['rent'])
y = data['rent']

X = pd.get_dummies(X, drop_first=True)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

mae = mean_absolute_error(y_test, y_pred)
rmse = mean_squared_error(y_test, y_pred, squared=False)
r2 = r2_score(y_test, y_pred)

print("Model Performance:")
print(f"Mean Absolute Error (MAE): {mae:.2f}")
print(f"Root Mean Squared Error (RMSE): {rmse:.2f}")
print(f"R² Score: {r2:.2f}")
import pandas as pd

data = {
    'Age': [10, None, 30],
    'Name': ['a', 'b', None],
    'City': ['x', 'y', 'z'],
}
df = pd.DataFrame(data)
print(df)

df_fill = df.fillna({'Name': 'Gilbert', 'Age': df['Age'].mean()})
print(df_fill)

df_drop = df.dropna()
print(df_drop)

print("no. of missing values:\n", df.isnull().sum())
import matplotlib.pyplot as plt
x=[1,2,3,4,5]
y=[14,46,8,72,34]
plt.plot(x,y)
plt.title("practice plot")
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report

iris = load_iris()
df = pd.DataFrame(data=iris.data, columns=iris.feature_names)
print(df.head())
df['Species'] = iris.target
x = df.drop('Species', axis=1)
y = df['Species']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
model = DecisionTreeClassifier(random_state=42)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print(accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split

iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
print(df.head())
df['Species'] = iris.target
x = df.drop('Species', axis=1)
y = df['Species']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
model = KNeighborsClassifier(n_neighbors=3)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print(accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, precision_score

df = pd.read_csv("loan_data_set.csv")
print(df.head())
x = df.drop('Loan_Status', axis=1)  # features only; keep the target out of x
y = df.Loan_Status
encoder = LabelEncoder()
x_enc = x.astype(str).apply(encoder.fit_transform)  # encode each column (cast to str so missing values encode too)
x_train, x_test, y_train, y_test = train_test_split(x_enc, y, test_size=0.3, random_state=42)
model = RandomForestClassifier(n_estimators=100)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print(accuracy_score(y_test, y_pred))
print(precision_score(y_test, y_pred, average='weighted'))
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
import pandas as pd
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
from sklearn.preprocessing import StandardScaler

iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
print(df.head())
scaler = StandardScaler()
scaled_data = scaler.fit_transform(df)
z = linkage(scaled_data, method='ward')
plt.figure(figsize=(7, 5))
dendrogram(z, labels=iris.target)
plt.show()
k = 3
cluster = fcluster(z, t=k, criterion='maxclust')
df['cluster'] = cluster
df['Species'] = iris.target
print(df.groupby(['cluster', 'Species']).size())
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score, adjusted_rand_score

iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
print(df.head())
scaler = StandardScaler()
x_scaled = scaler.fit_transform(df)
kmeans = KMeans(n_clusters=3, random_state=42)
kmeans.fit(x_scaled)
cluster_centers = kmeans.cluster_centers_
print(cluster_centers)
true_labels = iris.target
print(silhouette_score(x_scaled, kmeans.labels_))
print(adjusted_rand_score(true_labels, kmeans.labels_))
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

df = pd.read_csv("iris.csv")
print(df.head())
print(df.isnull().sum())
#df.fillna(df.mean(), inplace=True)
df_encoded = pd.get_dummies(df, columns=['Species'], drop_first=True)
print(df_encoded.head())
x = df_encoded
x_scaled = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
x_pca = pca.fit_transform(x_scaled)
print(pca.explained_variance_ratio_)
Use PowerISO if the imager doesn't work:

Tools > USB Tools > Create Bootable USB

Clone:
server
ui
@ECHO OFF
set list=Desktop Documents Downloads Favorites Music Pictures Videos
set baseLocation="%USERPROFILE%\OneDrive - Olivet Nazarene University\LocalPC\"
set "Key=HKCU\Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders"
set "Typ=REG_EXPAND_SZ"
set RegList="Desktop" "Personal" "{374DE290-123F-4565-9164-39C4925E467B}" "Favorites" "My Music" "My Pictures" "My Video"
set /a c=0
setLocal enableDelayedExpansion
for %%j in (%RegList%) do (
    set RegList[!c!]=%%j & set /a c=c+1
)
for %%i in (%list%) do (
    if not exist %baseLocation%%%i (
        mkdir %baseLocation%%%i
    ) else (
        echo %%i already exists
    )
)
set baseLocation=%baseLocation:"=%
for %%i in (%list%) do (
    RoboCopy.exe "%USERPROFILE%\%%~i\." "%baseLocation%\%%~i\." *.* /MOV /FP /NP /IS /Z /E /NFL /NDL /NJH
)
set /a d=0
for %%k in (%list%) do (
    call set val=%%RegList[!d!]%% & Reg Add "%Key%" /f /V !val! /T %Typ% /D "%baseLocation%%%k" & set /a d=d+1
)

For /D /R j:\test %%1 IN (*) DO c:\trid_w32\trid "%%1"\* -ae
 
 
Replace j:\test with the directory that you want to move recursively through (TrID will not run on the files in the root of this directory).
 
Replace c:\trid_w32\trid with the path to trid.exe.
 
Dump the line in a batch file and run.
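
If you would rather script it, a rough Python equivalent of the same loop (the paths are placeholders, as above; this assumes TrID expands the trailing wildcard itself, as it does for the batch one-liner):

import os
import subprocess

ROOT = r"j:\test"               # placeholder: directory to walk recursively
TRID = r"c:\trid_w32\trid.exe"  # placeholder: path to trid.exe

# Visit every subdirectory of ROOT (like FOR /D /R); as with the batch
# one-liner, files sitting directly in ROOT itself are skipped.
for dirpath, dirnames, filenames in os.walk(ROOT):
    if dirpath == ROOT:
        continue
    subprocess.run([TRID, os.path.join(dirpath, "*"), "-ae"])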
#include <iostream>
#include <queue>
using namespace std;

class Node {
public:
    int data;
    Node* left;
    Node* right;

    Node(int d) {
        this->data = d;
        this->left = NULL;
        this->right = NULL;
    }
};

Node* insertIntoBST(Node* root, int d) {
    // base case 
    if (root == NULL) {
        root = new Node(d); // Create a new node
        return root; // Return the newly created node
    }
    if (d > root->data) {
        // Insert in the right subtree
        root->right = insertIntoBST(root->right, d);
    } else {
        // Insert in the left subtree
        root->left = insertIntoBST(root->left, d);
    }
    return root; // Return the root of the subtree
}

void levelOrderTraversal(Node* root) {
    if (root == NULL) 
        return; // If the tree is empty, return

    queue<Node*> q;
    q.push(root);

    while (!q.empty()) {
        Node* temp = q.front();
        q.pop();
        cout << temp->data << " "; // Print the current node's data

        // Push left and right children into the queue
        if (temp->left) {
            q.push(temp->left);
        }
        if (temp->right) {
            q.push(temp->right);
        }
    }
    cout << endl; // Print a new line after level order traversal
}

void takeInput(Node*& root) {
    int data;
    cin >> data;

    while (data != -1) {
        root = insertIntoBST(root, data); // Update the root pointer
        
        // Print the current state of the BST after each insertion
        cout << "Current state of the BST after inserting " << data << ": ";
        levelOrderTraversal(root);
        
        cin >> data;
    }
}
void inorder(Node* root) {
    //base case
    if(root == NULL) {
        return ;
    }
    inorder(root->left);
    cout << root-> data << " ";
    inorder(root->right);
}
void preorder(Node* root) {
    //base case
    if(root == NULL) {
        return ;
    }
    cout << root-> data << " ";
    preorder(root->left);
    preorder(root->right);
}
void postorder(Node* root) {
    //base case
    if(root == NULL) {
        return ;
    }
    postorder(root->left);
    postorder(root->right);
    cout << root-> data << " ";
}
Node* minVal(Node* root){
    Node* temp = root;
    
    while(temp->left != NULL){
        temp = temp->left;
    }
    return temp;
}
Node* maxVal(Node* root){
    Node* temp = root;
    
    while(temp->right != NULL){
        temp = temp->right;
    }
    return temp;
}
int main() {
    Node* root = NULL;
    cout << "Enter the data for BST (end with -1): ";
    takeInput(root);
    
    cout << "printing inorder" << endl;
    inorder(root);
    
    cout << endl <<"printing preorder" << endl;
    preorder(root);
    
    cout << endl<<  "printing postorder" << endl;
    postorder(root);
    
    cout << endl << " min value is " << minVal(root)->data << endl;
    cout << " max value is " << maxVal(root)->data << endl;
    
    
    
    
    return 0;
}


//// Time complexity: O(h) per operation, i.e. O(log n) for a balanced BST and O(n) in the worst case
///************** BST QUESTION *************///
/// GIVEN A NUMBER, RETURN TRUE IF IT IS PRESENT IN THE BST, OTHERWISE RETURN FALSE

/// APPROACH
// if we reach NULL return false; otherwise if root->data > (number passed) go to the left side, else go to the right side

///// FIRST APPROACH (iterative)
bool searchInBST(BinaryTreeNode<int> *root, int x) {
    BinaryTreeNode<int> *temp = root;

    while (temp != NULL) {
        if (temp->data == x) {
            return true;
        }
        if (temp->data > x) {
            temp = temp->left;
        } else {
            temp = temp->right;
        }
    }
    return false;
}

////// SECOND APPROACH (recursive)
bool searchInBST(BinaryTreeNode<int> *root, int x) {
    // base case
    if (root == NULL) {
        return false;
    }

    if (root->data == x) {
        return true;
    }
    if (root->data > x) {
        return searchInBST(root->left, x);
    } else {
        return searchInBST(root->right, x);
    }
}
public class MergeSort {
   
    public static void mergeSort(int[] arr, int left, int right, int[] temp) {
        if (left < right) {
            int mid = (left + right) / 2;
            mergeSort(arr, left, mid, temp);
            mergeSort(arr, mid + 1, right, temp);
            merge(arr, left, mid, right, temp);
        }
    }

    public static void merge(int[] arr, int left, int mid, int right, int[] temp) {
        int i = left;
        int j = mid + 1;
        int k = left;

        while (i <= mid && j <= right) {
            if (arr[i] <= arr[j]) {
                temp[k] = arr[i];
                i++;
            } else {
                temp[k] = arr[j];
                j++;
            }
            k++;
        }

        while (i <= mid) {
            temp[k] = arr[i];
            i++;
            k++;
        }

        while (j <= right) {
            temp[k] = arr[j];
            j++;
            k++;
        }

        for (i = left; i <= right; i++) {
            arr[i] = temp[i];
        }
    }

    public static void main(String[] args) {
        int[] arr = {38, 27, 43, 3, 9, 82, 10};
        int[] temp = new int[arr.length];
        mergeSort(arr, 0, arr.length - 1, temp);

        System.out.print("Sorted array: ");
        for (int num : arr) {
            System.out.print(num + " ");
        }
    }
}
import java.util.Scanner;

public class QuickSort {

    public static void quicksort(int[] a, int lb, int ub) {
        int pivot, start, end;
        pivot = a[lb];
        start = lb;
        end = ub;

        if (start < end) {
            while (start < end) {
                while (start < end && a[start] <= pivot) {
                    start++;
                }
                while (a[end] > pivot) {
                    end--;
                }
                if (start < end) {
                    swap(a, start, end);
                }
            }
            swap(a, lb, end);

            quicksort(a, lb, end - 1);
            quicksort(a, end + 1, ub);
        }
    }

    
    public static void swap(int[] a, int i, int j) {
        int temp = a[i];
        a[i] = a[j];
        a[j] = temp;
    }

    public static void display(int[] a) {
        System.out.println("Sorted array:");
        for (int i : a) {
            System.out.print(i + "\t");
        }
        System.out.println();
    }

    public static void main(String[] args) {
        Scanner scanner = new Scanner(System.in);
        System.out.println("Enter array size:");
        int n = scanner.nextInt();
        int[] a = new int[n];

        System.out.println("Enter elements into array:");
        for (int i = 0; i < n; i++) {
            a[i] = scanner.nextInt();
        }

        quicksort(a, 0, n - 1);
        display(a);

        scanner.close();
    }
}
#include <iostream>
#include <queue>
using namespace std;

class Node {
public:
    int data;
    Node* left;
    Node* right;

    Node(int d) {
        this->data = d;
        this->left = NULL;
        this->right = NULL;
    }
};

Node* insertIntoBST(Node* root, int d) {
    // base case 
    if (root == NULL) {
        root = new Node(d); // Create a new node
        return root; // Return the newly created node
    }
    if (d > root->data) {
        // Insert in the right subtree
        root->right = insertIntoBST(root->right, d);
    } else {
        // Insert in the left subtree
        root->left = insertIntoBST(root->left, d);
    }
    return root; // Return the root of the subtree
}

void levelOrderTraversal(Node* root) {
    if (root == NULL) 
        return; // If the tree is empty, return

    queue<Node*> q;
    q.push(root);

    while (!q.empty()) {
        Node* temp = q.front();
        q.pop();
        cout << temp->data << " "; // Print the current node's data

        // Push left and right children into the queue
        if (temp->left) {
            q.push(temp->left);
        }
        if (temp->right) {
            q.push(temp->right);
        }
    }
    cout << endl; // Print a new line after level order traversal
}

void takeInput(Node*& root) {
    int data;
    cin >> data;

    while (data != -1) {
        root = insertIntoBST(root, data); // Update the root pointer
        
        // Print the current state of the BST after each insertion
        cout << "Current state of the BST after inserting " << data << ": ";
        levelOrderTraversal(root);
        
        cin >> data;
    }
}
void inorder(Node* root) {
    //base case
    if(root == NULL) {
        return ;
    }
    inorder(root->left);
    cout << root-> data << " ";
    inorder(root->right);
}
void preorder(Node* root) {
    //base case
    if(root == NULL) {
        return ;
    }
    cout << root-> data << " ";
    preorder(root->left);
    preorder(root->right);
}
void postorder(Node* root) {
    //base case
    if(root == NULL) {
        return ;
    }
    postorder(root->left);
    postorder(root->right);
    cout << root-> data << " ";
}

int main() {
    Node* root = NULL;
    cout << "Enter the data for BST (end with -1): ";
    takeInput(root);
    
    cout << "printing inorder" << endl;
    inorder(root);
    
    cout << endl <<"printing preorder" << endl;
    preorder(root);
    
    cout << endl<<  "printing postorder" << endl;
    postorder(root);
    return 0;
}


//// Time complexity: O(h) per operation, i.e. O(log n) for a balanced BST and O(n) in the worst case
#KMeans Clustering
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# Sample dataset: Fruit type with average RGB values
data = {'Fruit': ['Apple', 'Apple', 'Apple', 'Orange', 'Orange', 'Orange', 'Banana', 'Banana', 'Banana'],
        'Red': [180, 190, 170, 255, 250, 245, 240, 230, 220],
        'Green': [20, 30, 25, 120, 110, 105, 240, 230, 220],
        'Blue': [30, 40, 35, 70, 65, 60, 240, 230, 220]}

df = pd.DataFrame(data)
print(df)
# Extracting the features (Red, Green, Blue)
X = df[['Red', 'Green', 'Blue']]

# Standardize the data
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Apply K-Means with 3 clusters
kmeans = KMeans(n_clusters=3, random_state=42)
kmeans.fit(X_scaled)

# Get the cluster labels
df['Cluster'] = kmeans.labels_
print(df)
# Reduce data to 2D using PCA for visualization
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_scaled)

# Plot the clusters
plt.figure(figsize=(8, 6))
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=kmeans.labels_, cmap='viridis')
plt.title('Fruit Clusters (K-Means)')
plt.xlabel('PCA Component 1')
plt.ylabel('PCA Component 2')
plt.show()
#Decision Tree

from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import pandas as pd

# Load Soybean Dataset
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/soybean/soybean-small.data"
columns = ['date', 'plant-stand', 'precip', 'temp', 'hail', 'crop-hist', 'area-damaged', 'severity', 'seed-tmt',
           'germination', 'plant-growth', 'leaves', 'leafspots-halo', 'leafspots-marg', 'leafspot-size',
           'leaf-shread', 'leaf-malf', 'leaf-mild', 'stem', 'lodging', 'stem-cankers', 'canker-lesion',
           'fruiting-bodies', 'external-decay', 'mycelium', 'int-discolor', 'sclerotia', 'fruit-pods',
           'roots', 'class'
          ]

data = pd.read_csv(url, header = None, names = columns)

X = data.drop(columns = ['class'])
y = data['class']

X = pd.get_dummies(X)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
decision_tree = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=42)
decision_tree.fit(X_train, y_train)

y_pred = decision_tree.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)
conf = confusion_matrix(y_test, y_pred)
classR = classification_report(y_test, y_pred)

print(accuracy)
print(conf)
print(classR)
#Naive Bayes

from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report

data = load_iris()
X = data.data
y = (data.target == 2).astype(int)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)


model = GaussianNB()
model.fit(X_train, y_train)

y_pred = model.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)
conf = confusion_matrix(y_test, y_pred)
classR = classification_report(y_test, y_pred)

print(accuracy)
print(conf)
print(classR)
#KNN

from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.datasets import load_iris

data = load_iris()
X = data.data
y = data.target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)  # reuse the scaler fitted on the training data


knn = KNeighborsClassifier(n_neighbors = 5)
knn.fit(X_train, y_train)

y_pred = knn.predict(X_test)


accuracy = accuracy_score(y_test, y_pred)
conf = confusion_matrix(y_test, y_pred)
classR = classification_report(y_test, y_pred)

print(accuracy)
print(conf)
print(classR)
#Linear Regression

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_classification
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

X, y = make_classification(n_samples = 1000, n_features = 2, n_redundant =0, n_informative = 2,  n_classes = 2, random_state = 42)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)

linear_model = LinearRegression()
linear_model.fit(X_train, y_train)
y_pred_prob = linear_model.predict(X_test)
y_pred = (y_pred_prob >=0.5).astype(int)

cm = confusion_matrix(y_test, y_pred)
print("Accuracy:", accuracy_score(y_test, y_pred))
plt.figure(figsize = (6, 4))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', cbar=False)
plt.title('Confusion Matrix')
plt.show()
#Logistic Regression
# Import necessary libraries
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.datasets import load_iris

# Load sample data
data = load_iris()
X = data.data
y = (data.target == 2).astype(int)  # Create a binary target for the logistic regression example

# Split data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Create and train the logistic regression model
model = LogisticRegression()
model.fit(X_train, y_train)

# Make predictions
y_pred = model.predict(X_test)

# Evaluate the model
accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
class_report = classification_report(y_test, y_pred)

print("Accuracy:", accuracy)
print("Confusion Matrix:\n", conf_matrix)
print("Classification Report:\n", class_report)
#Line Plot
import matplotlib.pyplot as plt

x = [1,2,3,4,5]
y = [10, 20, 30, 40, 50]

plt.plot(x,y)
plt.title("Line Plot")
plt.xlabel("X")
plt.ylabel("Y")
plt.show()

#Bar Graph
plt.bar(x,y)
plt.title("Bar Graph")
plt.xlabel("X")
plt.ylabel("Y")
plt.show()

#Histogram
import numpy as np
data = np.random.randn(1000)

plt.hist(data, bins = 30)
plt.show()

#Box Plot
import seaborn as sns
import matplotlib.pyplot as plt

# Example data
data = sns.load_dataset('tips')

# Box plot
sns.boxplot(x='day', y='total_bill', data=data)
plt.title('Box Plot')
plt.show()

#Scatter Plot

import matplotlib.pyplot as plt

# Example data
x = [1, 2, 3, 4, 5]
y = [1, 4, 9, 16, 25]

# Scatter plot
plt.scatter(x, y, color='red')
plt.title('Scatter Plot')
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
#Pie Chart

import matplotlib.pyplot as plt

# Example data
labels = ['Apple', 'Banana', 'Cherry', 'Date']
sizes = [10, 20, 30, 40]

# Pie chart
plt.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90)
plt.title('Pie Chart')
plt.show()
#Heatmap

import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# Example data
data = np.random.rand(10, 12)

# Heatmap
sns.heatmap(data, cmap='coolwarm', annot=True)
plt.title('Heatmap')
plt.show()
#Pair plot
import seaborn as sns
import matplotlib.pyplot as plt

# Example data
data = sns.load_dataset('iris')

# Pair plot
sns.pairplot(data, hue='species')
plt.suptitle('Pair Plot', y=1.02)
plt.show()
#MinMaxScaler
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
data = {
    'Feature1' : [100,200,300,400,500],
    'Feature2' : [1,2,3,4,5]
}
data = pd.DataFrame(data)

min_max_scaler = MinMaxScaler()
data_scaled = min_max_scaler.fit_transform(data)
data_scaled = pd.DataFrame(data_scaled, columns=data.columns)
data_scaled
#Standard Scaler
import pandas as pd
from sklearn.preprocessing import StandardScaler
data = {
    'Feature1' : [100,200,300,400,500],
    'Feature2' : [1,2,3,4,5]
}

data = pd.DataFrame(data)

standard_scaler = StandardScaler()
data_scaled = standard_scaler.fit_transform(data)

data_scaled = pd.DataFrame(data_scaled, columns = data.columns)
data_scaled
#OneHot Encoding
import pandas as pd

# Sample data with a categorical column
data = pd.DataFrame({
    'Color': ['Red', 'Green', 'Blue', 'Red', 'Blue', 'Green', 'Green']
})

data_one_hot = pd.get_dummies(data['Color'], prefix = 'Color')
df = pd.DataFrame(data_one_hot)
df
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
# Sample data
data = {
 'Feature1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
 'Feature2': [5, 8, 12, 15, 18, 24, 28, 30, 34, 40]
}
df = pd.DataFrame(data)
# Standardize the features
scaler = StandardScaler()
X = scaler.fit_transform(df)
# Elbow method for finding optimal k
inertia = []
K = range(1, 11)
for k in K:
 kmeans = KMeans(n_clusters=k, random_state=42)
 kmeans.fit(X)
 inertia.append(kmeans.inertia_)
plt.plot(K, inertia, 'bo-')
plt.xlabel('Number of clusters, k')
plt.ylabel('Inertia')
plt.title('Elbow Method For Optimal k')
plt.show()
# Apply K-Means with the chosen number of clusters (e.g., 3)
kmeans = KMeans(n_clusters=3, random_state=42)
df['Cluster'] = kmeans.fit_predict(X)
# Visualize the clusters
plt.scatter(X[:, 0], X[:, 1], c=df['Cluster'], cmap='viridis', marker='o',
            edgecolor='k', s=100)
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
            s=200, c='red', marker='X')  # cluster centers
plt.xlabel('Feature1')
plt.ylabel('Feature2')
plt.title('K-Means Clustering')
plt.show()
#Label Encoding
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import pandas as pd

data = {
    'Color': ['Red', 'Green', 'Blue', 'Red', 'Blue', 'Green', 'Green']
}

df = pd.DataFrame(data)
label_encoder = LabelEncoder()

df['Color-Label'] = label_encoder.fit_transform(df['Color'])
df
#Missing values
import pandas as pd
data = {
    'Name': ['John', 'Alice', 'Steve'],
    'Age': [18, None, 20],
    'City': ['New York', 'California', None]
}

df = pd.DataFrame(data)

df_filled = df.fillna({'Age': df['Age'].mean(), 'City': 'Unknown'})
print(df_filled)
import pandas as pd

# Sample dictionary
data = {
    'Name': ['Alice', 'Bob', 'Charlie'],
    'Age': [25, 30, 35],
    'City': ['New York', 'Los Angeles', 'Chicago']
}

# Convert dictionary to DataFrame
df = pd.DataFrame(data)

# Display the DataFrame
print(df)
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
# Sample data
data = {
 'Feature1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
 'Feature2': [5, 10, 15, 20, 25, 30, 35, 40, 45, 50],
 'Target': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
}
df = pd.DataFrame(data)
# Split data into features and target
X = df[['Feature1', 'Feature2']]
y = df['Target']
# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Initialize and fit model
rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)
# Make predictions
y_pred = rf.predict(X_test)
# Evaluate model
accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
class_report = classification_report(y_test, y_pred)
print(f'Accuracy: {accuracy:.2f}')
print('Confusion Matrix:')
print(conf_matrix)
print('Classification Report:')
print(class_report)
https://tinyurl.com/ds-lab-datasets-123
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
# Sample data
data = {
 'Feature1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
 'Feature2': [5, 10, 15, 20, 25, 30, 35, 40, 45, 50],
 'Target': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
}
df = pd.DataFrame(data)
# Split data into features and target
X = df[['Feature1', 'Feature2']]
y = df['Target']
# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Initialize and fit model
tree = DecisionTreeClassifier(max_depth=3, random_state=42)
tree.fit(X_train, y_train)
# Make predictions
y_pred = tree.predict(X_test)
# Evaluate model
accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
class_report = classification_report(y_test, y_pred)
print(f'Accuracy: {accuracy:.2f}')
print('Confusion Matrix:')
print(conf_matrix)
print('Classification Report:')
print(class_report)
public class OBSTree {

    public static void main(String[] args) {
        double[] P = {3, 3, 1, 1};
        double[] Q = {2, 3, 1, 1, 1};

        int n = P.length;

        OBSTResult result = OBST(P, Q, n);

        System.out.println("Cost Matrix C:");
        for (int i = 0; i <= n; i++) {
            for (int j = 0; j <= n; j++) {
                System.out.printf("%.2f ", result.C[i][j]);
            }
            System.out.println();
        }

        System.out.println("\nRoot Matrix R:");
        for (int i = 0; i <= n; i++) {
            for (int j = 0; j <= n; j++) {
                System.out.printf("%d ", result.R[i][j]);
            }
            System.out.println();
        }
    }

    public static OBSTResult OBST(double[] P, double[] Q, int n) {
        double[][] C = new double[n + 1][n + 1];
        double[][] W = new double[n + 1][n + 1];
        int[][] R = new int[n + 1][n + 1];

        for (int i = 0; i <= n; i++) {
            W[i][i] = Q[i];
            C[i][i] = 0;
            R[i][i] = 0;
            if (i < n) {
                W[i][i + 1] = Q[i] + Q[i + 1] + P[i];
                C[i][i + 1] = W[i][i + 1];
                R[i][i + 1] = i + 1;
            }
        }

        for (int m = 2; m <= n; m++) {
            for (int i = 0; i <= n - m; i++) {
                int j = i + m;
                W[i][j] = W[i][j - 1] + P[j - 1] + Q[j];

                double minCost = Double.MAX_VALUE;
                int bestRoot = -1;

                for (int k = R[i][j - 1]; k <= R[i + 1][j]; k++) {
                    double cost = C[i][k - 1] + C[k][j];
                    if (cost < minCost) {
                        minCost = cost;
                        bestRoot = k;
                    }
                }

                C[i][j] = W[i][j] + minCost;
                R[i][j] = bestRoot;
            }
        }

        return new OBSTResult(C, W, R);
    }
}

class OBSTResult {
    double[][] C;
    double[][] W;
    int[][] R;

    OBSTResult(double[][] C, double[][] W, int[][] R) {
        this.C = C;
        this.W = W;
        this.R = R;
    }
}

/*
Test Case 1:
Input:
P = {3, 3, 1, 1}
Q = {2, 3, 1, 1, 1}

Expected Output:
Cost Matrix C:
0.00 2.00 5.00 8.00 9.00 
0.00 0.00 4.00 7.00 8.00 
0.00 0.00 0.00 4.00 5.00 
0.00 0.00 0.00 0.00 1.00 
0.00 0.00 0.00 0.00 0.00 

Root Matrix R:
0 1 1 1 1 
0 0 1 1 2 
0 0 0 3 3 
0 0 0 0 4 
0 0 0 0 0

Test Case 2:
Input:
P = {4, 2, 3, 4, 2}
Q = {3, 1, 2, 1, 2, 3}

Expected Output:
Cost Matrix C:
0.00 3.00 7.00 10.00 15.00 18.00 
0.00 0.00 2.00 5.00 9.00 12.00 
0.00 0.00 0.00 2.00 5.00 7.00 
0.00 0.00 0.00 0.00 2.00 4.00 
0.00 0.00 0.00 0.00 0.00 2.00 
0.00 0.00 0.00 0.00 0.00 0.00 

Root Matrix R:
0 1 2 3 3 4 
0 0 1 2 3 3 
0 0 0 1 2 3 
0 0 0 0 1 2 
0 0 0 0 0 1 
0 0 0 0 0 0
*/
//DijkstraAlgorithm 
import java.util.Scanner;
import java.util.Arrays;

public class DijkstraAlgorithm {

    // Method to find the vertex with the minimum distance value that hasn't been processed yet
    static int getMinDistanceVertex(int[] distance, boolean[] processedVertices, int numberOfVertices) {
        int minDistance = Integer.MAX_VALUE;  // Initialize with a large value
        int minVertexIndex = -1;  // Index of the vertex with the minimum distance

        // Search for the vertex with the smallest distance value
        for (int vertex = 0; vertex < numberOfVertices; vertex++) {
            if (!processedVertices[vertex] && distance[vertex] <= minDistance) {
                minDistance = distance[vertex];  // Update minimum distance
                minVertexIndex = vertex;  // Update index of vertex with minimum distance
            }
        }
        return minVertexIndex;
    }

    // Method to implement Dijkstra's algorithm to find the shortest path from the source
    static void dijkstra(int[][] graph, int[] distance, boolean[] processedVertices, int numberOfVertices, int sourceVertex) {
        // Initialize distances and processedVertices
        Arrays.fill(distance, Integer.MAX_VALUE);  // Set all distances to infinity initially
        Arrays.fill(processedVertices, false);  // Mark all vertices as unprocessed

        distance[sourceVertex] = 0;  // Distance from source to itself is 0

        // Find the shortest path for all vertices
        for (int i = 0; i < numberOfVertices - 1; i++) {
            // Get the vertex with the minimum distance value that hasn't been processed yet
            int currentVertex = getMinDistanceVertex(distance, processedVertices, numberOfVertices);

            // Mark the current vertex as processed
            processedVertices[currentVertex] = true;

            // Update distance values of adjacent vertices of the current vertex
            for (int adjacentVertex = 0; adjacentVertex < numberOfVertices; adjacentVertex++) {
                // If the adjacent vertex is unprocessed, has a path from currentVertex, and distance can be minimized
                if (!processedVertices[adjacentVertex] && graph[currentVertex][adjacentVertex] != 0 &&
                        distance[currentVertex] != Integer.MAX_VALUE &&
                        distance[currentVertex] + graph[currentVertex][adjacentVertex] < distance[adjacentVertex]) {
                    // Update the distance to the adjacent vertex
                    distance[adjacentVertex] = distance[currentVertex] + graph[currentVertex][adjacentVertex];
                }
            }
        }
    }

    // Method to print the solution (distances from source vertex)
    static void printSolution(int[] distance, int numberOfVertices) {
        System.out.println("Vertex   Distance from Source");
        for (int i = 0; i < numberOfVertices; i++) {
            System.out.println(i + " \t\t " + distance[i]);
        }
    }

    public static void main(String[] args) {
        Scanner sc = new Scanner(System.in);

        // Taking input for number of vertices
        System.out.print("Enter the number of vertices: ");
        int numberOfVertices = sc.nextInt();

        // Initialize the graph matrix (adjacency matrix)
        int[][] graph = new int[numberOfVertices][numberOfVertices];

        // Taking input for the adjacency matrix (graph)
        System.out.println("Enter the adjacency matrix (0 means no edge between vertices):");
        for (int i = 0; i < numberOfVertices; i++) {
            for (int j = 0; j < numberOfVertices; j++) {
                graph[i][j] = sc.nextInt();
            }
        }

        // Taking input for the source vertex
        System.out.print("Enter the source vertex: ");
        int sourceVertex = sc.nextInt();

        // Arrays to store the distance and processed status of vertices
        int[] distance = new int[numberOfVertices];
        boolean[] processedVertices = new boolean[numberOfVertices];

        // Run Dijkstra's algorithm starting from the source vertex
        dijkstra(graph, distance, processedVertices, numberOfVertices, sourceVertex);

        // Print the shortest distances from the source vertex
        printSolution(distance, numberOfVertices);
    }
}

/*
Test Case
Enter the number of vertices: 5
Enter the adjacency matrix (0 means no edge between vertices):
0 10 0 0 0
0 0 5 0 0
0 0 0 15 0
0 0 0 0 20
0 0 0 0 0
Enter the source vertex: 0
Vertex   Distance from Source
0        0
1        10
2        15
3        30
4        50
*/
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
# Sample data
data = {
 'Feature1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
 'Feature2': [5, 10, 15, 20, 25, 30, 35, 40, 45, 50],
 'Target': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
}
df = pd.DataFrame(data)
# Split data into features and target
X = df[['Feature1', 'Feature2']]
y = df['Target']
# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Initialize and fit model
nb = GaussianNB()
nb.fit(X_train, y_train)
# Make predictions
y_pred = nb.predict(X_test)
# Evaluate model
accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
class_report = classification_report(y_test, y_pred)
print(f'Accuracy: {accuracy:.2f}')
print('Confusion Matrix:')
print(conf_matrix)
print('Classification Report:')
print(class_report)
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
# Sample data
data = {
 'Feature1': [2, 4, 4, 4, 6, 6, 6, 8, 8, 8],
 'Feature2': [4, 2, 4, 6, 2, 4, 6, 2, 4, 6],
 'Target': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
}
df = pd.DataFrame(data)
# Split data into features and target
X = df[['Feature1', 'Feature2']]
y = df['Target']
# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Initialize and fit model
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
# Make predictions
y_pred = knn.predict(X_test)
# Evaluate model
accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
class_report = classification_report(y_test, y_pred)
print(f'Accuracy: {accuracy:.2f}')
print('Confusion Matrix:')
print(conf_matrix)
print('Classification Report:')
print(class_report)
//Job Scheduling
import java.util.*;

class Job {
    int id;
    int deadline;
    int profit;

    Job(int id, int deadline, int profit) {
        this.id = id;
        this.deadline = deadline;
        this.profit = profit;
    }
}

public class JobScheduling {

    static class JobComparator implements Comparator<Job> {
        public int compare(Job j1, Job j2) {
            return j2.profit - j1.profit;
        }
    }

    public static int jobSequence(Job[] jobs, int n) {
        Arrays.sort(jobs, new JobComparator());
        
        int[] slot = new int[n];
        Arrays.fill(slot, 0);
        
        int[] jobSequence = new int[n];
        int[] slotProfit = new int[n]; // profit of the job placed in each slot
        int count = 0;
        int totalProfit = 0;

        for (int i = 0; i < n; i++) {
            for (int j = jobs[i].deadline - 1; j >= 0; j--) {
                if (slot[j] == 0) {
                    slot[j] = 1;
                    jobSequence[j] = jobs[i].id;
                    slotProfit[j] = jobs[i].profit;
                    totalProfit += jobs[i].profit;
                    count++;
                    break;
                }
            }
        }

        System.out.println("Scheduled jobs:");
        for (int i = 0; i < n; i++) {
            if (slot[i] == 1) {
                System.out.println("Job ID: " + jobSequence[i] + ", Profit: " + jobs[i].profit);
            }
        }
        System.out.println("Total profit: " + totalProfit);

        return count;
    }

    public static void main(String[] args) {
        Scanner scanner = new Scanner(System.in);

        System.out.print("Enter the number of jobs: ");
        int n = scanner.nextInt();
        
        Job[] jobs = new Job[n];

        System.out.println("Enter job ID, deadline, profit for each job:");
        for (int i = 0; i < n; i++) {
            int id = scanner.nextInt();
            int deadline = scanner.nextInt();
            int profit = scanner.nextInt();
            jobs[i] = new Job(id, deadline, profit);
        }

        jobSequence(jobs, n);
        scanner.close();
    }
}

/*
Test Case 1:
Input:
5
1 2 50
2 1 10
3 2 20
4 1 30
5 3 40

Expected Output:
Scheduled jobs:
Job ID: 4, Profit: 30
Job ID: 1, Profit: 50
Job ID: 5, Profit: 40
Total profit: 120

Test Case 2:
Input:
4
1 4 30
2 1 20
3 2 10
4 1 40

Expected Output:
Scheduled jobs:
Job ID: 4, Profit: 40
Job ID: 3, Profit: 10
Job ID: 1, Profit: 30
Total profit: 80
*/
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
# Sample data
data = {
 'Feature1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
 'Feature2': [5, 10, 15, 20, 25, 30, 35, 40, 45, 50],
 'Target': [2, 4, 5, 7, 10, 13, 14, 16, 18, 20]
}
df = pd.DataFrame(data)
# Split data into features and target
X = df[['Feature1', 'Feature2']]
y = df['Target']
# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Initialize and fit model
linreg = LinearRegression()
linreg.fit(X_train, y_train)
# Make predictions
y_pred = linreg.predict(X_test)
# Evaluate model
mse = mean_squared_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f'Mean Squared Error: {mse:.2f}')
print(f'Mean Absolute Error: {mae:.2f}')
print(f'R-squared Score: {r2:.2f}')
//Articulation Points
import java.util.*;

public class ArticulationPoints {
    private int numVertices;
    private int[][] adjacencyMatrix;
    private int[] discoveryTime;
    private int[] lowLinkValue;
    private int timeCounter;
    private Set<Integer> articulationPoints;

    public ArticulationPoints(int numVertices) {
        this.numVertices = numVertices;
        adjacencyMatrix = new int[numVertices][numVertices];
        discoveryTime = new int[numVertices];
        lowLinkValue = new int[numVertices];
        timeCounter = 1;
        articulationPoints = new HashSet<>();
    }

    public void readGraph(Scanner scanner) {
        for (int i = 0; i < numVertices; i++) {
            for (int j = 0; j < numVertices; j++) {
                adjacencyMatrix[i][j] = scanner.nextInt();
            }
            discoveryTime[i] = 0;
        }
    }

    public void findArticulationPoints(int currentVertex, int parentVertex) {
        discoveryTime[currentVertex] = timeCounter;
        lowLinkValue[currentVertex] = timeCounter;
        timeCounter++;
        int childrenCount = 0;

        for (int adjacentVertex = 0; adjacentVertex < numVertices; adjacentVertex++) {
            if (adjacencyMatrix[currentVertex][adjacentVertex] == 1 && discoveryTime[adjacentVertex] == 0) {
                if (parentVertex == -1) childrenCount++;
                findArticulationPoints(adjacentVertex, currentVertex);

                if (parentVertex != -1 && lowLinkValue[adjacentVertex] >= discoveryTime[currentVertex]) {
                    articulationPoints.add(currentVertex);
                }

                lowLinkValue[currentVertex] = Math.min(lowLinkValue[currentVertex], lowLinkValue[adjacentVertex]);
            } else if (adjacencyMatrix[currentVertex][adjacentVertex] == 1 && adjacentVertex != parentVertex) {
                lowLinkValue[currentVertex] = Math.min(lowLinkValue[currentVertex], discoveryTime[adjacentVertex]);
            }
        }

        if (parentVertex == -1 && childrenCount > 1) {
            articulationPoints.add(currentVertex);
        }
    }

    public void printResults() {
        System.out.println("Articulation points: " + articulationPoints);
        System.out.print("Discovery times: ");
        for (int i = 0; i < numVertices; i++) {
            System.out.print(discoveryTime[i] - 1 + " ");
        }
        System.out.println();
    }

    public static void main(String[] args) {
        Scanner scanner = new Scanner(System.in);

        System.out.println("Enter number of vertices:");
        int numVertices = scanner.nextInt();
        ArticulationPoints articulation = new ArticulationPoints(numVertices);

        System.out.println("Enter the adjacency matrix:");
        articulation.readGraph(scanner);

        articulation.findArticulationPoints(0, -1);
        articulation.printResults();
    }
}

// Input for Test Case with 4 vertices
/*
4
0 1 0 0
1 0 1 0
0 1 0 1
0 0 1 0
*/

// Expected Output
/*
Articulation points: [1, 2]
Discovery times: 0 1 2 3
*/

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
# Sample data
data = {
 'Feature1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
 'Feature2': [5, 10, 15, 20, 25, 30, 35, 40, 45, 50],
 'Target': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
}
df = pd.DataFrame(data)
# Split data into features and target
X = df[['Feature1', 'Feature2']]
y = df['Target']
# Split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Initialize and fit model
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
# Make predictions
y_pred = logreg.predict(X_test)
# Evaluate model
accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
class_report = classification_report(y_test, y_pred)
print(f'Accuracy: {accuracy:.2f}')
print('Confusion Matrix:')
print(conf_matrix)
print('Classification Report:')
print(class_report)
//Fractional Knapsack
import java.util.*;

class Item {
    int profit;
    int weight;
    float ratio;

    Item(int profit, int weight) {
        this.profit = profit;
        this.weight = weight;
        ratio = (float) profit / weight; // cast before dividing to avoid integer division
    }
}

public class Main {
    public static void main(String[] args) {
        Scanner sc = new Scanner(System.in);
        int N = sc.nextInt();
        int capacity = sc.nextInt();
        
        Item[] items = new Item[N];
        
        for (int i = 0; i < N; i++) {
            int profit = sc.nextInt();
            int weight = sc.nextInt();
            items[i] = new Item(profit, weight);
        }
        
        float profit = fractionalKnapsack(items, N, capacity);
        System.out.println("Total profit: " + profit);
    }
    
    public static float fractionalKnapsack(Item[] items, int N, int capacity) {
        Arrays.sort(items, Comparator.comparingDouble((Item item) -> item.ratio).reversed());
        
        float totalProfit = 0;
        int currentWeight = 0;
        
        for (Item item : items) {
            if (currentWeight + item.weight <= capacity) {
                currentWeight += item.weight;
                totalProfit += item.profit;
            } else {
                int remaining = capacity - currentWeight;
                totalProfit += item.profit * ((float) remaining / item.weight);
                break;
            }
        }
        return totalProfit;
    }
}

/*
Sample Test Case:

Input:
4
50
60 10
100 20
120 30
80 40

Output:
Total profit: 240.0
*/
//Merge sort
import java.util.*;

public class Main {
    public static void main(String[] args) {
        Scanner sc = new Scanner(System.in);
        int N = sc.nextInt();
        int[] array = new int[N];
        int[] tempArray = new int[N];
        for (int i = 0; i < N; i++)
            array[i] = sc.nextInt();
        mergeSort(array, tempArray, 0, N - 1);
        System.out.println("Sorted Elements: ");
        for (int i = 0; i < N; i++) {
            System.out.print(array[i] + " ");
        }
        System.out.println();
    }

    public static void mergeSort(int[] array, int[] tempArray, int low, int high) {
        if (low < high) {
            int mid = low + (high - low) / 2;
            mergeSort(array, tempArray, low, mid);
            mergeSort(array, tempArray, mid + 1, high);
            merge(array, tempArray, low, high, mid);
        }
    }

    public static void merge(int[] array, int tempArray[], int low, int high, int mid) {
        int left = low;
        int right = mid + 1;
        int current = low;
        while (left <= mid && right <= high) {
            if (array[left] <= array[right]) {
                tempArray[current++] = array[left++];
            } else {
                tempArray[current++] = array[right++];
            }
        }
        while (left <= mid) {
            tempArray[current++] = array[left++];
        }
        while (right <= high) {
            tempArray[current++] = array[right++];
        }
        for (int i = low; i <= high; i++) {
            array[i] = tempArray[i];
        }
    }
}

/*
Sample Test Case:

Input:
6
38 27 43 3 9 82

Output:
Sorted Elements:
3 9 27 38 43 82
*/
// Quick Sort Algorithm
import java.util.*;

public class Main {
    public static void main(String[] args) {
        Scanner sc = new Scanner(System.in);
        int N = sc.nextInt();
        int[] array = new int[N];
        for (int i = 0; i < N; i++)
            array[i] = sc.nextInt();
        quickSort(array, 0, N - 1);
        System.out.println("Sorted Elements: ");
        for (int i = 0; i < N; i++) {
            System.out.print(array[i] + " ");
        }
        System.out.println();
    }

    // Partition around the first element: i scans right for a value >= pivot,
    // j scans left for a value <= pivot; out-of-place pairs are swapped.
    public static int partition(int[] array, int p, int q) {
        int v = array[p];       // pivot value
        int i = p, j = q + 1;
        do {
            do {
                i++;
            } while (i < q && array[i] < v);
            do {
                j--;
            } while (j > p && array[j] > v);
            if (i < j) {
                interchange(array, i, j);
            }
        } while (i < j);
        interchange(array, p, j);  // move the pivot into its final position
        return j;
    }

    public static void quickSort(int[] array, int p, int q) {
        if (p < q) {
            int j = partition(array, p, q);
            quickSort(array, p, j - 1);
            quickSort(array, j + 1, q);
        }
    }

    public static void interchange(int[] array, int i, int j) {
        int temp = array[i];
        array[i] = array[j];
        array[j] = temp;
    }
}

/*
Sample Test Case:

Input:
5
12 4 7 9 2

Output:
Sorted Elements:
2 4 7 9 12
*/
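
Because partition always takes the first element as the pivot, already-sorted input drives this quicksort to O(n^2). A common mitigation, not part of the original snippet, is to shuffle the array first; a minimal Fisher-Yates sketch:

import java.util.Random;

// Hypothetical helper: shuffling up front makes pre-sorted (worst-case)
// inputs unlikely, without touching the partition logic.
class ShuffleHelper {
    static void shuffle(int[] a) {
        Random rnd = new Random();
        for (int k = a.length - 1; k > 0; k--) {
            int r = rnd.nextInt(k + 1);   // uniform index in [0, k]
            int t = a[k]; a[k] = a[r]; a[r] = t;
        }
    }
}

Calling ShuffleHelper.shuffle(array) before quickSort(array, 0, N - 1) randomizes pivot behavior while leaving the sort itself unchanged.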
import java.util.*;

public class NQueens {

    // board[col] holds the row of the queen placed in column col (-1 = empty).
    public static void solveNQueens(int n) {
        int[] board = new int[n];
        Arrays.fill(board, -1);
        solve(0, board, n);
    }

    // Place a queen in each column in turn, trying every row and backtracking.
    private static void solve(int col, int[] board, int n) {
        if (col == n) {
            printBoard(board, n);
            return;
        }

        for (int row = 0; row < n; row++) {
            if (isSafe(board, col, row, n)) {
                board[col] = row;
                solve(col + 1, board, n);
                board[col] = -1;
            }
        }
    }

    // Safe if no earlier column holds a queen in the same row or diagonal.
    private static boolean isSafe(int[] board, int col, int row, int n) {
        for (int i = 0; i < col; i++) {
            if (board[i] == row || Math.abs(board[i] - row) == Math.abs(i - col)) {
                return false;
            }
        }
        return true;
    }

    // Print one solution: line i shows column i, with 'Q' at that queen's row.
    private static void printBoard(int[] board, int n) {
        for (int i = 0; i < n; i++) {
            StringBuilder sb = new StringBuilder();
            for (int j = 0; j < n; j++) {
                sb.append(board[i] == j ? 'Q' : '.');
            }
            System.out.println(sb.toString());
        }
        System.out.println();
    }

    public static void main(String[] args) {
        Scanner scanner = new Scanner(System.in);
        System.out.print("Enter the value of n (size of the board): ");
        int n = scanner.nextInt();
        solveNQueens(n);
    }
}
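
For consistency with the other snippets, a sample run; the output below assumes solutions are printed in the order this code discovers them (rows tried in ascending order per column):

/*
Sample Test Case:

Input:
Enter the value of n (size of the board): 4

Output:
.Q..
...Q
Q...
..Q.

..Q.
Q...
...Q
.Q..
*/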