Snippets Collections
# Sets up UPnP port forwarding for hosting a game: discovers the gateway,
# maps PORT on it, and prints the external join address.
# Failures halt execution via assert() (debug builds only).
func upnp_setup():
	var upnp = UPNP.new()
	
	# Search the local network for UPnP-capable devices.
	var discover_result = upnp.discover()
	assert(discover_result == UPNP.UPNP_RESULT_SUCCESS, \
		"UPNP Discover Failed! Error %s" % discover_result)

	# A device may be found without being a usable internet gateway.
	assert(upnp.get_gateway() and upnp.get_gateway().is_valid_gateway(), \
		"UPNP Invalid Gateway!")

	# PORT is assumed to be declared elsewhere in this script — TODO confirm.
	var map_result = upnp.add_port_mapping(PORT)
	assert(map_result == UPNP.UPNP_RESULT_SUCCESS, \
		"UPNP Port Mapping Failed! Error %s" % map_result)
	
	print("Success! Join Address: %s" % upnp.query_external_address())
//HTML_code
<!-- User table; rows are injected by veriEkle() in the JS below.
     BUG FIX: the original never closed .row and .container (it opened a
     stray <div> instead of closing the two wrappers). -->
<div class="container">
  <div class="row justify-content-center">
    <table class="table" id="veri_tablo">
      <thead class="thead-dark">
        <tr>
          <th onclick="sort_data('id')">ID</th>
          <th onclick="sort_data('name')">Name</th>
          <th onclick="sort_data('username')">Username</th>
          <th onclick="sort_data('email')">Email</th>
          <th onclick="sort_data('address')">Adres</th>
        </tr>
      </thead>
    </table>
  </div>
</div>


//JS_code
// Module-level cache of the fetched users; sort_data() and veriEkle() read it.
let data = [];

/**
 * Fetches the user list from the JSONPlaceholder API, caches it in the
 * module-level `data` array, and renders it via veriEkle().
 * Network and HTTP errors are caught and logged.
 * (Dead commented-out rendering code removed — it duplicated veriEkle.)
 */
function veriAl() {
  fetch('https://jsonplaceholder.typicode.com/users')
    .then((response) => {
      // fetch() only rejects on network failure; HTTP error statuses
      // (4xx/5xx) must be detected via response.ok.
      if (!response.ok) {
        throw new Error('ERROR');
      }
      // response.json() returns a Promise; its value arrives in the next .then().
      return response.json();
    })
    .then((veri) => {
      data = veri;
      veriEkle(data);
    })
    .catch((error) => {
      // Runs when any step above rejects.
      console.log(error);
    });
}
/**
 * Sorts the cached `data` array in ascending order by the given field
 * and re-renders the table.
 * @param {string} field - Property name to sort by (e.g. 'id', 'name').
 */
const sort_data = (field) => {
  data.sort((a, b) => {
    const valueA = a[field];
    const valueB = b[field];
    if (valueA < valueB) {
      return -1;
    }
    // BUG FIX: the original tested `valueB > valueA` here, which can never
    // be true once `valueA < valueB` is false, so the comparator never
    // returned 1. Compare valueA > valueB instead.
    if (valueA > valueB) {
      return 1;
    }
    return 0;
  });
  console.log("sıralandı" + field + "e göre", data);
  veriEkle(data);
};
/**
 * Renders the given user array as table rows prepended to #veri_tablo.
 * @param {Array<Object>} array - User records from the API.
 */
const veriEkle = (array) => {
  // BUG FIX: the original emitted a nested <table> per user, which is
  // invalid markup inside the target <table>; emit one <tr> per user.
  // NOTE(review): rows are prepended on every call without clearing, so
  // repeated sorts duplicate rows — confirm whether old rows should be
  // removed first.
  const html = array
    .map((user) => {
      return `<tr class="user">
      <td> ${user.id}</td>
      <td>${user.name}</td>
      <td>${user.username}</td>
      <td>${user.email}</td>
    <td>${user.address.street}/${user.address.suite}/${user.address.city}</td>
      </tr>
     `;
    })
    .join("");
  console.log(html);
  document.querySelector('#veri_tablo').insertAdjacentHTML("afterbegin", html);
};
veriAl();

//HTML_code
<!-- User table; rows are injected by veriEkle() in the JS below.
     BUG FIX: the original never closed .row and .container (it opened a
     stray <div> instead of closing the two wrappers). -->
<div class="container">
  <div class="row justify-content-center">
    <table class="table" id="veri_tablo">
      <thead class="thead-dark">
        <tr>
          <th onclick="sort_data('id')">ID</th>
          <th onclick="sort_data('name')">Name</th>
          <th onclick="sort_data('username')">Username</th>
          <th onclick="sort_data('email')">Email</th>
          <th onclick="sort_data('address')">Adres</th>
        </tr>
      </thead>
    </table>
  </div>
</div>


//JS_code
// Module-level cache of the fetched users; sort_data() and veriEkle() read it.
let data = [];

/**
 * Fetches the user list from the JSONPlaceholder API, caches it in the
 * module-level `data` array, and renders it via veriEkle().
 * Network and HTTP errors are caught and logged.
 * (Dead commented-out rendering code removed — it duplicated veriEkle.)
 */
function veriAl() {
  fetch('https://jsonplaceholder.typicode.com/users')
    .then((response) => {
      // fetch() only rejects on network failure; HTTP error statuses
      // (4xx/5xx) must be detected via response.ok.
      if (!response.ok) {
        throw new Error('ERROR');
      }
      // response.json() returns a Promise; its value arrives in the next .then().
      return response.json();
    })
    .then((veri) => {
      data = veri;
      veriEkle(data);
    })
    .catch((error) => {
      // Runs when any step above rejects.
      console.log(error);
    });
}
/**
 * Sorts the cached `data` array in ascending order by the given field
 * and re-renders the table.
 * @param {string} field - Property name to sort by (e.g. 'id', 'name').
 */
const sort_data = (field) => {
  data.sort((a, b) => {
    const valueA = a[field];
    const valueB = b[field];
    if (valueA < valueB) {
      return -1;
    }
    // BUG FIX: the original tested `valueB > valueA` here, which can never
    // be true once `valueA < valueB` is false, so the comparator never
    // returned 1. Compare valueA > valueB instead.
    if (valueA > valueB) {
      return 1;
    }
    return 0;
  });
  console.log("sıralandı" + field + "e göre", data);
  veriEkle(data);
};
/**
 * Renders the given user array as table rows prepended to #veri_tablo.
 * @param {Array<Object>} array - User records from the API.
 */
const veriEkle = (array) => {
  // BUG FIX: the original emitted a nested <table> per user, which is
  // invalid markup inside the target <table>; emit one <tr> per user.
  // NOTE(review): rows are prepended on every call without clearing, so
  // repeated sorts duplicate rows — confirm whether old rows should be
  // removed first.
  const html = array
    .map((user) => {
      return `<tr class="user">
      <td> ${user.id}</td>
      <td>${user.name}</td>
      <td>${user.username}</td>
      <td>${user.email}</td>
    <td>${user.address.street}/${user.address.suite}/${user.address.city}</td>
      </tr>
     `;
    })
    .join("");
  console.log(html);
  document.querySelector('#veri_tablo').insertAdjacentHTML("afterbegin", html);
};
veriAl();

// Declaration fragment of a volume-profile indicator class (MQL5);
// elided members appear as "..." in the original snippet.
class VolumeProfile
  {
...
public:
   // NOTE(review): constructors must not declare a return type — the
   // `void` before VolumeProfile(...) looks invalid; confirm this
   // compiles in the target MQL build and drop `void` if not.
   void              VolumeProfile(datetime _from, datetime _to);
                    ~VolumeProfile() {};
   double            GetHVPrice();
   void              Plot();
...
  };
/**
 * Adds the "Custom Menu" with the three update actions to the
 * spreadsheet UI. Apps Script runs this automatically on open.
 */
function onOpen() {
  const menuEntries = [
    ['Update Master Summary', 'updateMasterSummaryBasedOnConfiguration'],
    ['Update Configuration', 'updateConfiguration'],
    ['Update Consolidated Master', 'updateConsolidatedMaster'],
  ];
  const menu = SpreadsheetApp.getUi().createMenu('Custom Menu');
  for (const [label, handlerName] of menuEntries) {
    menu.addItem(label, handlerName);
  }
  menu.addToUi();
}

/**
 * Rebuilds the "Master Summary" sheet from the tabs marked "active" in
 * the "Configuration" sheet: one row per status, one column per active
 * tab, a per-status total column, and a final totals row.
 */
function updateMasterSummaryBasedOnConfiguration() {
  var statusNames = ["RFQ SENT", "PART NUMBER SET UP", "SOURCED", "DEVELOPING", "AWAITING SAMPLE", "SAMPLE RECEIVED", "PIES COLLECTION", "PIES APPROVED", "PIES REJECTED", "PM APPROVED", "PRICING", "COMPLETE", "TERMINATED"];

  var spreadsheet = SpreadsheetApp.getActiveSpreadsheet();
  var masterSheet = spreadsheet.getSheetByName("Master Summary");
  var configSheet = spreadsheet.getSheetByName("Configuration");

  // Clear existing content in Master Summary sheet excluding the first column
  var rangeToClear = masterSheet.getRange("B:ZZ");
  rangeToClear.clear();

  // Get tab names and their statuses from the Configuration sheet
  var rangeData = configSheet.getRange("A:B").getValues();
  var tabNames = [];
  var tabStatuses = [];

  // Populate tabNames and tabStatuses arrays
  for (var i = 0; i < rangeData.length; i++) {
    var tabName = rangeData[i][0];
    var status = rangeData[i][1];
    if (tabName && status) { // Ensure both tab name and status exist
      tabNames.push(tabName);
      tabStatuses.push(status.toLowerCase()); // Normalize for comparison
    }
  }

  // Keep only the tabs flagged "active"
  var activeTabs = tabNames.filter(function(_, index) {
    return tabStatuses[index] === "active";
  });

  // Header row: Status | Total Parts Count | one column per active tab
  var headerRowData = ['Status', 'Total Parts Count'].concat(activeTabs);
  masterSheet.getRange(1, 1, 1, headerRowData.length).setValues([headerRowData]);

  // 2D array holding every row, written with a single setValues() call
  var outputData = statusNames.map(function(statusName) {
    return [statusName, 0].concat(new Array(activeTabs.length).fill(0));
  });

  // Final row with per-tab totals and the grand total.
  // BUG FIX: the label previously read 'TotTotal Parts Count'.
  var totalCountsRow = ['Total Parts Count', 0].concat(new Array(activeTabs.length).fill(0));
  outputData.push(totalCountsRow);

  // Iterate over active tabs and count the statuses
  activeTabs.forEach(function(tabName, tabIndex) {
    var sheet = spreadsheet.getSheetByName(tabName);
    if (sheet) {
      var values = sheet.getRange("A:A").getValues().flat();
      var statusCounts = statusNames.reduce(function(counts, status) {
        counts[status] = 0;
        return counts;
      }, {});

      // Count the statuses (case-insensitive match against statusNames)
      values.forEach(function(value) {
        var upperValue = value.toString().toUpperCase();
        if (statusCounts.hasOwnProperty(upperValue)) {
          statusCounts[upperValue]++;
        }
      });

      // Fill the outputData array with counts
      statusNames.forEach(function(statusName, statusIndex) {
        var count = statusCounts[statusName] || 0;
        outputData[statusIndex][tabIndex + 2] = count; // This tab's column
        outputData[statusIndex][1] += count;           // Per-status total
        totalCountsRow[tabIndex + 2] += count;         // Per-tab total
      });
      totalCountsRow[1] += totalCountsRow[tabIndex + 2]; // Grand total
    }
  });

  // Write the collected data to the sheet in one operation
  masterSheet.getRange(2, 1, outputData.length, outputData[0].length).setValues(outputData);
}


/**
 * Appends any sheet not yet listed in the "Configuration" sheet and marks
 * it Active/Inactive depending on whether its column A contains any of
 * the known status values.
 */
function updateConfiguration() {
  var statusNames = ["RFQ SENT", "PART NUMBER SET UP", "SOURCED", "DEVELOPING", "AWAITING SAMPLE", "SAMPLE RECEIVED", "PIES COLLECTION", "PIES APPROVED", "PIES REJECTED", "PM APPROVED", "PRICING", "COMPLETE", "TERMINATED"];

  var spreadsheet = SpreadsheetApp.getActiveSpreadsheet();
  var configSheet = spreadsheet.getSheetByName("Configuration");

  // Fetch existing sheet names from Configuration sheet
  var existingSheetNames = configSheet.getRange("A2:A").getValues().flat().filter(function(name) {
    return name; // Filter out empty values
  });

  // All sheet names except the two bookkeeping sheets
  var allSheetNames = spreadsheet.getSheets().map(function(sheet) {
    return sheet.getName();
  }).filter(function(name) {
    return name !== "Configuration" && name !== "Master Summary";
  });

  // Sheets that are not yet listed in the Configuration sheet
  var newSheetNames = allSheetNames.filter(function(name) {
    return !existingSheetNames.includes(name);
  });

  if (newSheetNames.length === 0) {
    return; // Nothing to add
  }

  // Append new sheet names to the Configuration sheet
  var startRow = existingSheetNames.length + 2;
  configSheet.getRange(startRow, 1, newSheetNames.length, 1).setValues(newSheetNames.map(function(name) {
    return [name];
  }));

  // One shared dropdown rule for every status cell
  var statusValidationRule = SpreadsheetApp.newDataValidation()
      .requireValueInList(["Active", "Inactive"], true)
      .build();

  for (var k = 0; k < newSheetNames.length; k++) {
    var sheet = spreadsheet.getSheetByName(newSheetNames[k]);

    // PERF FIX: read column A once per sheet instead of once per status.
    // The original called getCountForStatusInSheet for each of the 13
    // statuses, re-reading the entire column every time.
    var isActive = false;
    if (sheet) {
      var columnValues = sheet.getRange("A:A").getValues().flat();
      isActive = columnValues.some(function(value) {
        return statusNames.indexOf(value) !== -1; // same exact-match semantics
      });
    }

    // Write the status with the dropdown and a colour cue
    var statusCell = configSheet.getRange(startRow + k, 2);
    statusCell.setValue(isActive ? "Active" : "Inactive");
    statusCell.setDataValidation(statusValidationRule);
    statusCell.setFontColor(isActive ? "#00FF00" : "#FF0000");
  }
}

/**
 * Counts exact occurrences of `status` in column A of the named sheet.
 * @param {string} status - Status value to look for (exact match).
 * @param {string} sheetName - Name of the sheet to scan.
 * @returns {number} Occurrence count; 0 when the sheet does not exist.
 */
function getCountForStatusInSheet(status, sheetName) {
  const sheet = SpreadsheetApp.getActiveSpreadsheet().getSheetByName(sheetName);
  if (!sheet) {
    return 0;
  }

  // Statuses are assumed to live in column A.
  const columnValues = sheet.getRange("A:A").getValues().flat();

  let occurrences = 0;
  for (const cellValue of columnValues) {
    if (cellValue === status) {
      occurrences += 1;
    }
  }
  return occurrences;
}

/**
 * Rebuilds the "Consolidated Master" sheet by copying selected columns
 * from every active tab for rows whose status (column A) is one of the
 * tracked in-progress statuses.
 * NOTE: a second definition of this function exists later in the file
 * and, being later, wins at load time.
 */
function updateConsolidatedMaster() {
  var statusNames = ["RFQ SENT", "PART NUMBER SET UP", "SOURCED", "DEVELOPING", "AWAITING SAMPLE", "SAMPLE RECEIVED", "PIES COLLECTION", "PIES APPROVED", "PIES REJECTED", "PM APPROVED", "PRICING", "COMPLETE"];
  var columnsToCopy = ["Status", "Start Date", "Part Type", "HOL P/N", "OE#", "ALT OE", "MAM Status (change to Dev)", "FP Status (Change to Electronically Announced)", "PartCat Status (Changed to Electronically Announced)", "Interchange", "Interchange Completion", "Parts List/RFQ Submitted to Warren", "Parts List/RFQ Returned to Holstein", "Production Sourced Part is requested from Warren", "ETA of Sample", "Date Prod Sample Delivered to Holstein", "Factory Code", "MOQ"];

  var spreadsheet = SpreadsheetApp.getActiveSpreadsheet();
  var masterSheet = spreadsheet.getSheetByName("Consolidated Master");
  var configSheet = spreadsheet.getSheetByName("Configuration");

  // Clear existing content in Consolidated Master sheet
  masterSheet.clear();

  // Get active tab names from the Configuration sheet
  var rangeData = configSheet.getRange("A:B").getValues();
  var activeTabs = rangeData.filter(function(row) {
    return row[1] && row[1].toLowerCase() === "active";
  }).map(function(row) {
    return row[0];
  });

  // Insert headers for the Consolidated Master sheet
  masterSheet.getRange(1, 1, 1, columnsToCopy.length).setValues([columnsToCopy]);

  // Collect every matching row; written once at the end (PERF: the
  // original issued one setValues call per row).
  var outputRows = [];

  activeTabs.forEach(function(tabName) {
    var sheet = spreadsheet.getSheetByName(tabName);
    if (!sheet) return;

    var sheetData = sheet.getDataRange().getValues();
    var columnNames = sheetData[1]; // Column headers live on row 2

    sheetData.forEach(function(row, rowIdx) {
      if (rowIdx === 0 || rowIdx === 1) return; // Skip the header rows

      var status = row[0]; // Status is in the first column (A)
      if (statusNames.indexOf(status) === -1) return;

      // BUG FIX: the original seeded rowData with the status AND copied
      // the "Status" column again, producing one more cell than the
      // header row; it also skipped missing columns, mis-aligning the
      // remaining cells. Build exactly one cell per header instead.
      var rowData = columnsToCopy.map(function(col) {
        var colIndexInSheet = columnNames.indexOf(col);
        if (colIndexInSheet !== -1) return row[colIndexInSheet];
        return col === "Status" ? status : '';
      });
      outputRows.push(rowData);
    });
  });

  if (outputRows.length > 0) {
    masterSheet.getRange(2, 1, outputRows.length, columnsToCopy.length).setValues(outputRows);
  }
}
/**
 * Rebuilds the "Consolidated Rejected" sheet with rows whose status is
 * TERMINATED or PIES REJECTED, copying a fixed set of columns from every
 * active tab listed in the "Configuration" sheet.
 */
function updateConsolidatedRejected() {
  var statusNames = ["TERMINATED", "PIES REJECTED"];
  var columnsToCopy = ["STATUS", "Part Type", "HOL P/N", "OE#", "QC Inspection/PIES Collection", "HOL Feedback Sent", "New Sample Requested", "New Sample Received"];

  var spreadsheet = SpreadsheetApp.getActiveSpreadsheet();
  var masterSheet = spreadsheet.getSheetByName("Consolidated Rejected");
  var configSheet = spreadsheet.getSheetByName("Configuration");

  // Clear existing content in Consolidated Rejected sheet
  masterSheet.clear();

  // Get active tab names from the Configuration sheet
  var rangeData = configSheet.getRange("A:B").getValues();
  var activeTabs = rangeData.filter(function(row) {
    return row[1] && row[1].toLowerCase() === "active";
  }).map(function(row) {
    return row[0];
  });

  // Insert headers for the Consolidated Rejected sheet
  masterSheet.getRange(1, 1, 1, columnsToCopy.length).setValues([columnsToCopy]);

  // Collect matching rows; written with a single setValues call at the
  // end (PERF FIX: the original wrote one row per setValues call).
  var outputRows = [];

  activeTabs.forEach(function(tabName) {
    var sheet = spreadsheet.getSheetByName(tabName);
    if (!sheet) return;

    var sheetData = sheet.getDataRange().getValues();
    var columnNames = sheetData[1]; // Column headers live on row 2

    sheetData.forEach(function(row, rowIdx) {
      if (rowIdx === 0 || rowIdx === 1) return; // Skip the header rows

      var status = row[0]; // Status is in the first column (A)
      if (!statusNames.includes(status)) return;

      var rowData = columnsToCopy.map(function(col) {
        var colIndexInSheet = columnNames.indexOf(col);
        if (colIndexInSheet !== -1) return row[colIndexInSheet];
        // "STATUS" may not exist as a header in the tab; fall back to
        // the value read from column A, and blank for other misses.
        return col === "STATUS" ? status : '';
      });
      outputRows.push(rowData);
    });
  });

  if (outputRows.length > 0) {
    masterSheet.getRange(2, 1, outputRows.length, columnsToCopy.length).setValues(outputRows);
  }
}
//.......................................//

/**
 * Rebuilds the "Consolidated Master" sheet (second, overriding version):
 * copies selected columns plus a normalised MOQ column from every active
 * tab for rows with a tracked in-progress status.
 */
function updateConsolidatedMaster() {
  var statusNames = ["RFQ SENT", "PART NUMBER SET UP", "SOURCED", "DEVELOPING", "AWAITING SAMPLE", "SAMPLE RECEIVED", "PIES COLLECTION", "PIES APPROVED", "PIES REJECTED", "PM APPROVED", "PRICING", "COMPLETE"];
  var columnsToCopy = ["Status", "Start Date", "Part Type", "HOL P/N", "OE#", "ALT OE", "MAM Status (change to Dev)", "FP Status (Change to Electronically Announced)", "PartCat Status (Changed to Electronically Announced)", "Interchange", "Interchange Completion", "Parts List/RFQ Submitted to Warren", "Parts List/RFQ Returned to Holstein", "Production Sourced Part is requested from Warren", "ETA of Sample", "Date Prod Sample Delivered to Holstein", "Factory Code"];

  var spreadsheet = SpreadsheetApp.getActiveSpreadsheet();

  try {
    var masterSheet = spreadsheet.getSheetByName("Consolidated Master");
    var configSheet = spreadsheet.getSheetByName("Configuration");

    // Clear existing content in Consolidated Master sheet
    masterSheet.clear();

    // Get active tab names from the Configuration sheet
    var rangeData = configSheet.getRange("A:B").getValues();
    var activeTabs = rangeData.filter(function(row) {
      return row[1] && row[1].toLowerCase() === "active";
    }).map(function(row) {
      return row[0];
    });

    // Header row: the copied columns plus a trailing MOQ column
    var headerRow = columnsToCopy.concat("MOQ");
    masterSheet.getRange(1, 1, 1, headerRow.length).setValues([headerRow]);

    // Data rows, written in one batch at the end
    var dataRows = [];

    // Iterate through active tabs
    activeTabs.forEach(function(tabName) {
      var sheet = spreadsheet.getSheetByName(tabName);
      if (!sheet) return;

      var sheetData = sheet.getDataRange().getValues();
      var columnNames = sheetData[1]; // Column headers live on row 2

      // Iterate through rows (excluding the two header rows)
      sheetData.slice(2).forEach(function(row) {
        var status = row[0];
        if (!statusNames.includes(status)) return;

        // BUG FIX: the original seeded rowData with [status] and then
        // copied the "Status" column again, so data rows had one cell
        // more than the header and setValues() rejected the mismatch.
        // Build exactly one cell per header column, blank on misses.
        var rowData = columnsToCopy.map(function(col) {
          var colIndex = columnNames.indexOf(col);
          if (colIndex !== -1) return row[colIndex];
          return col === "Status" ? status : "";
        });

        // MOQ: a Date value means someone typed a date instead of a
        // quantity; normalise it to "1" as before.
        var moqIndex = columnNames.indexOf("MOQ");
        if (moqIndex !== -1) {
          var moqValue = row[moqIndex];
          rowData.push(moqValue instanceof Date ? "1" : moqValue);
        } else {
          rowData.push(""); // MOQ column not found in this tab
        }

        dataRows.push(rowData);
      });
    });

    // Insert all data at once
    if (dataRows.length > 0) {
      masterSheet.getRange(2, 1, dataRows.length, headerRow.length).setValues(dataRows);
    }
  } catch (error) {
    // Surface failures in the execution log instead of failing silently
    console.error("Error occurred:", error);
  }
}


# NOTE(review): these notebook cells appear in reverse execution order —
# the train/test split on the last line must run before the fits above it,
# and `grid_search` is defined in a cell not shown here. Confirm order.
# Evaluating the best model on the test set
best_model = grid_search.best_estimator_
test_score = best_model.score(x_test, y_test)
print('Test set accuracy:', test_score)
# Candidate pipeline and hyper-parameter grid for the grid search
pipe = make_pipeline(MinMaxScaler(), PCA(n_components=100), SVC())
param_grid = {
    'svc__kernel': ['rbf', 'linear'],
    'svc__C': [1, 10],
    'svc__gamma': [0.01, 0.1]
}
# Training the initial SVC model
# NOTE: rebinds `pipe`, discarding the make_pipeline version above.
pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC(kernel = 'rbf', C = 10))])
pipe.fit(x_train, y_train)
pipe.score(x_test, y_test)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state = 0)
# Creating a stacked version of the image - original and distorted one
# Each sample is the flattened 40x30 colour image stacked on top of the
# flattened 40x30 wavelet-transformed (grayscale) image.
x, y = [], []
for disease_name, training_files in oral_disease_file_names_dict.items():
    for training_image in training_files:
        img = cv2.imread(training_image)
        scaled_orig_img = cv2.resize(img, (40, 30))
        # Wavelet transform highlights edges for the classifier.
        img_har = w2d(img, 'db1', 5)
        scaled_img_har = cv2.resize(img_har, (40, 30))
        # BUG FIX: the original stacked scaled_orig_img twice, so the
        # wavelet ("distorted") image was never used. Stack the wavelet
        # image (single-channel, hence 40*30*1) under the original.
        stacked_img = np.vstack((
            scaled_orig_img.reshape(40 * 30 * 3, 1),
            scaled_img_har.reshape(40 * 30 * 1, 1),
        ))
        x.append(stacked_img)
        y.append(class_dict[disease_name])
/**
 * Show msg (toast or dialog).
 * @param {string} text - Message to display; also written to the log.
 * @param {string} [type='silent'] - 'dialog' shows a blocking alert;
 *     anything else shows a 60-second toast.
 */
function showMsg(text, type = 'silent') {
  Logger.log(text);

  const ss = SpreadsheetApp;

  if (type === 'dialog') {
    ss.getUi().alert(text);
    return;
  }
  ss.getActiveSpreadsheet().toast(text, 'Status', 60);
}
# Transforming the image to improve detection
def w2d(img, mode='haar', level=1):
    """2-D wavelet edge filter: zero the approximation coefficients so
    only detail (edge) information survives reconstruction.

    NOTE(review): `mode` is passed as pywt's *wavelet* argument (e.g.
    'haar', 'db1'), not a signal-extension mode — the name is misleading.
    NOTE(review): cv2.imread() yields BGR, so COLOR_RGB2GRAY swaps the
    R/B channel weights here — verify COLOR_BGR2GRAY was not intended.
    """
    imArray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    coeffs = pywt.wavedec2(imArray, mode, level=level)
    coeffs[0] *= 0  # drop the low-frequency approximation band
    imArray_H = pywt.waverec2(coeffs, mode)
    imArray_H = np.clip(imArray_H, 0, 255)  # keep values in 8-bit range
    imArray_H = np.uint8(imArray_H)
    return imArray_H
# Creating the labels based on directories
# Map each disease name to a stable integer label (0, 1, 2, ...) in the
# dictionary's insertion order; replaces the manual counter loop with
# the idiomatic enumerate-based comprehension.
class_dict = {
    disease_name: label
    for label, disease_name in enumerate(oral_disease_file_names_dict)
}
class_dict
# Collecting file directories into a dictionary
# Maps disease name (the directory's last path component) to the list of
# image files found anywhere under that directory.
oral_disease_file_names_dict = {}

for img_dir in img_dirs:
    disease_name = img_dir.split('/')[-1]  # assumes '/'-separated paths — TODO confirm on Windows
    oral_disease_file_names_dict[disease_name] = []
    for root, dirs, files in os.walk(img_dir):
        for file in files:
            # Keep only common image extensions (case-sensitive match).
            if file.endswith(('.jpg', '.jpeg', '.png', '.bmp', '.gif')): 
                file_path = os.path.join(root, file)
                oral_disease_file_names_dict[disease_name].append(file_path)
# Scanning the directory using os python function to collect
# every immediate subdirectory of path_to_data (one per disease class).
img_dirs = [entry.path for entry in os.scandir(path_to_data) if entry.is_dir()]
# Loading the smile cascade classifier
# NOTE(review): Kaggle paths normally start with '/kaggle/...' — confirm
# the missing leading slash is intentional, otherwise the XML will not
# load and detectMultiScale will fail.
smile_cascade = cv2.CascadeClassifier('kaggle/input/haar-cascades-for-face-detection/haarcascade_smile.xml')
# `gray` and `img` come from earlier cells; detect smile/teeth regions.
smile = smile_cascade.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors = 5, minSize = (30, 30))
for (x, y, w, h) in smile:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)  # blue boxes (BGR)
output_path = 'kaggle/working/image_with_teeth.jpg'
cv2.imwrite(output_path, img)
print('Image with detected teeth saved to:', output_path)
# NOTE(review): these last lines re-load `img` AFTER it is used above —
# the cells appear out of execution order. Also, plt.imshow on a BGR
# array displays swapped colour channels.
img = cv2.imread('kaggle/input/oral-diseases/Calculus/Calculus/(1).jpg')
img.shape
plt.imshow(img)
# Cleaning the index values and aggregating the counts of ingredients

top_ingredient_counts = {}

# Strip list-literal artefacts ("[", "]", "'") left over from parsing and
# merge the counts of entries that collapse onto the same cleaned name.
for ingredient, count in top_ingredient_series_sorted.items():
    cleaned_name = ingredient.replace('[', '').replace(']', '').replace("'", "")
    top_ingredient_counts[cleaned_name] = top_ingredient_counts.get(cleaned_name, 0) + count
top_ingredient_series_cleaned = pd.Series(top_ingredient_counts, dtype = int)
print(top_ingredient_series_cleaned)
# Iterating over rows and counting the occurrences of ingredients listed in the 'TopNotes' column 
top_ingredient_counts = {}

for index, row in df.iterrows():
    top_notes_list = row['TopNotes']
    # The column holds either a real list or a comma-separated string,
    # depending on how the data round-tripped through CSV.
    if isinstance(top_notes_list, list):
        ingredients = top_notes_list
    elif isinstance(top_notes_list, str):
        ingredients = [x.strip() for x in top_notes_list.split(',')]
    else:
        ingredients = []  # NaN / missing entries contribute nothing
    for ingredient in ingredients:
        top_ingredient_counts[ingredient] = top_ingredient_counts.get(ingredient, 0) + 1
top_ingredient_series = pd.Series(top_ingredient_counts, dtype=int)
print(top_ingredient_series)
# Saving the data to the CSV file

# Write the header once, then every scraped record in a single call.
with open(csv_file, 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(all_perfume_data)

print('Data saved to:', csv_file)
# Collecting the data from URLs we have gathered

all_perfume_data = []

# Scrape each perfume page; a failure on one link is logged and skipped
# so a single bad page does not abort the whole run.
for link in perfume_links:
    print('Scraping link:', link)
    try:
        perfume_data_new = scrape_perfume_data(link)
        all_perfume_data.append(perfume_data_new)
        print(f'Data scraped successfully for {perfume_data_new["PerfumeName"]}')
    except Exception as e:
        print(f'Error scraping data for {link}: {e}')
# Collecting the urls we want to scrape
# Walk 50 pages of the "Recently added" directory; the page number is
# appended to this base URL inside scrape_all_pages().
perfume_links = scrape_all_pages('https://www.parfumo.com/Recently_added?current_page=', 50)
print(perfume_links)
# This method parses through every page within the new-release directory
def scrape_all_pages(base_url, total_pages):
    """Collect perfume links from `total_pages` consecutive listing pages.

    Args:
        base_url: Listing URL ending just before the page number.
        total_pages: Number of pages to walk, starting at page 1.

    Returns:
        A flat list of every perfume URL found across the pages.
    """
    # BUG FIX: the original immediately overwrote BOTH parameters with
    # hard-coded values, so callers could never change the URL or the
    # page count. Honour the arguments instead.
    all_links = []
    end_url = '&'

    for page_number in range(1, total_pages + 1):
        page_url = f"{base_url}{page_number}{end_url}"
        try:
            links_on_page = scrape_perfume_links(page_url)
            all_links.extend(links_on_page)
            print(f"Scraped links from page {page_number}")
        except requests.HTTPError as e:
            # Log and continue; one bad page should not stop the crawl.
            print(f"Error scraping page {page_number}: {e}")
        time.sleep(1)  # be polite to the server
    return all_links
# Defining a method for collecting the urls we want to scrape
def scrape_perfume_links(url):
    """Return the perfume-detail URLs listed on one directory page.

    Raises requests.HTTPError (via raise_for_status) on HTTP failures.
    """
    headers = {
        "User-Agent": "Insert user agent",
        "Referer": "https://www.parfumo.com/",
        "Accept-Language": "en-US,en;q=0.9"
    }
    session = requests.Session()
    page = session.get(url, headers=headers)
    page.raise_for_status()  # surface HTTP errors to the caller
    soup3 = BeautifulSoup(page.content, 'html.parser')

    # Each listing entry is a <div class="name"> wrapping a single link.
    return [entry.find('a')['href'] for entry in soup3.find_all('div', class_='name')]
# Defining a method for scraping the web page
def scrape_perfume_data(url):
    headers = {
        "User-Agent": "Insert user agent"
    }
    page = requests.get(url, headers=headers)
    page.raise_for_status()
    soup = BeautifulSoup(page.content, 'html.parser')
    
    perfume_name = brand_name = release_year = overall_rating = rating_count = perfumer = scent_rating = longevity_rating = sillage_rating = bottle_rating = value_rating = scraping_date = None
    
    try:
        perfume_name = soup.find('h1', class_='p_name_h1', itemprop='name').get_text().strip().split('\n')[0].strip()
    except AttributeError:
        pass
    
    try:
        brand_span = soup.find('span', itemprop='brand')
        brand_name = brand_span.find('span', itemprop='name').get_text().strip()
    except AttributeError:
        pass
    
    try:
        year = soup.find('a', href=lambda href: href and 'Release_Years' in href)
        release_year = year.get_text().strip()
    except AttributeError:
        pass
    
    try:
        overall_rating = soup.find('span', itemprop='ratingValue').get_text().strip()
    except AttributeError:
        pass
    
    try:
        rating_count = soup.find('span', itemprop='ratingCount').get_text().strip()
    except AttributeError:
        pass
    
    try:
        perfumer = soup.find('div', {'class': 'w-100 mt-0-5 mb-3'}).get_text().strip()
    except AttributeError:
        pass
    
    try:
        top_notes = soup.find('div', class_='pyramid_block nb_t w-100 mt-2')
        top_note_list = [span.get_text(strip=True) for span in top_notes.find_all('span', class_='clickable_note_img')]
    except AttributeError:
        pass
    
    try:
        heart_notes = soup.find('div', class_='pyramid_block nb_m w-100 mt-2')
        heart_note_list = [span.get_text(strip=True) for span in heart_notes.find_all('span', class_='clickable_note_img')]
    except AttributeError:
        pass
    
    try:
        base_notes = soup.find('div', class_='pyramid_block nb_b w-100 mt-2')
        base_note_list = [span.get_text(strip=True) for span in base_notes.find_all('span', class_='clickable_note_img')]
    except AttributeError:
        pass
   
    
    scraping_date = datetime.date.today()
    
    return {
        'PerfumeName': perfume_name, 
        'Brand': brand_name, 
        'ReleaseYear': release_year, 
        'OverallRating': overall_rating, 
        'RatingCount': rating_count, 
        'Perfumer': perfumer, 
        'TopNotes': top_note_list if 'top_note_list' in locals() else None, 
        'HeartNotes': heart_note_list if 'heart_note_list' in locals() else None, 
        'BaseNotes': base_note_list if 'base_note_list' in locals() else None, 
    }
Custom Extraction >> Regex

["']datePublished["']: *["'](.*?)["']
["']dateModified["']: *["'](.*?)["']
class Solution {
public:
    // Sorts the characters of s so that higher-frequency characters come
    // first. Ties between equally frequent characters keep the ascending
    // character order produced by the ordered map below (same tie-break
    // as the original implementation).
    string frequencySort(string s) {
        // char -> occurrence count, keyed in ascending character order.
        map<char, int> freq;
        for (char c : s) {
            ++freq[c];
        }
        // Re-key by count; multimap keeps equal counts in insertion
        // order, i.e. ascending character.
        multimap<int, char> byCount;
        for (const auto& entry : freq) {
            byCount.insert({entry.second, entry.first});
        }
        // Emit in ascending-frequency order, then reverse the whole
        // string to get descending frequency.
        string result;
        for (const auto& entry : byCount) {
            result.append(entry.first, entry.second);
        }
        reverse(result.begin(), result.end());
        return result;
    }
};
[
    {
        "$match": {
            "userStatus": "ACTIVE",
            "partnerShortCode": {
                "$in": [
                    "CHC",
                    "DRB",
                    "DRL",
                    "DUMMY",
                    "GLOBANT",
                    "IHL",
                    "JIVA",
                    "MINDTREE",
                    "MM",
                    "SHUDDHI",
                    "SMITFIT",
                    "SUDLIFE",
                    "test201",
                    "wipro"
                ]
            }
        }
    },
    {
        "$lookup": {
            "from": "generatedparticipantdata",
            "localField": "email",
            "foreignField": "email",
            "as": "generatedParticipantDataLookupLkp"
        }
    },
    {
        "$unwind": {
            "path": "$generatedParticipantDataLookupLkp",
            "preserveNullAndEmptyArrays": true
        }
    },
    {
        "$match": {}
    },
    {
        "$lookup": {
            "from": "subscription",
            "localField": "_id",
            "foreignField": "userId",
            "as": "subscriptionLkp"
        }
    },
    {
        "$project": {
            "subscriptionLkp": {
                "$arrayElemAt": [
                    {
                        "$filter": {
                            "input": "$subscriptionLkp",
                            "as": "subscription",
                            "cond": {
                                "$eq": [
                                    "$$subscription.active",
                                    true
                                ]
                            }
                        }
                    },
                    0
                ]
            },
            "partnerShortCode": 1,
            "firstName": 1,
            "_id": 1,
            "middleName": 1,
            "lastName": 1,
            "email": 1,
            "mobile": 1,
            "source": 1,
            "lastAppLaunchDate": "$userLkp.lastAppLaunchDate",
            "daysSpentOnApp": 1,
            "bmi": "$generatedParticipantDataLookupLkp.bmi",
            "profilePictureURL": 1,
            "startDate":{
                            "$arrayElemAt": [
                                "$subscriptionLkp.startDate",
                                0
                            ]
                        },
            "journeyDays": {
                "$dateDiff": {
                    "startDate": {
                        "$toDate": {
                            "$arrayElemAt": [
                                "$subscriptionLkp.startDate",
                                0
                            ]
                        }
                    },
                    "endDate": {
                        "$date": "2024-05-21T04:04:07.418Z"
                    },
                    "unit": "day"
                }
            }
        }
    },
    {
        "$unwind": {
            "path": "$subscriptionLkp",
            "preserveNullAndEmptyArrays": true
        }
    },
    {
        "$lookup": {
            "from": "subscriptionPlan",
            "localField": "subscriptionLkp.planId",
            "foreignField": "_id",
            "as": "subscriptionPlanLkp"
        }
    },
    {
        "$unwind": {
            "path": "$subscriptionPlanLkp",
            "preserveNullAndEmptyArrays": true
        }
    },
    {
        "$match": {}
    },
    {
        "$match": {
            "journeyDays": {
                "$gte": 200,
                "$lte": 300
            }
        }
    },
    {
        "$lookup": {
            "from": "users",
            "localField": "email",
            "foreignField": "email",
            "as": "userLkp"
        }
    },
    {
        "$unwind": {
            "path": "$userLkp",
            "preserveNullAndEmptyArrays": true
        }
    },
    {
        "$sort": {
            "_id": -1
        }
    },
    {
        "$project": {
            "name": "$subscriptionPlanLkp.name",
            "partnerShortCode": 1,
            "firstName": 1,
            "middleName": 1,
            "lastName": 1,
            "email": 1,
            "mobile": 1,
            "source": 1,
            "lastAppLaunchDate": "$userLkp.lastAppLaunchDate",
            "daysSpentOnApp": 1,
            "bmi": 1,
            "profilePictureURL": 1,
            "servicePartnerShortCode": "$userLkp.servicePartnerShortCode",
            "userVisibility": "$userLkp.userVisibility",
            "journeyDays": 1,
            "startDate":1
        }
    }
]
class Solution {
public:
    // Returns true iff s and t are isomorphic: every occurrence of a
    // character in s maps to the same character in t, and no two distinct
    // characters of s map to the same character of t (one-to-one mapping).
    //
    // Fix: the original chose one of two first-occurrence maps by size and
    // probed it with map::operator[], which default-inserts '\0' for unseen
    // keys; for strings containing NUL (e.g. s = "aa", t = "a\0") it wrongly
    // returned true. This version checks the mapping in both directions
    // explicitly and never relies on default-inserted values.
    bool isIsomorphic(string s, string t) {
        if (s.length() != t.length())
            return false;
        map<char, char> fwd;  // s-char -> t-char
        map<char, char> rev;  // t-char -> s-char
        for (size_t i = 0; i < s.length(); ++i) {
            const char a = s[i];
            const char b = t[i];
            auto f = fwd.find(a);
            if (f == fwd.end())
                fwd.emplace(a, b);
            else if (f->second != b)
                return false;  // a already maps to a different t-char
            auto r = rev.find(b);
            if (r == rev.end())
                rev.emplace(b, a);
            else if (r->second != a)
                return false;  // b already claimed by a different s-char
        }
        return true;
    }
};
# Showcasing the data for cluster 0

cluster_0_df = df_trimmed[df_trimmed['ClustersK'] == 0]

# One histogram panel per feature (the cluster label itself is excluded),
# laid out five panels per row.
variable_names = [col for col in cluster_0_df.columns if col != 'ClustersK']
colors = ['#2e2237']
n_variables = len(variable_names)
n_rows = (n_variables - 1) // 5 + 1
fig, axes = plt.subplots(n_rows, 5, figsize=(15, 3 * n_rows), squeeze=False)

# flatten() walks the grid row-major, matching the original row/col order.
flat_axes = axes.flatten()
for ax, variable in zip(flat_axes, variable_names):
    cluster_0_df[variable].plot.hist(ax=ax, bins=20, color=colors)
    ax.set_title(f'Distribution of {variable}')
    ax.set_xlabel(variable)
    ax.set_ylabel('Frequency')

# Remove the unused panels in the final (partial) row.
for unused_ax in flat_axes[n_variables:]:
    fig.delaxes(unused_ax)
plt.tight_layout()
plt.show()
# Defining the number of clusters (K)
num_clusters = 4

# Fit KMeans on the PCA-reduced data and attach the resulting labels to the
# trimmed frame so each cluster can be profiled feature-by-feature later.
kmeans = KMeans(n_clusters=num_clusters, n_init=10)
cluster_labels = kmeans.fit_predict(dim_ds)
df_trimmed['ClustersK'] = cluster_labels
# Project the scaled features down to three principal components for clustering.
pca = PCA(n_components=3)
reduced = pca.fit_transform(df_scaled)
dim_ds = pd.DataFrame(reduced, columns=['column1', 'column2', 'column3'])
# Summary statistics of the three components
dim_ds.describe().T
# Applying z-score normalisation so every feature contributes on the same scale
scaler = StandardScaler()

# fit_transform returns a bare ndarray; rebuild a frame with the original columns
df_scaled = pd.DataFrame(
    scaler.fit_transform(df_trimmed),
    columns=df_trimmed.columns,
)
# Total spend across the individual product categories.
# Plain '+' is used (not .sum(axis=1)) so NaNs propagate rather than count as 0.
df['TotalSpend'] = (
    df['Wines'] + df['Fruits'] + df['Meat']
    + df['Fish'] + df['Sweets'] + df['Gold']
)
# Fleshing out the data around parenthood
df['N_minors_home'] = df['Kidhome'] + df['Teenhome']
df['Parent'] = np.where(df['N_minors_home'] > 0, 1, 0)
# Cleaning the Education column: collapse the raw labels into standard tiers.
education_map = {
    'Basic': 'Sec school',
    '2n Cycle': 'Masters',
    'Graduation': 'Bachelors',
    'Master': 'Masters',
    'PhD': 'Doctorate',
}
df['Education'] = df['Education'].replace(education_map)
# Cleaning the Marital status column, then dropping the joke categories.
marital_map = {'Alone': 'Single', 'Widow': 'Widowed', 'Together': 'Dating'}
df['Marital_Status'] = df['Marital_Status'].replace(marital_map)
df = df[~df['Marital_Status'].isin(['YOLO', 'Absurd'])]
# Exploring the unique values in the Marital status column
df['Marital_Status'].unique()
# Removing income outliers; NaN incomes compare False here and drop too,
# so the dropna that follows is a belt-and-braces step.
income_ok = df['Income'] <= 200000
df = df[income_ok]
df = df.dropna(subset=['Income'])
# Exploring the outliers for age
over_100_mask = df['Age'] > 100
customers_over_100 = df[over_100_mask]
num_customers_over_100 = len(customers_over_100)

print('N of customers over the age of 100:', num_customers_over_100)
# Derive customer age from birth year, relative to the current calendar year.
current_year = dt.datetime.now().year
df['Age'] = df['Year_Birth'].rsub(current_year)
-- Determining what factors were more prevalent across all collisions.
-- Each COUNT(CASE ...) tallies rows where the flag is set (NULL/0 rows fall
-- through and are not counted); the * 100.0 literal forces decimal maths.
SELECT 
    COUNT(*) AS TOTAL_COLLISIONS,
    (COUNT(CASE WHEN INATTENTIONIND = 1 THEN 1 END) * 100.0 / COUNT(*)) AS PERC_INATTENTIONIND,
    (COUNT(CASE WHEN UNDERINFL = 1 THEN 1 END) * 100.0 / COUNT(*)) AS PERC_UNDERINFL,
    (COUNT(CASE WHEN SPEEDING = 1 THEN 1 END) * 100.0 / COUNT(*)) AS PERC_SPEEDING,
    (COUNT(CASE WHEN HITPARKEDCAR = 1 THEN 1 END) * 100.0 / COUNT(*)) AS PERC_HITPARKEDCAR,
    (COUNT(CASE WHEN POORDRIVINGCOND = 1 THEN 1 END) * 100.0 / COUNT(*)) AS PERC_POORDRIVINGCOND,
    (COUNT(CASE WHEN NIGHTTIME = 1 THEN 1 END) * 100.0 / COUNT(*)) AS PERC_NIGHTTIME,
    (COUNT(CASE WHEN intersection_related = 1 THEN 1 END) * 100.0 / COUNT(*)) AS PERC_INTERSECT_RELATED
FROM PortfolioProject.dbo.SeattleCollision;
-- Examining the number of collisions that ended in injuries vs fatalities.
-- NOTE: the 1.0 literal inside the percentage SUMs forces decimal arithmetic,
-- preventing integer division before the * 100.0 scaling.
SELECT 
    COUNT(*) AS TOTAL_COLLISIONS,
    -- Collisions with at least one fatality
    SUM(CASE WHEN FATALITIES > 0 THEN 1 ELSE 0 END) AS TOTAL_FATAL_COLLISIONS,
    (SUM(CASE WHEN FATALITIES > 0 THEN 1.0 ELSE 0 END) / COUNT(*)) * 100.0 AS PERCENTAGE_FATAL_COLLISIONS,
    -- Collisions with at least one injury (counts collisions, not injured persons,
    -- despite the alias)
    SUM(CASE WHEN INJURIES > 0 THEN 1 ELSE 0 END) AS TOTAL_INJURIES,
    (SUM(CASE WHEN INJURIES > 0 THEN 1.0 ELSE 0 END) / COUNT(*)) * 100.0 AS PERCENTAGE_INJURIES
FROM 
    PortfolioProject.dbo.SeattleCollision;
-- Calculating the number of collisions per year and n of people affected,
-- busiest years first.
SELECT
    YEAR(sc.DATE_UPD) AS COLLISION_YEAR,
    COUNT(*) AS COLLISIONS_PER_YEAR,
    SUM(sc.PERSONCOUNT) AS TOTAL_AFFECTED
FROM PortfolioProject.dbo.SeattleCollision AS sc
GROUP BY YEAR(sc.DATE_UPD)
ORDER BY COLLISIONS_PER_YEAR DESC;
-- Adding more descriptive labels to the Severity column.
-- Simple CASE on SEVERITYCODE; NULL/unrecognised codes fall to the ELSE label.
ALTER TABLE PortfolioProject.dbo.SeattleCollision
ADD SEVERITY nvarchar(255);
UPDATE PortfolioProject.dbo.SeattleCollision
SET SEVERITY = CASE SEVERITYCODE
		WHEN 3 THEN '3—fatality'
		WHEN 2 THEN '2—injury'
		WHEN 1 THEN '1—property damage'
		ELSE '0—unknown'
	END;
-- Creating a filter for Nighttime: 0 for daylight rows, 1 for everything else
-- (including NULL TIMEOFDAY, which falls through to the ELSE branch).
ALTER TABLE PortfolioProject.dbo.SeattleCollision
ADD NIGHTTIME bit;
UPDATE PortfolioProject.dbo.SeattleCollision
SET NIGHTTIME = CASE TIMEOFDAY
		WHEN 'Daylight' THEN 0
		ELSE 1
	END;
-- Creating a function for determining Daylight vs Nighttime.
-- Looks up the month's sunrise/sunset in SeattleSunriseSunset and buckets the
-- collision time; times at or after sunset (or before sunrise) count as Night.
CREATE FUNCTION FINDTIMEOFDAY(
    @CollisionDate DATE,
    @CollisionTime TIME)
RETURNS VARCHAR(8)
AS
BEGIN
    DECLARE @SUNRISE TIME;
    DECLARE @SUNSET TIME;
    SELECT @SUNRISE = SUNRISE, @SUNSET = SUNSET
    FROM PortfolioProject.dbo.SeattleSunriseSunset
    WHERE MONTHN = MONTH(@CollisionDate);
    RETURN CASE 
        WHEN @CollisionTime >= @SUNRISE AND @CollisionTime < @SUNSET THEN 'Daylight'
        ELSE 'Night'
    END;
END;
star

Wed May 22 2024 03:14:09 GMT+0000 (Coordinated Universal Time)

@azariel #glsl

star

Wed May 22 2024 02:13:56 GMT+0000 (Coordinated Universal Time) https://stackoverflow.com/questions/66967959/how-to-exclude-one-specific-file-from-format-on-save-in-vscode

@kayengxiong

star

Tue May 21 2024 19:40:01 GMT+0000 (Coordinated Universal Time) https://codepen.io/hyperborean17/pen/VwazdoQ

@mohamedahmed123

star

Tue May 21 2024 19:39:22 GMT+0000 (Coordinated Universal Time) https://codepen.io/hyperborean17/pen/VwazdoQ

@mohamedahmed123

star

Tue May 21 2024 16:59:31 GMT+0000 (Coordinated Universal Time)

@taufiq_ali

star

Tue May 21 2024 16:41:04 GMT+0000 (Coordinated Universal Time) https://www.thiscodeworks.com/extension/initializing?newuser

@atang148

star

Tue May 21 2024 16:18:14 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 16:17:12 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 16:16:16 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 16:14:28 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 16:13:39 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 16:11:36 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 16:11:21 GMT+0000 (Coordinated Universal Time)

@coderule #js

star

Tue May 21 2024 16:10:30 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 16:07:12 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 16:05:54 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 16:04:33 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 16:00:14 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 15:59:21 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 15:39:30 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 15:36:30 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 15:34:24 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 15:32:46 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 15:31:35 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 15:30:17 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 15:24:08 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 13:12:44 GMT+0000 (Coordinated Universal Time)

@aguelmann

star

Tue May 21 2024 12:59:54 GMT+0000 (Coordinated Universal Time)

@ayushg103 #c++

star

Tue May 21 2024 09:24:18 GMT+0000 (Coordinated Universal Time)

@CodeWithSachin #aggregation #mongodb #todate #datediff

star

Tue May 21 2024 09:04:23 GMT+0000 (Coordinated Universal Time)

@ayushg103 #c++

star

Tue May 21 2024 08:57:06 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:56:03 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:53:00 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:51:32 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:50:35 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:49:39 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:48:22 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:47:17 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:45:36 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:43:36 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:41:18 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:32:28 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:30:43 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:29:39 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:28:17 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:25:26 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:22:50 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

Save snippets that work with our extensions

Available in the Chrome Web Store Get Firefox Add-on Get VS Code extension