Snippets Collections
{
	"blocks": [
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":sunshine: :blinky_stars: Boost Days - What's On This Week :blinky_stars: :sunshine:"
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "\n\n Good morning Melbourne,\n\n Please see what's on for the week below!"
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": "Xero Café :coffee:",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "\n :new-thing: *This week we are offering:* \n\n :pretzel: Mini Apple & Cinnamon Danish & Salted Caramel Pretzel Cookies  \n\n :lavender-latte: *Weekly Café Special:* _Lavender Latte_"
			}
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": " Wednesday, 5th March :calendar-date-5:",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": " \n\n :lunch: *Light Lunch*: Provided by *Kartel Catering* from *12pm* in the L3 Kitchen & Wominjeka Breakout Space. \n\n"
			}
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": "Thursday, 6th March :calendar-date-6:",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": ":breakfast: *Breakfast*: Provided by *Kartel Catering* from *8:30am - 10:30am* in the Wominjeka Breakout Space. \n\n"
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "_*Later this month:*_ \n\n :cheers-9743:  *13th March:* Social Happy Hour \n\n :hands: *19th March:* Global All Hands \n\n :cheers-9743: *27th March:* Social Happy Hour \n\n\n Love, WX :party-wx:"
			}
		}
	]
}
#include <iostream>
using namespace std;

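// After sorting both arrays ascending, answers "Yes" only when every b[i]
// is strictly greater than its paired g[i] (main already ensures m <= n).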
void CheckForPair(int b[], int g[], int m, int n)
{
  int i = 0, j = 0;
  
  while(i < m && j < n)
  {
    if(b[i] <= g[j])
    {
        cout << "No" << endl;
      	return;
    }
    
    ++i;
    ++j;
  }
  
  cout << "Yes" << endl;
}

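// Two-pointer partition (ascending) using the first element as pivot;
// returns the pivot's final index.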
int Partition(int a[], int low, int high)
{
  int pivot = a[low];
  int i = low;
  int j = high;
  
  while(i < j)
  {
    while(a[i] <= pivot && i <= high-1)
      ++i;
    
    while(a[j] > pivot && j >= low+1)
      --j;
      
    if(i < j)
      swap(a[i], a[j]);
  }
  
  swap(a[low], a[j]);
  return j;
}

void QuickSort(int a[], int low, int high)
{
  if(low < high)
  {
    int pivotIndex = Partition(a, low, high);
    
    QuickSort(a, low, pivotIndex-1);
    QuickSort(a, pivotIndex+1, high);
  }
}

int main() 
{
  int t;
  cin >> t;
  
  while(t--)
  {
    int m, n;
    cin >> m >> n;
    
    if(m > n)
    {
      cout << "No" << endl;
      continue;
    }
    
    int b[m], g[n];
    for(int i = 0; i < m; ++i)
      cin >> b[i];
    
    for(int i = 0; i < n; ++i)
      cin >> g[i];
    
    QuickSort(b, 0, m-1);
    QuickSort(g, 0, n-1);
  
    CheckForPair(b, g, m, n);
  }
  
  return 0;
}
{
	"blocks": [
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":sunshine: :blinky_stars: Boost Days: What's on this week :blinky_stars: :sunshine:"
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "Good morning Brisbane, \n\n Please see below for what's on this week! "
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":calendar-date-3: Monday, 3rd March",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "\n:coffee: *Café Partnership*: Enjoy free coffee and café-style beverages from our Cafe partner *Edwards*.\n\n :Lunch: *Lunch*: provided by _Etto_ from *12pm* in the kitchen.\n\n:massage:*Wellbeing*: Pilates at *SP Brisbane City* is bookable every Monday! Watch this channel on how to book."
			}
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":calendar-date-5: Wednesday, 5 March",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": ":coffee: *Café Partnership*: Enjoy free coffee and café-style beverages from our Cafe partner *Edwards*. \n\n:lunch: *Morning Tea*: provided by _Say Cheese_ from *9am* in the kitchen!"
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "Stay tuned to this channel for more details, check out the <https://calendar.google.com/calendar/u/0?cid=Y19uY2M4cDN1NDRsdTdhczE0MDhvYjZhNnRjb0Bncm91cC5jYWxlbmRhci5nb29nbGUuY29t|*Brisbane Social Calendar*>, and get ready to Boost your workdays!\n\nLove,\nWX Team :party-wx:"
			}
		}
	]
}
*** Settings ***
Resource            ${CURDIR}/../../../../common/resources/salesforce_actions.resource
Resource            ${CURDIR}/../resources/general_functionality_bulk_appointing.resource
Resource            ${CURDIR}/../../bulk_requests/resources/bulk_requests.resource
Resource            ${CURDIR}/../../bulk_requests/resources/queries.resource

Suite Setup         Suite Initialize
Suite Teardown      Teardown Bulk Appointing New

Test Tags           bulk_appointing


*** Variables ***
${BULK_APPOINTMENT_WIZARD_URL}      /lightning/cmp/agentsync__bulkAppointmentWizard?agentsync__wizardStageNumber=3&agentsync__autoSubmitState={auto_submit_state}&agentsync__bulkRequestBatchId={bulk_request_batch_id}
${H3_TEXT}                          //h3[text()='Transaction Submission']
${DATA_ID_TOOLTIP}                  //div[@data-id='autoSubmitDisabledTooltip']
${LIGHTNING_MODAL}                  //lightning-modal
${DISABLED_RADIO_BUTTON}            //span[.//input[@disabled]]
${CREATE_TRANSACTIONS_BUTTON}       //button[text()="Submit Transactions"]
${CANCEL_BUTTON}                    //lightning-modal-footer//button[text()='Cancel']


*** Test Cases ***
Verify Helptext With Auto Submit False
    [Documentation]    Verifies the disabled radio option and tooltip when Auto Submit is FALSE
    # Scenario: Verify help text WHEN auto-submit functionality is disabled
    #    GIVEN the auto-submit feature parameter is set to false
    #    WHEN the user navigates to the bulk appointment process settings page
    #    THEN the first radio option should be disabled
    #    AND tooltip should be displayed

    ${wizard_url_with_bulk_request_batch_id}    Format String
    ...    ${BULK_APPOINTMENT_WIZARD_URL}
    ...    bulk_request_batch_id=${INSERTED_BULK_REQUEST_BATCH_ID}    # robotcode: ignore
    ...    auto_submit_state=false

    AS Go To With Base URL    ${wizard_url_with_bulk_request_batch_id}

    AS Wait For Element State Visible    ${H3_TEXT}

    AS Fail If Element Count Is Not Equal    ${DATA_ID_TOOLTIP}    1
    AS Fail If Element Count Is Not Equal    ${DISABLED_RADIO_BUTTON}    1

Verify Helptext With Auto Submit True
    [Documentation]    Verifies the enabled options and modal flow when Auto Submit is TRUE
    # Scenario: Test the flow WHEN auto-submit functionality is enabled
    #    GIVEN the auto-submit feature parameter is set to true
    #    WHEN the user navigates to the bulk appointment process settings page
    #    THEN both radio options should be enabled
    #    AND no tooltip should be displayed

    ${wizard_url_with_bulk_request_batch_id}    Format String
    ...    ${BULK_APPOINTMENT_WIZARD_URL}
    ...    bulk_request_batch_id=${INSERTED_BULK_REQUEST_BATCH_ID}    # robotcode: ignore
    ...    auto_submit_state=true

    AS Go To With Base URL    ${wizard_url_with_bulk_request_batch_id}
    AS Wait For Element State Visible    ${H3_TEXT}
    AS Fail If Element Count Is Not Equal    ${DATA_ID_TOOLTIP}    0

    #    THEN user clicks on Create Transactions button
    #    AND a modal should open up
    AS Click    ${CREATE_TRANSACTIONS_BUTTON}
    AS Wait For Element State Visible    ${LIGHTNING_MODAL}
    AS Fail If Element Count Is Not Equal    ${LIGHTNING_MODAL}    1

    #    THEN user clicks on Cancel button, inside the modal
    #    AND the modal should close up
    AS Click    ${CANCEL_BUTTON}
    AS Wait For Element State Hidden    ${LIGHTNING_MODAL}
    AS Fail If Element Count Is Not Equal    ${DATA_ID_TOOLTIP}    0


*** Keywords ***
Suite Initialize
    ${bulk_request_batch}    Create Dictionary    name=test bulk request batch
    ${bulk_request_batch_id}    Create Bulk Request Batch SS    bulk_request_batch=${bulk_request_batch}
    VAR    ${INSERTED_BULK_REQUEST_BATCH_ID}    ${bulk_request_batch_id}    scope=SUITE
/* Parent */
:host {
    --font-size-1: 0.625rem;
    --font-size-2: 0.75rem;
    --color-gray-9: #f203ba;
}

/* Child */
.header-container h3 {
    font-size: 1rem;
    color: var(--color-gray-9);
    margin: 0.25rem 0 0 0;
    line-height: 1.5;
}
/* Keep records where any of the 25 diagnosis codes starts with "R13" */
Data d1;
   Set COVD.DADEDview;
   Array dx{25} HLTH_DX_CODE_1-HLTH_DX_CODE_25;
   symp = 0;
   /* Scan until the first missing code; "=:" is a starts-with comparison */
   do i = 1 to 25 while(not missing(dx{i}));
      if dx{i} =: "R13" then symp = 1;
   end;
   drop i;
   if symp; /* Subsetting IF: output only flagged records */
run;
Solana stands out as one of the most efficient and highly scalable blockchain platforms on the market. With lightning-fast transaction speeds, low fees, and an energy-efficient consensus mechanism, Solana is the go-to choice for tech innovators seeking to create high-performance decentralized applications (dApps). The Solana blockchain is setting new standards in the industry, making it an attractive option for developers and businesses alike.

Key Features of Solana That Developers Prefer

Scalability - Handle thousands of transactions per second with ease.
Speed - Achieve sub-second finality for seamless user experiences.
Cost-Effective - Minimize transaction fees without sacrificing security.
Developer-Friendly - Robust ecosystem of tools and libraries to streamline development.

Creating dApps on Solana - Step-by-Step Guide

Setting Up Your Development Ecosystem
Begin by configuring your development environment with the necessary tools, such as Rust, the Solana CLI, and the Anchor framework (a minimal connectivity check follows after this guide).

Understanding Solana’s Architecture & Programming Model
Get familiar with Solana’s Rust-based programming model and its unique features, including Proof of History (PoH) and parallel processing.

Deploying Smart Contracts (Programs) & Integrating Wallets
Develop and deploy smart contracts, commonly referred to as "programs" on Solana, and integrate crypto wallets for seamless transactions.

Testing, Deploying, and Scaling Your Application
Conduct rigorous testing, deploy the dApp to Solana’s mainnet, and optimize for scalability to handle high user demand.

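Below is a minimal connectivity sketch in Python. It assumes the community solana-py client (pip install solana) and the public devnet RPC endpoint, neither of which the guide itself prescribes (the guide centres on Rust and Anchor), so treat it as a quick environment sanity check rather than the recommended toolchain.

from solana.rpc.api import Client

# Public devnet RPC endpoint (an assumption; use your own RPC provider in production)
client = Client("https://api.devnet.solana.com")

# Quick liveness check against the cluster
if client.is_connected():
    print("Connected to Solana devnet")
    print(client.get_version())  # reports the node's software version
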
Use Cases for Solana Blockchain Development

DeFi Applications - Leverage Solana's speed for decentralized finance solutions such as automated market makers (AMMs) and lending platforms.
NFT Marketplaces - Create cost-effective and eco-friendly platforms for trading digital assets.
Gaming Applications - Develop real-time multiplayer blockchain games without lag.
Enterprise Solutions - Enhance business operations with highly secure and scalable blockchain implementations.

Tools and Frameworks for Solana Blockchain Development

Solana provides an extensive set of development tools, including:
Solana CLI - Command-line tools for managing blockchain interactions.
Rust & Anchor Framework - Essential for writing and deploying smart contracts.
Metaplex - NFT marketplace creation framework.
Serum - Decentralized exchange protocol for DeFi applications.

Challenges in Solana Development and How to Overcome Them

Every technology comes with its learning curve, and Solana blockchain is no exception. From mastering Rust to optimizing programs for scalability, developers must navigate various hurdles. Staying updated with Solana’s evolving ecosystem and utilizing community resources can significantly ease development challenges.

Ready to Create on Solana? Hire a Solana Blockchain Developer Today!
  
CoinsQueens distinguishes itself through a combination of expertise and commitment:
Experienced Team: A robust team of over 250 skilled developers and 100+ blockchain experts, ensuring precision and innovation in every project.
Proven Track Record: Successfully delivered over 750 projects, showcasing a deep understanding of blockchain technology and client needs.
Comprehensive Services: Offering end-to-end solutions from consultation to deployment, ensuring a seamless experience for clients.
Client-Centric Approach: Emphasizing transparency, reliability, and scalability, CoinsQueens tailors services to align with specific business objectives.

By partnering with CoinsQueens, businesses can harness the full potential of the Solana blockchain, driving innovation and achieving significant growth in the decentralized ecosystem.

map Books.Create_Items_in_Books(int item)
{
//Getting authtoken and organisation id
books_access = thisapp.Books.Get_Books_Access();
conn_tok = books_access.get("connection");
org_id = books_access.get("organisation_id");
//-------------------------------------------------------------------------
fet_itm = Materials[ID == input.item];
if(fet_itm.Material_Item_Type.Material_Type == "Services")
{
mattype = "service";
}
else
{
mattype = "goods";
}
//info fet_itm.Status;
itmmap = Map();
itmmap.put("name",fet_itm.Part_Description);
//itmmap.put("cf_part_no",fet_itm.Part_No);
//info fet_itm.Part_No;
itmmap.put("rate",ifnull(fet_itm.Selling_Price,0.00));
itmmap.put("description",fet_itm.Specification);
itmmap.put("purchase_description",fet_itm.Specification);
itmmap.put("hsn_or_sac",fet_itm.HSN_SAC);
if(fet_itm.Material_Item_Type.Material_Type == "CWPL Produts")
{
itmmap.put("is_returnable",true);
}
//If inventory tracking is enabled in Zoho Books, the item type cannot be edited, so set item_type only on first create (issue no.157)
if(isblank(fet_itm.Zoho_Books_ID))
{
itmmap.put("item_type","sales_and_purchases");
}
info itmmap;
itmmap.put("product_type",mattype);
itmmap.put("purchase_rate",ifnull(fet_itm.Purchase_Price,0.00));
itmmap.put("unit",fet_itm.Primary_UoM.UOM);
itmmap.put("sku",fet_itm.Part_No);
itmmap.put("cf_mafr_part_no",fet_itm.Mfr_Part_No);
//itmmap.put("cf_manufacturer_name",fet_itm.Manufacturer_Name.Manufacturer_Name);
itmmap.put("cf_brand_name",fet_itm.Brand_Name.Brand_Name);
itmmap.put("status",fet_itm.Status.toLowerCase());
itmmap.put("account_id",ifnull(fet_itm.Sales_Account.Account_ID.toLong(),""));
itmmap.put("purchase_account_id",ifnull(fet_itm.Purchase_Account.Account_ID.toLong(),""));
//Tax Preference
item_map_inter = Map();
item_map_inter.put("tax_specification","inter");
item_map_inter.put("tax_type",0);
item_map_inter.put("tax_name",fet_itm.IGST_Details.Tax_Name);
item_map_inter.put("tax_percentage",fet_itm.IGST_Details.Total_Rate);
item_map_inter.put("tax_id",fet_itm.IGST_Details.Zoho_Books_ID);
//Intra Map
item_map_intra = Map();
item_map_intra.put("tax_specification","intra");
item_map_intra.put("tax_type",0);
item_map_intra.put("tax_name",fet_itm.GST_Details.Tax_Name);
item_map_intra.put("tax_percentage",fet_itm.GST_Details.Total_Rate);
item_map_intra.put("tax_id",fet_itm.GST_Details.Zoho_Books_ID);
item_prefer_s = List();
item_prefer_s.add(item_map_inter);
item_prefer_s.add(item_map_intra);
itmmap.put("item_tax_preferences",item_prefer_s);
//Custom Fields
cf_list = List();
cf_map = Map();
cat_mast = Category[ID == fet_itm.Category];
sub_cat_mast = Sub_Category[ID == fet_itm.Sub_Category];
//manu_name = Manufacturer_Master[ID == fet_itm.Manufacturer_Name];
brand_dt = Brand_Master[ID == fet_itm.Brand_Name];
//cf_list = {{"api_name":"cf_material_type","value":fet_itm.Material_Item_Type.Material_Type},{"api_name":"cf_sub_category","value":sub_cat_mast.Sub_Category},{"api_name":"cf_part_no","value":fet_itm.Part_No},{"api_name":"cf_category","value":cat_mast.Category},{"api_name":"cf_mfr_part_no","value":ifnull(fet_itm.Mfr_Part_No,"")}};
cf_list = {{"api_name":"cf_material_type","value":fet_itm.Material_Item_Type.Material_Type},{"api_name":"cf_sub_category","value":sub_cat_mast.Sub_Category},{"api_name":"cf_category","value":cat_mast.Category},{"api_name":"cf_mfr_part_no","value":ifnull(fet_itm.Mfr_Part_No,"")},{"api_name":"cf_classification","value":ifnull(fet_itm.Classification,"")},{"api_name":"cf_link_to_erp","value":"https://creatorapp.zoho.in/carrierwheels/erp/#Report:All_Materials?ID=" + fet_itm.ID}};
itmmap.put("custom_fields",cf_list);
js_map = Map();
js_map.put("JSONString",itmmap.toString());
getzbid = fet_itm.Zoho_Books_ID.tostring();
//info "getzbid" + getzbid;
if(isBlank(getzbid) || isnull(getzbid))
{
resp = zoho.books.createRecord("items",org_id,itmmap,conn_tok);
rescode = resp.get("code").toLong();
}
else
{
resp = zoho.books.updateRecord("items",org_id,getzbid,itmmap,conn_tok);
//info "resp " + resp;
rescode = resp.get("code").toLong();
//If the update failed (e.g. a stale Zoho_Books_ID), fall back to creating the item
if(rescode != 0)
{
resp = zoho.books.createRecord("items",org_id,itmmap,conn_tok);
rescode = resp.get("code").toLong();
}
}
//info "rescode " + rescode;
resp_Map = Map();
log_type = "Failure";
if(rescode == 0)
{
log_type = "Success";
resp_Map.put("Resp","Success");
resp_Map.put("log_msg",resp);
books_id = resp.toMap().get("item").toMap().get("item_id");
fet_itm.Zoho_Books_ID=books_id;
fet_itm.Books_Sync="Yes";
}
// else
// {
// fet_itm.Books_Sync="No";
// }
//info "resp" + resp;
//info log_type;
//Insert into Log Details Report
ins_log = insert into Log_Files
[
Added_User=zoho.loginuser
Module_Name="Books"
Form_Name="Item"
Reference_NO=fet_itm.Part_No + " - " + fet_itm.Part_Description
Log_Details=resp
Log_Type=log_type
];
//sending error log message
if(log_type == "Failure")
{
resp_Map.put("Resp","Failure");
resp_Map.put("log_msg",resp.get("message"));
//thisapp.Books.sendErrorLog("Item",fet_itm.Item,resp);
}
log = resp_Map.get("log_msg").toString();
return resp_Map;
}
import pandas as pd
import numpy as np
from typing import List, Dict, Tuple
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim
import logging


logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)

class TextEmbedder:
    def __init__(self, api_key: str = None):
        """
        Initialize TextEmbedder with a sentence-transformer model.
        The api_key parameter is kept for backward compatibility but is not used.
        """
        # Load the sentence transformer model (api_key is not needed, kept for compatibility)
        try:
            # Using thenlper/gte-large model as specified
            self.model = SentenceTransformer('thenlper/gte-large')
            logger.info("Successfully loaded sentence-transformers model: thenlper/gte-large")
        except Exception as e:
            logger.error(f"Error loading sentence-transformers model: {str(e)}")
            raise
        
    def _combine_text_features(self, row: pd.Series, text_columns: List[str]) -> str:
        """
        Combine multiple text columns from a series into a single text feature.
        """
        text_values = []
        for col in text_columns:
            if col in row and pd.notna(row[col]):
                text_values.append(f"{col}: {str(row[col])}")
        return " | ".join(text_values)
    
    def get_brand_text_features(self, brand: pd.Series) -> str:
        """
        Extract relevant text features from brand data.
        """
        text_columns = [
            'industry',
            'target_audience',
            'brand_messaging',
            'tone_voice',
            'category_alignment',
            'brand_alignment_keywords',
            'content_type'
        ]
        return self._combine_text_features(brand, text_columns)
    
    def get_influencer_text_features(self, influencer: pd.Series) -> str:
        """
        Extract relevant text features from influencer data.
        """
        text_columns = [
            'category_niche',
            'audience_demographics',
            'audience_interests',
            'content_types'
        ]
        return self._combine_text_features(influencer, text_columns)
    
    def get_embedding(self, text: str) -> np.ndarray:
        """
        Generate embeddings for a text using thenlper/gte-large model.
        """
        try:
            if not text or text.isspace():
                # Return zero vector if text is empty or only whitespace
                return np.zeros(self.model.get_sentence_embedding_dimension())
                
            # Get embedding from sentence-transformers
            embedding = self.model.encode(text)
            return embedding
        except Exception as e:
            logger.error(f"Error getting embedding: {str(e)}")
            # Return zero vector with the correct dimensions for the model
            return np.zeros(self.model.get_sentence_embedding_dimension())
            
    def calculate_text_similarity(self, brand_text: str, influencer_text: str) -> float:
        """
        Calculate cosine similarity between brand and influencer text using cos_sim.
        """
        if not brand_text or not influencer_text:
            logger.warning("Empty text provided for similarity calculation")
            return 0.0
            
        brand_embedding = self.get_embedding(brand_text)
        influencer_embedding = self.get_embedding(influencer_text)
        
        # Using cos_sim from sentence_transformers.util
        similarity = cos_sim(
            brand_embedding.reshape(1, -1),
            influencer_embedding.reshape(1, -1)
        )[0][0].item()  # Extract the float value from the tensor
        
        return float(similarity)

    def print_detailed_match_analysis(self, brand: pd.Series, influencer: pd.Series, similarity_score: float):
        """
        Print detailed analysis of the match between a brand and influencer.
        """
        print("\n" + "="*80)
      
        print("Brand Details:")
        print(f"  ID: {brand.name}") 
        print(f"  Name: {brand.get('name', 'Unknown Brand')}")
        
        print("\nInfluencer Details:")
        print(f"  ID: {influencer.name}") 
        print(f"  Name: {influencer.get('name', 'Unknown Influencer')}")
        print("-"*80)
        
        print("\nBrand Text Features:")
        brand_text = self.get_brand_text_features(brand)
        for feature in brand_text.split(" | "):
            print(f"  - {feature}")
            
        print("\nInfluencer Text Features:")
        influencer_text = self.get_influencer_text_features(influencer)
        for feature in influencer_text.split(" | "):
            print(f"  - {feature}")
            
        print("\nText Similarity Analysis:")
        print(f"  Score: {similarity_score:.4f}")
        
        print("\nScore Interpretation:")
        if similarity_score >= 0.8:
            print("  Excellent Match (≥0.8):")
            print("  - Very strong text similarity")
            print("  - High potential for successful collaboration")
            print("  - Strong alignment in multiple areas")
        elif similarity_score >= 0.6:
            print("  Good Match (≥0.6):")
            print("  - Significant text similarity")
            print("  - Good potential for collaboration")
            print("  - Notable alignment in key areas")
        elif similarity_score >= 0.4:
            print("  Moderate Match (≥0.4):")
            print("  - Some text similarity")
            print("  - Potential for collaboration with careful consideration")
            print("  - Partial alignment in some areas")
        else:
            print("  Weak Match (<0.4):")
            print("  - Limited text similarity")
            print("  - May need to reconsider match")
            print("  - Limited alignment in key areas")
        
        print("="*80)

    def get_text_similarity_matrix(self, brands_df: pd.DataFrame, 
                                 influencers_df: pd.DataFrame) -> np.ndarray:
        """
        Calculate text similarity matrix between all brands and influencers.
        """
        similarity_matrix = np.zeros((len(brands_df), len(influencers_df)))
        
        print("\nCalculating Text Similarity Scores:")
        print("="*80)
        
        all_scores = []
        
        total_comparisons = len(brands_df) * len(influencers_df)
        completed = 0
        
        for i, brand in brands_df.iterrows():
            brand_text = self.get_brand_text_features(brand)
            
            for j, influencer in influencers_df.iterrows():
                influencer_text = self.get_influencer_text_features(influencer)
                
                similarity = self.calculate_text_similarity(brand_text, influencer_text)
                similarity_matrix[brands_df.index.get_loc(i),
                                influencers_df.index.get_loc(j)] = similarity
                
                all_scores.append({
                    'brand_id': brand.name, 
                    'brand_name': brand.get('name', 'Unknown Brand'),
                    'influencer_id': influencer.name,
                    'influencer_name': influencer.get('name', 'Unknown Influencer'),
                    'similarity_score': similarity
                })
                
                self.print_detailed_match_analysis(brand, influencer, similarity)
                
                completed += 1
                if completed % 10 == 0 or completed == total_comparisons:
                    logger.info(f"Progress: {completed}/{total_comparisons} comparisons ({(completed/total_comparisons)*100:.1f}%)")
        
        scores_df = pd.DataFrame(all_scores)
        scores_df = scores_df.sort_values('similarity_score', ascending=False)
        
        print("\nTop 10 Text Similarity Matches:")
        print("="*80)
        print(scores_df[['brand_id', 'brand_name', 'influencer_id', 'influencer_name', 'similarity_score']].head(10).to_string(index=False))
        print("="*80)
        
        return similarity_matrix

    def save_similarity_scores(self, brands_df: pd.DataFrame, 
                             influencers_df: pd.DataFrame,
                             output_path: str):
        """
        Calculate and save all similarity scores to a CSV file.
        """
        all_scores = []
        total_comparisons = len(brands_df) * len(influencers_df)
        completed = 0
        
        logger.info(f"Starting to calculate similarity scores for {total_comparisons} brand-influencer pairs")
        
        for i, brand in brands_df.iterrows():
            brand_text = self.get_brand_text_features(brand)
            
            for j, influencer in influencers_df.iterrows():
                influencer_text = self.get_influencer_text_features(influencer)
                similarity = self.calculate_text_similarity(brand_text, influencer_text)
                
                all_scores.append({
                    'brand_id': brand.name,
                    'brand_name': brand.get('name', 'Unknown Brand'),
                    'influencer_id': influencer.name,
                    'influencer_name': influencer.get('name', 'Unknown Influencer'),
                    'similarity_score': similarity,
                    'brand_text': brand_text,
                    'influencer_text': influencer_text
                })
                
                completed += 1
                if completed % 20 == 0 or completed == total_comparisons:
                    logger.info(f"Progress: {completed}/{total_comparisons} ({(completed/total_comparisons)*100:.1f}%)")
        
        scores_df = pd.DataFrame(all_scores)
        scores_df = scores_df.sort_values('similarity_score', ascending=False)
        scores_df.to_csv(output_path, index=False)
        logger.info(f"Saved detailed similarity scores to {output_path}")
DappFort delivers world-class P2P crypto exchange development, helping businesses enter the digital asset market with confidence. Our platforms feature advanced matching engines, dispute resolution, and AI-driven analytics for seamless trading. With multi-currency and multi-payment support, we enhance user accessibility. Get started today and dominate the crypto exchange industry!

Instant Reach Experts: 
Visit us : https://www.dappfort.com/cryptocurrency-exchange-development-company/
Contact : +91 8838534884 
Mail : sales@dappfort.com
Maximize your crypto gains with Dappfort's high-performance Crypto Trading Bot Development services. Our bots integrate with major exchanges, use AI-driven strategies, and provide real-time analytics. Trade smarter, faster, and with reduced risk. Let’s build your profit-generating trading bot now!

Instant Reach Experts:

Contact : +91 8838534884 
Mail : sales@dappfort.com
Transform your trading experience with our powerful Algo Trading Software Development solutions. Our AI-powered algorithms analyze market trends, execute trades with precision, and minimize risks. Whether for crypto, forex, or stocks, we deliver high-performance automation. Boost your profits with algorithmic trading—get started now!
  
Visit us : https://www.dappfort.com/blog/algo-trading-software-development/   

Instant Reach Experts:

Contact : +91 8838534884 
Mail : sales@dappfort.com
void Books.Create_Vendor_to_Books(int ven)
{
	books_conn = "books_con14";
	fetch_ven = Vendor[ID == input.ven];
	info fetch_ven;
	//fet_en = Zoho_Books_Entity[Entity == fetch_ven.Entity.Entity].Org_ID;
	ven_des = Destination[ID == fetch_ven.Place_of_Supply];
	ven_pay = Payment_Terms[ID == fetch_ven.Payment_Terms];
	curr_code = Currency_Code[ID == fetch_ven.Currency_Code];
	ven_gst_trm = GST_Treatment[ID == fetch_ven.GST_Treatment];
	// Mapping
	vendormap = Map();
	vendormap.put("contact_name",fetch_ven.Vendor_Name);
	vendormap.put("contact_type","vendor");
	vendormap.put("company_name",fetch_ven.Vendor_Name);
	vendormap.put("mobile",fetch_ven.Mobile);
	vendormap.put("phone",fetch_ven.Phone_Number);
	if(fetch_ven.Entity.Entity == "Marine Mechanics Pvt Ltd, India")
	{
		vendormap.put("pan_no",fetch_ven.PAN_No.trim());
		vendormap.put("gst_no",fetch_ven.GST_No.trim());
		vendormap.put("place_of_contact",ven_des.Short_Name);
		vendormap.put("gst_treatment",ven_gst_trm.Link_name);
		vendormap.put("payment_terms_label",ven_pay.Stages);
	}
	vendormap.put("currency_code",curr_code.Currency_Code);
	cont_list = List();
	primary_cont_pers = Map();
	primary_cont_pers.put("first_name",fetch_ven.Vendor_Name);
	primary_cont_pers.put("phone",fetch_ven.Phone_Number);
	primary_cont_pers.put("email",fetch_ven.Email_ID1);
	cont_list.add(primary_cont_pers);
	//Secondary contact persons, if any
	if(fetch_ven.Secondary_Contact_Person_s_Details != null)
	{
		for each  contacts_val in fetch_ven.Secondary_Contact_Person_s_Details
		{
			cont_pers = Map();
			cont_pers.put("first_name",contacts_val.Contact_Person_Name);
			cont_pers.put("phone",contacts_val.Phone_Number);
			cont_pers.put("email",contacts_val.Email);
			cont_list.add(cont_pers);
		}
	}
	vendormap.put("contact_persons",cont_list);
	bill_add = Map();
	bill_add.put("address",fetch_ven.Billing_Address.address_line_1);
	bill_add.put("street2",fetch_ven.Billing_Address.address_line_2);
	bill_add.put("city",fetch_ven.Billing_Address.district_city);
	bill_add.put("state",fetch_ven.Billing_Address.state_province);
	bill_add.put("zip",fetch_ven.Billing_Address.postal_Code);
	bill_add.put("country",fetch_ven.Billing_Address.country);
	vendormap.put("billing_address",bill_add);
	shipp_add = Map();
	shipp_add.put("address",fetch_ven.Shipping_Address.address_line_1);
	shipp_add.put("street2",fetch_ven.Shipping_Address.address_line_2);
	shipp_add.put("city",fetch_ven.Shipping_Address.district_city);
	shipp_add.put("state",fetch_ven.Shipping_Address.state_province);
	shipp_add.put("zip",fetch_ven.Shipping_Address.postal_Code);
	shipp_add.put("country",fetch_ven.Shipping_Address.country);
	vendormap.put("shipping_address",shipp_add);
	vendormap.put("status",fetch_ven.Status);
	for each  rec in fetch_ven.Vendor_Entities
	{
		resp = zoho.books.createRecord("contacts",rec.Org_ID.Org_ID.toString(),vendormap,books_conn);
		res_code = resp.get("code").toLong();
		books_id = resp.toMap().get("contact").toMap().get("contact_id");
		rec.Zoho_book_ID=books_id;
	}
	info resp;
	res_code = resp.get("code").toLong();
	if(res_code == 0)
	{
		books_id = resp.toMap().get("contact").toMap().get("contact_id");
		fetch_ven.ZOHO_Books_ID=books_id;
		contact_person_list = List();
		contact_person_list = resp.toMap().get("contact").toMap().get("contact_persons").toList();
		for each  contacts_1 in contact_person_list
		{
			contact_rec = contacts_1.toMap();
			contact_Email = contact_rec.get("email");
			contact_person_id = contact_rec.get("contact_person_id");
			if(fetch_ven.Email_ID == contact_Email)
			{
				fetch_ven.contactPerson_Books_ID=contact_person_id;
			}
			else
			{
				updateContactPersonID = Contact_Person_Subform[Vendor_Exis_ID == input.ven && Email == contact_Email];
				if(updateContactPersonID.count() > 0)
				{
					updateContactPersonID.Contact_Person_Books_ID=contact_person_id;
				}
			}
		}
	}
	//Insert into Log Details Report
	ins_log = insert into Log_Files
	[
		Added_User=zoho.loginuser
		Module_Name="Books"
		Form_Name="Vendors"
		Log_Details=resp
		Reference_NO=fetch_ven.Vendor_ID
	];
}
import pandas as pd
import numpy as np
from typing import List, Dict, Tuple
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer
import logging


logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)

class TextEmbedder:
    def __init__(self, api_key: str = None):
        """
        Initialize TextEmbedder with a sentence-transformer model.
        The api_key parameter is kept for backward compatibility but is not used.
        """
        # Load the sentence transformer model (api_key is not needed, kept for compatibility)
        try:
            # all-MiniLM-L6-v2 is a good balance of speed and performance
            self.model = SentenceTransformer('all-MiniLM-L6-v2')
            logger.info("Successfully loaded sentence-transformers model: all-MiniLM-L6-v2")
        except Exception as e:
            logger.error(f"Error loading sentence-transformers model: {str(e)}")
            raise
        
    def _combine_text_features(self, row: pd.Series, text_columns: List[str]) -> str:
        """
        Combine multiple text columns from a series into a single text feature.
        """
        text_values = []
        for col in text_columns:
            if col in row and pd.notna(row[col]):
                text_values.append(f"{col}: {str(row[col])}")
        return " | ".join(text_values)
    
    def get_brand_text_features(self, brand: pd.Series) -> str:
        """
        Extract relevant text features from brand data.
        """
        text_columns = [
            'industry',
            'target_audience',
            'brand_messaging',
            'tone_voice',
            'category_alignment',
            'brand_alignment_keywords',
            'content_type'
        ]
        return self._combine_text_features(brand, text_columns)
    
    def get_influencer_text_features(self, influencer: pd.Series) -> str:
        """
        Extract relevant text features from influencer data.
        """
        text_columns = [
            'category_niche',
            'audience_demographics',
            'audience_interests',
            'content_types'
        ]
        return self._combine_text_features(influencer, text_columns)
    
    def get_embedding(self, text: str) -> np.ndarray:
        """
        Generate embeddings for a text using sentence-transformers.
        """
        try:
            if not text or text.isspace():
                # Return zero vector if text is empty or only whitespace
                return np.zeros(self.model.get_sentence_embedding_dimension())
                
            # Get embedding from sentence-transformers
            embedding = self.model.encode(text)
            return embedding
        except Exception as e:
            logger.error(f"Error getting embedding: {str(e)}")
            # Return zero vector with the correct dimensions for the model
            return np.zeros(self.model.get_sentence_embedding_dimension())
            
    def calculate_text_similarity(self, brand_text: str, influencer_text: str) -> float:
        """
        Calculate cosine similarity between brand and influencer text.
        """
        if not brand_text or not influencer_text:
            logger.warning("Empty text provided for similarity calculation")
            return 0.0
            
        brand_embedding = self.get_embedding(brand_text)
        influencer_embedding = self.get_embedding(influencer_text)
        
        similarity = cosine_similarity(
            brand_embedding.reshape(1, -1),
            influencer_embedding.reshape(1, -1)
        )[0][0]
        
        return float(similarity)

    def print_detailed_match_analysis(self, brand: pd.Series, influencer: pd.Series, similarity_score: float):
        """
        Print detailed analysis of the match between a brand and influencer.
        """
        print("\n" + "="*80)
      
        print("Brand Details:")
        print(f"  ID: {brand.name}") 
        print(f"  Name: {brand.get('name', 'Unknown Brand')}")
        
        print("\nInfluencer Details:")
        print(f"  ID: {influencer.name}") 
        print(f"  Name: {influencer.get('name', 'Unknown Influencer')}")
        print("-"*80)
        
        print("\nBrand Text Features:")
        brand_text = self.get_brand_text_features(brand)
        for feature in brand_text.split(" | "):
            print(f"  - {feature}")
            
        print("\nInfluencer Text Features:")
        influencer_text = self.get_influencer_text_features(influencer)
        for feature in influencer_text.split(" | "):
            print(f"  - {feature}")
            
        print("\nText Similarity Analysis:")
        print(f"  Score: {similarity_score:.4f}")
        
        print("\nScore Interpretation:")
        if similarity_score >= 0.8:
            print("  Excellent Match (≥0.8):")
            print("  - Very strong text similarity")
            print("  - High potential for successful collaboration")
            print("  - Strong alignment in multiple areas")
        elif similarity_score >= 0.6:
            print("  Good Match (≥0.6):")
            print("  - Significant text similarity")
            print("  - Good potential for collaboration")
            print("  - Notable alignment in key areas")
        elif similarity_score >= 0.4:
            print("  Moderate Match (≥0.4):")
            print("  - Some text similarity")
            print("  - Potential for collaboration with careful consideration")
            print("  - Partial alignment in some areas")
        else:
            print("  Weak Match (<0.4):")
            print("  - Limited text similarity")
            print("  - May need to reconsider match")
            print("  - Limited alignment in key areas")
        
        print("="*80)

    def get_text_similarity_matrix(self, brands_df: pd.DataFrame, 
                                 influencers_df: pd.DataFrame) -> np.ndarray:
        """
        Calculate text similarity matrix between all brands and influencers.
        """
        similarity_matrix = np.zeros((len(brands_df), len(influencers_df)))
        
        print("\nCalculating Text Similarity Scores:")
        print("="*80)
        
        all_scores = []
        
        total_comparisons = len(brands_df) * len(influencers_df)
        completed = 0
        
        for i, brand in brands_df.iterrows():
            brand_text = self.get_brand_text_features(brand)
            
            for j, influencer in influencers_df.iterrows():
                influencer_text = self.get_influencer_text_features(influencer)
                
                similarity = self.calculate_text_similarity(brand_text, influencer_text)
                similarity_matrix[brands_df.index.get_loc(i),
                                influencers_df.index.get_loc(j)] = similarity
                
                all_scores.append({
                    'brand_id': brand.name, 
                    'brand_name': brand.get('name', 'Unknown Brand'),
                    'influencer_id': influencer.name,
                    'influencer_name': influencer.get('name', 'Unknown Influencer'),
                    'similarity_score': similarity
                })
                
                self.print_detailed_match_analysis(brand, influencer, similarity)
                
                completed += 1
                if completed % 10 == 0 or completed == total_comparisons:
                    logger.info(f"Progress: {completed}/{total_comparisons} comparisons ({(completed/total_comparisons)*100:.1f}%)")
        
        scores_df = pd.DataFrame(all_scores)
        scores_df = scores_df.sort_values('similarity_score', ascending=False)
        
        print("\nTop 10 Text Similarity Matches:")
        print("="*80)
        print(scores_df[['brand_id', 'brand_name', 'influencer_id', 'influencer_name', 'similarity_score']].head(10).to_string(index=False))
        print("="*80)
        
        return similarity_matrix

    def save_similarity_scores(self, brands_df: pd.DataFrame, 
                             influencers_df: pd.DataFrame,
                             output_path: str):
        """
        Calculate and save all similarity scores to a CSV file.
        """
        all_scores = []
        total_comparisons = len(brands_df) * len(influencers_df)
        completed = 0
        
        logger.info(f"Starting to calculate similarity scores for {total_comparisons} brand-influencer pairs")
        
        for i, brand in brands_df.iterrows():
            brand_text = self.get_brand_text_features(brand)
            
            for j, influencer in influencers_df.iterrows():
                influencer_text = self.get_influencer_text_features(influencer)
                similarity = self.calculate_text_similarity(brand_text, influencer_text)
                
                all_scores.append({
                    'brand_id': brand.name,
                    'brand_name': brand.get('name', 'Unknown Brand'),
                    'influencer_id': influencer.name,
                    'influencer_name': influencer.get('name', 'Unknown Influencer'),
                    'similarity_score': similarity,
                    'brand_text': brand_text,
                    'influencer_text': influencer_text
                })
                
                completed += 1
                if completed % 20 == 0 or completed == total_comparisons:
                    logger.info(f"Progress: {completed}/{total_comparisons} ({(completed/total_comparisons)*100:.1f}%)")
        
        scores_df = pd.DataFrame(all_scores)
        scores_df = scores_df.sort_values('similarity_score', ascending=False)
        scores_df.to_csv(output_path, index=False)
        logger.info(f"Saved detailed similarity scores to {output_path}")
#include <iostream>
#include <unordered_set>
using namespace std;

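// For each query b[i], output the smallest value greater than b[i]
// that is not present in a[] (membership tested via a hash set).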
void FindNext(int a[], int b[], int n, int q)
{
  unordered_set<int> seen(a, a+n);
  
  for(int i = 0; i < q; ++i)
  {
    int nextVal = b[i] + 1;
    while(seen.find(nextVal) != seen.end())  // skip every value present in a[]
      ++nextVal;
      
    b[i] = nextVal;
  }
  
  for(int i = 0; i < q; ++i)
    cout << b[i] << " ";
}

int main() 
{
  int n, q;
  cin >> n >> q;
  
  int a[n];
  for(int i = 0; i < n; ++i)
    cin >> a[i];
    
  int b[q];
  for(int i = 0; i < q; ++i)
    cin >> b[i];
  
  FindNext(a, b, n, q);
  
  return 0;
}
#include <iostream>
using namespace std;

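// Partition for descending-order quicksort: elements >= pivot stay on the left.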
int Partition(int a[], int low, int high)
{
  int pivot = a[low];
  int i = low; 
  int j = high;
  
  while(i < j)
  {
    while(a[i] >= pivot && i <= high - 1)
      ++i;
      
    while(a[j] < pivot && j >= low + 1)
      --j;
      
    if(i < j)
      swap(a[i], a[j]);
  }
  
  swap(a[low], a[j]);
  return j;
}

void QuickSort(int a[], int low, int high)
{
  if(low < high)
  {
    int pivotIndex = Partition(a, low, high);
    
    QuickSort(a, low, pivotIndex - 1);
    QuickSort(a, pivotIndex + 1, high);
  }
}

int main() 
{
  int n;
  cin >> n;
  
  int a[n];
  for(int i = 0; i < n; ++i)
    cin >> a[i];
    
  QuickSort(a, 0, n-1);
  
  for(int i = 0; i < n; ++i)
    cout << a[i] << " ";
  
  return 0;
}
public with sharing class facturaSAPToHUB {
    public String OppId {get; set;}
    private Opportunity OppObject=null;
	private Account AccObject=null;
	private Contact ContactObject=null;
	private User UserObject=null;
    private String IVAP0 ='IVAP0';
	private String IVAP16 = 'IVAP16';

    public facturaSAPToHUB(String Id) {
        this.OppId=Id;
		FillOpportunity();
    }

    private void FillOpportunity(){
		Opportunity opp;
		List<Opportunity> opportunityList = [Select Id,AccountId ,OwnerId, Name,E_mail_para_env_o_de_Factura_Elect_nica__c,FormaDePago_o__c,CurrencyIsoCode,
											 		 FechaFactura_o__c, Comentarios__c,MembresaPagadaCon_o__c, Referencia_Numerica_del__c,InicioVigencia_o__c,
											 		FinVigencia_o__c, Tipo_de_Facturacion__c,EstatusFactura__c,CreatedDate,TipoCambio_o__c, Importe_Descuento__c,
													NumFact__c, Referencia_Bancaria__c,Orden_de_Compra__c,N_mero_de_Proveedor__c, Observaciones_SAP__c, 
													Clasificaci_n_de_Factura__c,Banco__c, Importe__c, Referencia_Banc__c,NunOfertafisica_o__c,Estatus_de_pago__c,
													Fecha_de_Pago_Bancario__c,Concepto_de_Cobro__c,FechaPago__c, Cambio_Datos_de_Facturaci_n__c, Clave_metodo_pago__c, 
													N_mero_de_Cuenta_Pago__c, Folio_Oportunidad_sustituida__c, N_Contable_en_SAP_Sustituida__c, Sustituir_Datos__c, EjecutivoAsig_o__c,
													Id_pedido__c, IdPago__c, codigo_error_SAP__c, Mensaje_error_de_SAP__c, Factura_Aprobada__c, Fecha_de_Pedido__c, 
													Mensaje_de_error_pedido__c, Error_al_crear_pedido__c, IdBillingOcc__c, IsBillingOccText__c, ClienteSAP_venta_anterior__c, Hub_Sale__c, Navision_Draft_ID__c
											 	From Opportunity  where  Id =:this.OppId  LIMIT 1];
		if (!opportunityList.isEmpty()){
			this.OppObject = opportunityList[0];
			// system.debug('this.IdPedido');
			// system.debug(this.IdPedido);
		}
	}
    private Integer getAliasVentas(String id){
		List<User> ejecutivoList = [Select Alias from User where Id in (Select EjecutivoAsig_o__c From Opportunity  where  Id =: id)];
		Integer ejecutivoName=-1;
		try{
			if(ejecutivoList.size()!=0){
				User ejecutivo = ejecutivoList[0];
				ejecutivoName =Integer.valueOf(ejecutivo.Alias); 
			}	
		}catch(System.TypeException e){
			return -1;
		}
		return ejecutivoName;
	}

    private void FillAccount(){
		Account acc=null;
		// List<Account> accountList;
		String AccountId;

		system.debug('Facturo a una cuenta facturadora ' + this.OppObject.isBillingOccText__c);
		system.debug('Facturo a la cuenta de la venta ' + this.OppObject.AccountId);
		//validation: cross-account invoicing must bill the billing account when one is set
		if (this.OppObject.IsBillingOccText__c != null && this.OppObject.IsBillingOccText__c != '') {
			AccountId = this.OppObject.IsBillingOccText__c;
		} else  {
			AccountId = this.OppObject.AccountId;
		}

		List<Account> accountList = [Select Clave_SAP__c,RazonSoc_o__c,Id, Personamf__c,Name, Phone,Telefono2_o__c,Fax,
											RFC_o__c,Website,Cliente_SAE__c,PaisFact_o__c, No_de_interior__c, No_de_Exterior__c, EstadoFact_o__c,
											 DelegMunicFact_o__c, Colonia_de_Facturaci_n__c, CodigoPostalFact_o__c, CiudadFact_opcional_o__c,
											 CalleFact_o__c, Cuenta_Virtual_Banamex__c, Correo_para_envio_factura__c, Regimen_Fiscal__c, Tipo_de_uso_de_CFDI__c
										 From Account  where  Id =: AccountId LIMIT 1];


		if (!accountList.isEmpty())
			 this.AccObject = accountList[0];
	}

    	
	private void FillContact(){
		List<Contact> contactList = [Select Title, Phone, OtherPhone ,MobilePhone, FirstName, LastName,
												Fax, Email,Id From Contact c where Id in(Select ContactId 
											From OpportunityContactRole o where  OpportunityId =:This.OppObject.Id and IsPrimary=true )];
		Contact contacto = null;
		if(contactList.size()!=0){
			this.ContactObject = contactList[0];
		}else{
			this.ContactObject = new Contact();
			this.ContactObject.FirstName='Nombre';
			this.ContactObject.LastName='Apellido';
			this.ContactObject.Phone='1234567890';
			this.ContactObject.Title= 'Puesto';
			this.ContactObject.Phone='1234567890';
			this.ContactObject.MobilePhone='9012345678';
			this.ContactObject.Email='correo@correo.com';
			//Check whether these defaults are required and what has actually been sent
		}
	}
	private void FillUser(){
		List<User> usersList = [SELECT Id, Name from User where  Id =: This.OppObject.OwnerId LIMIT 1];  
		this.UserObject  = usersList[0];
	}

    public  InvoiceSapClases.InvoiceSapDocument FillInvoiceSAPDocument (){
        InvoiceSapClases.InvoiceSapDocument sapContract;
        boolean IsSustitution=false;

		String comentarioVigencia = 'Vigencia del ' + Utils.formatDate(this.OppObject.InicioVigencia_o__c) + ' al '+ Utils.formatDate(this.OppObject.FinVigencia_o__c);
		
		// String actualizaContacto='';
		// //system.debug('account.Cambio_Datos_de_Facturaci_on__c: ' + this.OppObject.Cambio_Datos_de_Facturaci_n__c);
		// if (this.OppObject.Cambio_Datos_de_Facturaci_n__c==false){actualizaContacto='False';}else{actualizaContacto='True';}
			
		List<OpportunityLineItem> oppLines = [SELECT UnitPrice, TotalPrice, Quantity, PricebookEntryId, Discount from OpportunityLineItem where  OpportunityId =: this.OppObject.Id];
		List<InvoiceSapClases.InvoiceSapDocumentLine> documentLines= new List<InvoiceSapClases.InvoiceSapDocumentLine>();
		InvoiceSapClases.InvoiceSapDocumentLine documentLine;
		double discount=0.0;
		if(oppLines.size() > 0){
			for(OpportunityLineItem oppTemp : oppLines)
			{
				if (oppTemp.Discount==null){
					discount=0.0;
				}
				else {
					discount=oppTemp.Discount;
				}
				// Note: a SOQL query inside a loop can hit governor limits on large line counts
				PricebookEntry priceBook = [SELECT ProductCode 
											FROM PricebookEntry 
											WHERE Id =: oppTemp.PricebookEntryId
											LIMIT 1];
				documentLine = new InvoiceSapClases.InvoiceSapDocumentLine(priceBook.ProductCode, oppTemp.Quantity*1.0, oppTemp.UnitPrice, discount);
				documentLines.add(documentLine);
			}
		}
        string TipoRelacion='';
        string UUIDRelacion = '';
           
        if (this.OppObject.Sustituir_Datos__c == true){
            TipoRelacion='04';
          	UUIDRelacion = this.OppObject.N_Contable_en_SAP_Sustituida__c;
        }
		String importeFactura='';
		String fechaPagoBanco='';
		if (this.OppObject.Importe__c!=null)
		{
			importeFactura=this.OppObject.Importe__c.toPlainString();
		}
		if (this.OppObject.Fecha_de_Pago_Bancario__c!=null)
		{
			fechaPagoBanco=Utils.formatDate(this.OppObject.Fecha_de_Pago_Bancario__c);
		}
		InvoiceSapClases.InvoiceSapBusinessPertner bp= GetInvoiceSapBusinessPartner();
		InvoiceSapClases.InvoiceSapDocument dc=
			new InvoiceSapClases.InvoiceSapDocument(this.AccObject.Clave_SAP__c, this.OppObject.FechaFactura_o__c, this.OppObject.FechaPago__c, this.OppObject.InicioVigencia_o__c,
					this.OppObject.FinVigencia_o__c,comentarioVigencia,UtilsV2.obtainFacturaType(this.OppObject.Tipo_de_Facturacion__c), UtilsV2.obtainFacturaStatus(this.OppObject.EstatusFactura__c),
					this.OppObject.E_mail_para_env_o_de_Factura_Elect_nica__c,this.OppObject.CreatedDate,this.UserObject.Name,'',0,Utils.getCurrency(this.OppObject.CurrencyIsoCode),this.OppObject.NumFact__c, 
					this.OppObject.Referencia_Bancaria__c,Utils.getInvoiceType(this.OppObject.Tipo_de_Facturacion__c),this.OppObject.Orden_de_Compra__c,this.OppObject.N_mero_de_Proveedor__c,
					TipoRelacion,UUIDRelacion,Utils.getMetodoPagoSAT(this.OppObject.Clave_metodo_pago__c),this.AccObject.Cuenta_Virtual_Banamex__c,this.OppObject.N_mero_de_Cuenta_Pago__c, 
					Utils.getInvoiceClassification(this.OppObject.Clasificaci_n_de_Factura__c), Utils.getInvoiceStatus(this.OppObject.EstatusFactura__c), this.OppObject.Observaciones_SAP__c,
					0, this.OppObject.ClienteSAP_venta_anterior__c, bp, documentLines);
                    sapContract = dc;
		return sapContract; 
	}
    private InvoiceSapClases.InvoiceSapBusinessPertner GetInvoiceSapBusinessPartner (){
		
		Integer ejecutivoName=getAliasVentas(This.OppObject.EjecutivoAsig_o__c);
		String actualizaContacto='';
		if (This.OppObject.Cambio_Datos_de_Facturaci_n__c==false){actualizaContacto='False';}else{actualizaContacto='True';}
		InvoiceSapClases.InvoiceSapAddress ad= 
			new InvoiceSapClases.InvoiceSapAddress( this.AccObject.CalleFact_o__c,this.AccObject.Colonia_de_Facturaci_n__c,this.AccObject.CodigoPostalFact_o__c, '', 
													this.AccObject.DelegMunicFact_o__c,this.AccObject.No_de_Exterior__c, this.AccObject.No_de_interior__c,
													UtilsV2.getCatalogKey('Estados',this.AccObject.EstadoFact_o__c), UtilsV2.getCatalogKey('Paises',this.AccObject.PaisFact_o__c));
		system.debug('ad');
		system.debug(ad);
		InvoiceSapClases.InvoiceSapContact sc= 
			new InvoiceSapClases.InvoiceSapContact(This.ContactObject.FirstName + ' ' + This.ContactObject.LastName, This.ContactObject.Title, This.ContactObject.Phone, This.ContactObject.MobilePhone, This.ContactObject.Email);
		system.debug('sc');
		system.debug(sc);
		InvoiceSapClases.InvoiceSapBusinessPertner bp= 
		     new InvoiceSapClases.InvoiceSapBusinessPertner(this.AccObject.Clave_SAP__c,this.AccObject.RazonSoc_o__c,this.AccObject.Name,this.AccObject.Phone,this.AccObject.RFC_o__c, ejecutivoName,
			 												this.AccObject.Id,This.ContactObject.Id,Utils.TERRITORIOS.get(this.AccObject.EstadoFact_o__c),
			 												calculateDefinicionImpuesto(), utils.getTaxRegime(this.AccObject.Regimen_Fiscal__c), ad, sc);
		system.debug('bp');
		system.debug(bp);
		return bp; 
	}
    private String calculateDefinicionImpuesto(){
		if (OppObject.Tipo_de_Facturacion__c =='Credito - TheNetwork'){
			return IVAP0;
		}
		else {
			return IVAP16;
		}
	}

    // @future(callout=true)
    public static void sendInvoiceToHUB(string oppId){
        facturaSAPToHUB instance = new facturaSAPToHUB(oppId);
        
        InvoiceSapClases.ResponseInvoiceDocument response;
        InvoiceSapClases.InvoiceSapDocument sapContract= instance.FillInvoiceSAPDocument();
        String accessToken = SalesToHub.getAccessToken();
        String bodyRequest = JSON.serialize(sapContract);

        system.debug('Petición SAP'+bodyRequest);


        HttpResponse resp = getBodyRequest(accessToken, bodyRequest);
        response = (InvoiceSapClases.ResponseInvoiceDocument)JSON.deserialize(resp.getBody(), InvoiceSapClases.ResponseInvoiceDocument.class);

        if (resp.getStatusCode() == 200) {
            instance.OppObject.Id_pedido__c = String.valueOf(response.Data.DocNum);
            instance.OppObject.NunOfertafisica_o__c = String.valueOf(response.Data.DocumentoRelacionado.DocNum);
            instance.OppObject.Factura_Aprobada__c = true;
            if(response.Data.DocumentoRelacionado.DocNum == null){
                instance.OppObject.Mensaje_SAP__c = 'Factura en proceso';
            }else{
                instance.OppObject.Mensaje_SAP__c = response.Data.DocumentoRelacionado.Mensaje;
            }
        }
        else if(resp.getStatusCode() == 409){
            if(String.valueOf(response.Data.DocNum) == instance.OppObject.Id_pedido__c){
                instance.OppObject.Mensaje_SAP__c = response.Data.Mensaje;
                instance.OppObject.NunOfertafisica_o__c = String.valueOf(response.Data.DocumentoRelacionado.DocNum);
            }
        }
        else{
            instance.OppObject.Mensaje_SAP__c = 'Error de prueba '+resp.getStatusCode();
        }
        instance.OppObject.Fecha_de_Pedido__c = Datetime.now();
        instance.OppObject.Fecha_de_solicitud_factura__c = Datetime.now();

        update instance.OppObject;
    }

    private static HttpResponse getBodyRequest(String accessToken, String bodyRequest){
        // Build the connection from the InvoiceToHub custom settings and perform the callout
        IntegrationServices__c settings = IntegrationServices__c.getValues('InvoiceToHub');

        OCCMClasesV2.ServiceConnRequest conn = new OCCMClasesV2.ServiceConnRequest();
        conn.UserName = settings.UserName__c;
        //    conn.Password = IntegrationServices__c.getValues('CTSalesHub').Password__c;
        conn.UrlService = settings.URLService__c;
        conn.UrlMethod = settings.URLMethod__c;
        conn.Accept = settings.Accept__c;
        conn.ContentType = settings.Content_Type__c;
        conn.IsActive = settings.IsActive__c;
        conn.NumAttempts = Integer.valueOf(settings.NumAttempts__c);
        conn.DelayMillis = Integer.valueOf(settings.DelayMillis__c);
        conn.GeneratedToken = 'Bearer ' + accessToken;

        httputils http = new httputils(conn);
        HttpResponse res = http.MakeAuthorizedCallOut(bodyRequest, 200);

        return res;
    }
}
                                           ## Output:
$PSCommandPath                             ## C:\Users\user\Documents\code\ps\test.ps1
(Get-Item $PSCommandPath ).Extension       ## .ps1
(Get-Item $PSCommandPath ).Basename        ## test
(Get-Item $PSCommandPath ).Name            ## test.ps1
(Get-Item $PSCommandPath ).DirectoryName   ## C:\Users\user\Documents\code\ps
(Get-Item $PSCommandPath ).FullName        ## C:\Users\user\Documents\code\ps\test.ps1

## Inside a pipeline, $_ is an alias for $PSItem (the current pipeline object):
Get-Item $PSCommandPath | ForEach-Object {
    $_.Extension        ## .ps1
    $_.Basename         ## test
    $_.Name             ## test.ps1
    $_.DirectoryName    ## C:\Users\user\Documents\code\ps
    $_.FullName         ## C:\Users\user\Documents\code\ps\test.ps1
}

$ConfigINI = (Get-Item $PSCommandPath ).DirectoryName+"\"+(Get-Item $PSCommandPath ).BaseName+".ini"

$ConfigINI                                 ## C:\Users\user\Documents\code\ps\test.ini
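
A small follow-on sketch: the derived path makes it easy to load per-script settings when the .ini actually exists (Test-Path guards the read; the filter that skips ;- or #-style comment lines is illustrative):

if (Test-Path $ConfigINI) {
    ## Read the side-by-side .ini, skipping comment lines
    $settings = Get-Content $ConfigINI | Where-Object { $_ -notmatch '^\s*[;#]' }
    $settings
}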
[ExtensionOf(tableStr(PurchReqLine))]
final class PurchReqLine_ADI_Finance_Extension
{
    public display real TotalBudget()
    {
        AccountingDistribution  AccountingDistribution;
        BudgetTransactionHeader BudgetTransactionHeader;
        BudgetTransactionLine   BudgetTransactionLine;
        DimensionAttributeValueCombination  ValueCombination, ValueCombinationBudget;

        // Accounting distribution for this purch req line -> its ledger dimension (Account type)
        select AccountingDistribution where AccountingDistribution.SourceDocumentLine == this.SourceDocumentLine;
        select ValueCombination 
            where ValueCombination.RecId == AccountingDistribution.LedgerDimension
            && ValueCombination.LedgerDimensionType == LedgerDimensionType::Account;

        // Find the Budget-type combination with the same main account, department and cost center
        while select ValueCombinationBudget 
            //where ValueCombinationBudget.DisplayValue == ValueCombination.DisplayValue
            where ValueCombinationBudget.MainAccount == ValueCombination.MainAccount
            && ValueCombinationBudget.LedgerDimensionType == LedgerDimensionType::Budget
        {
            if(this.getAttr(ValueCombination, 'DEPARTMENT') == this.getAttr(ValueCombinationBudget, 'DEPARTMENT')
                && this.getAttr(ValueCombination, 'COSTCENTER') == this.getAttr(ValueCombinationBudget, 'COSTCENTER'))
                break;
        }

        real amount;

        // Sum completed budget transaction amounts booked against that budget dimension,
        // limited to the year of the requisition's transaction date
        while select BudgetTransactionLine
            join BudgetTransactionHeader
            where BudgetTransactionLine.BudgetTransactionHeader == BudgetTransactionHeader.RecId
            && BudgetTransactionHeader.BudgetModelDataAreaId == curExt()
            && BudgetTransactionHeader.TransactionStatus == BudgetTransactionStatus::Completed
            && BudgetTransactionLine.LedgerDimension == ValueCombinationBudget.RecId
        {
            if(year(BudgetTransactionLine.Date) == year(this.purchReqTable().TransDate))
                amount += BudgetTransactionLine.TransactionCurrencyAmount;
        }

        return amount;
    }

    protected RecId getAttr(DimensionAttributeValueCombination _combination,
        Name _attributeName)
    {
        DimensionAttributeLevelValueView valueView;
        DimensionAttribute attribute = DimensionAttribute::findByName(_attributeName);

        // Select the field we actually return: the original selected only DisplayValue,
        // so EntityInstance was never fetched and always came back empty.
        select EntityInstance from valueView
            where valueView.ValueCombinationRecId == _combination.RecId
            && valueView.DimensionAttribute == attribute.RecId;
        return valueView.EntityInstance;
    }

}
<script>
  jQuery(document).ready(function($){
    var currentUrl = window.location.href;

    // Create a mapping of URLs and their respective links
    var urlMapping = {
      "https://www.basquedestination.com/en/rioja-alavesa-visita-a-la-cuna-del-vino/": "https://fareharbor.com/embeds/book/basquedestination/items/609105/?full-items=yes&flow=1338016",
      "https://www.basquedestination.com/en/tour-arquitectonico-de-bilbao-y-puente-de-bizcaia/": "https://fareharbor.com/embeds/book/basquedestination/items/609008/?full-items=yes&flow=1337994",
      "https://www.basquedestination.com/en/tour-pais-vasco-frances/": "https://fareharbor.com/embeds/book/basquedestination/items/609056/?full-items=yes&flow=1337998",
      "https://www.basquedestination.com/es/pintxotour-privado-en-bilbao-3/": "https://fareharbor.com/embeds/book/basquedestination/items/608963/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/tesoros-de-la-costa-vasca-zarautz-y-getaria-1/": "https://fareharbor.com/embeds/book/basquedestination/items/609084/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/costa-vasca-helicoptero/": "https://fareharbor.com/embeds/book/basquedestination/items/609061/?full-items=yes&flow=1337998",
      "https://www.basquedestination.com/es/entre-fogones-con-un-cocinero-sociedad-gastronomica/": "https://fareharbor.com/embeds/book/basquedestination/items/608992/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/getaria-villa-marinera/": "https://fareharbor.com/embeds/book/basquedestination/items/608997/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/pintxotour-privado-en-san-sebastian-4/": "https://fareharbor.com/embeds/book/basquedestination/items/608963/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/pintxotour-privado-en-bilbao/": "https://fareharbor.com/embeds/book/basquedestination/items/609109/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/pesca-en-la-costa-vasca-ondare-experiencias-hotel-arbaso-basque-destination/": "https://fareharbor.com/embeds/book/basquedestination/items/609087/?full-items=yes&flow=1337998",
      "https://www.basquedestination.com/es/surfea-con-locales/": "https://fareharbor.com/embeds/book/basquedestination/items/609093/?full-items=yes&flow=1337998",
      "https://www.basquedestination.com/es/la-sidra-y-el-mar/": "https://fareharbor.com/embeds/book/basquedestination/items/609064/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/miercoles-de-mercado-y-queso/": "https://fareharbor.com/embeds/book/basquedestination/items/609066/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/visita-cultural-privada-de-bilbao-y-museo-guggenheim/": "https://fareharbor.com/embeds/book/basquedestination/items/609004/?full-items=yes&flow=1337994",
      "https://www.basquedestination.com/es/recorrido-a-pie-por-la-costa-vasca-y-el-flysch/": "https://fareharbor.com/embeds/book/basquedestination/items/609104/?full-items=yes&flow=1337998",
      "https://www.basquedestination.com/es/rioja-alavesa-3/": "https://fareharbor.com/embeds/book/basquedestination/items/609105/?full-items=yes&flow=1338016",
      "https://www.basquedestination.com/es/tierra-de-san-ignacio-la-ruta-de-los-tres-templos-1/": "https://fareharbor.com/embeds/book/basquedestination/items/608996/?full-items=yes&flow=1335791",
      "https://www.basquedestination.com/es/visita-cultural-privada-de-donostia-san-sebastian/": "https://fareharbor.com/embeds/book/basquedestination/items/608980/?full-items=yes&flow=1335791",
      "https://www.basquedestination.com/es/rioja-alavesa-visita-a-la-cuna-del-vino/": "https://fareharbor.com/embeds/book/basquedestination/items/609112/?full-items=yes&flow=1335791",
      "https://www.basquedestination.com/es/excursion-a-bilbao-nuestra-ciudad-mas-vanguardista/": "https://fareharbor.com/embeds/book/basquedestination/items/609002/?full-items=yes&flow=1337994",
      "https://www.basquedestination.com/es/banos-de-bosque-ondare-experiencias-hotel-arbaso-basque-destinaton/": "https://fareharbor.com/embeds/book/basquedestination/items/609089/?full-items=yes&flow=1335791",
      "https://www.basquedestination.com/es/vitoria-y-el-valle-salado-de-anana-1/": "https://fareharbor.com/embeds/book/basquedestination/items/609095/?full-items=yes&flow=1338020",
      "https://www.basquedestination.com/en/vitoria-y-el-valle-salado-de-anana-6/": "https://fareharbor.com/embeds/book/basquedestination/items/609000/?full-items=yes&flow=1338020"
    };

    // Check if the current URL matches one of the keys in the mapping
    if(urlMapping[currentUrl]) {
      // Find the button using the provided selector and update its link
      $('.reservaExperiencia').attr('href', urlMapping[currentUrl]);
    }
  });
</script>
.section--topics {
    background-image: url(../images/bg-sp.jpg);
    background-position: center top;
    background-repeat: no-repeat;
    background-size: cover;
}
API URL : https://sag.sanabil.com/gateway/COA_API/1.0/COAStatus
API KEY : x-Gateway-APIKey: 86e7ad1e-c84f-438a-a309-cd1216565dab
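
A minimal call sketch in PowerShell, assuming the status endpoint answers a plain GET with the gateway key sent as a request header (both values taken from the two lines above):

$headers = @{ 'x-Gateway-APIKey' = '86e7ad1e-c84f-438a-a309-cd1216565dab' }
Invoke-RestMethod -Uri 'https://sag.sanabil.com/gateway/COA_API/1.0/COAStatus' -Headers $headers -Method Get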
Launch your own White-Label Crypto Exchange with advanced trading features and top-tier security. Customize your platform with seamless UI, multi-asset support, and liquidity solutions. Ensure high-speed transactions with a secure, scalable infrastructure. Empower traders with an intuitive dashboard and robust risk management tools. Opris offers cutting-edge white-label solutions for your crypto exchange success.

Visit us >> https://www.opris.exchange/white-label-cryptocurrency-exchange-software/
It's possible to work on an entire folder tree, or a particular subset of files, by piping the output of another command into TrID. Something like:

 	C:\TrID>dir d:\recovered_drive /s /b | trid -ce -@
   
 Definitions found:  5702
 Analyzing...

 File: d:\recovered_drive\notes
 100.0% (.RTF) Rich Text Format (5000/1)

 File: d:\recovered_drive\temp\FILE0001.CHK                           
  77.8% (.OGG) OGG Vorbis Audio (14014/3)
Supported languages
This is the list of all 297 languages currently supported by Prism, with their corresponding aliases to use in place of xxxx in the language-xxxx (or lang-xxxx) class; a minimal usage sketch follows the list:

Markup - markup, html, xml, svg, mathml, ssml, atom, rss
CSS - css
C-like - clike
JavaScript - javascript, js
ABAP - abap
ABNF - abnf
ActionScript - actionscript
Ada - ada
Agda - agda
AL - al
ANTLR4 - antlr4, g4
Apache Configuration - apacheconf
Apex - apex
APL - apl
AppleScript - applescript
AQL - aql
Arduino - arduino, ino
ARFF - arff
ARM Assembly - armasm, arm-asm
Arturo - arturo, art
AsciiDoc - asciidoc, adoc
ASP.NET (C#) - aspnet
6502 Assembly - asm6502
Atmel AVR Assembly - asmatmel
AutoHotkey - autohotkey
AutoIt - autoit
AviSynth - avisynth, avs
Avro IDL - avro-idl, avdl
AWK - awk, gawk
Bash - bash, sh, shell
BASIC - basic
Batch - batch
BBcode - bbcode, shortcode
BBj - bbj
Bicep - bicep
Birb - birb
Bison - bison
BNF - bnf, rbnf
BQN - bqn
Brainfuck - brainfuck
BrightScript - brightscript
Bro - bro
BSL (1C:Enterprise) - bsl, oscript
C - c
C# - csharp, cs, dotnet
C++ - cpp
CFScript - cfscript, cfc
ChaiScript - chaiscript
CIL - cil
Cilk/C - cilkc, cilk-c
Cilk/C++ - cilkcpp, cilk-cpp, cilk
Clojure - clojure
CMake - cmake
COBOL - cobol
CoffeeScript - coffeescript, coffee
Concurnas - concurnas, conc
Content-Security-Policy - csp
Cooklang - cooklang
Coq - coq
Crystal - crystal
CSS Extras - css-extras
CSV - csv
CUE - cue
Cypher - cypher
D - d
Dart - dart
DataWeave - dataweave
DAX - dax
Dhall - dhall
Diff - diff
Django/Jinja2 - django, jinja2
DNS zone file - dns-zone-file, dns-zone
Docker - docker, dockerfile
DOT (Graphviz) - dot, gv
EBNF - ebnf
EditorConfig - editorconfig
Eiffel - eiffel
EJS - ejs, eta
Elixir - elixir
Elm - elm
Embedded Lua templating - etlua
ERB - erb
Erlang - erlang
Excel Formula - excel-formula, xlsx, xls
F# - fsharp
Factor - factor
False - false
Firestore security rules - firestore-security-rules
Flow - flow
Fortran - fortran
FreeMarker Template Language - ftl
GameMaker Language - gml, gamemakerlanguage
GAP (CAS) - gap
G-code - gcode
GDScript - gdscript
GEDCOM - gedcom
gettext - gettext, po
Gherkin - gherkin
Git - git
GLSL - glsl
GN - gn, gni
GNU Linker Script - linker-script, ld
Go - go
Go module - go-module, go-mod
Gradle - gradle
GraphQL - graphql
Groovy - groovy
Haml - haml
Handlebars - handlebars, hbs, mustache
Haskell - haskell, hs
Haxe - haxe
HCL - hcl
HLSL - hlsl
Hoon - hoon
HTTP - http
HTTP Public-Key-Pins - hpkp
HTTP Strict-Transport-Security - hsts
IchigoJam - ichigojam
Icon - icon
ICU Message Format - icu-message-format
Idris - idris, idr
.ignore - ignore, gitignore, hgignore, npmignore
Inform 7 - inform7
Ini - ini
Io - io
J - j
Java - java
JavaDoc - javadoc
JavaDoc-like - javadoclike
Java stack trace - javastacktrace
Jexl - jexl
Jolie - jolie
JQ - jq
JSDoc - jsdoc
JS Extras - js-extras
JSON - json, webmanifest
JSON5 - json5
JSONP - jsonp
JS stack trace - jsstacktrace
JS Templates - js-templates
Julia - julia
Keepalived Configure - keepalived
Keyman - keyman
Kotlin - kotlin, kt, kts
KuMir (КуМир) - kumir, kum
Kusto - kusto
LaTeX - latex, tex, context
Latte - latte
Less - less
LilyPond - lilypond, ly
Liquid - liquid
Lisp - lisp, emacs, elisp, emacs-lisp
LiveScript - livescript
LLVM IR - llvm
Log file - log
LOLCODE - lolcode
Lua - lua
Magma (CAS) - magma
Makefile - makefile
Markdown - markdown, md
Markup templating - markup-templating
Mata - mata
MATLAB - matlab
MAXScript - maxscript
MEL - mel
Mermaid - mermaid
METAFONT - metafont
Mizar - mizar
MongoDB - mongodb
Monkey - monkey
MoonScript - moonscript, moon
N1QL - n1ql
N4JS - n4js, n4jsd
Nand To Tetris HDL - nand2tetris-hdl
Naninovel Script - naniscript, nani
NASM - nasm
NEON - neon
Nevod - nevod
nginx - nginx
Nim - nim
Nix - nix
NSIS - nsis
Objective-C - objectivec, objc
OCaml - ocaml
Odin - odin
OpenCL - opencl
OpenQasm - openqasm, qasm
Oz - oz
PARI/GP - parigp
Parser - parser
Pascal - pascal, objectpascal
Pascaligo - pascaligo
PATROL Scripting Language - psl
PC-Axis - pcaxis, px
PeopleCode - peoplecode, pcode
Perl - perl
PHP - php
PHPDoc - phpdoc
PHP Extras - php-extras
PlantUML - plant-uml, plantuml
PL/SQL - plsql
PowerQuery - powerquery, pq, mscript
PowerShell - powershell
Processing - processing
Prolog - prolog
PromQL - promql
.properties - properties
Protocol Buffers - protobuf
Pug - pug
Puppet - puppet
Pure - pure
PureBasic - purebasic, pbfasm
PureScript - purescript, purs
Python - python, py
Q# - qsharp, qs
Q (kdb+ database) - q
QML - qml
Qore - qore
R - r
Racket - racket, rkt
Razor C# - cshtml, razor
React JSX - jsx
React TSX - tsx
Reason - reason
Regex - regex
Rego - rego
Ren'py - renpy, rpy
ReScript - rescript, res
reST (reStructuredText) - rest
Rip - rip
Roboconf - roboconf
Robot Framework - robotframework, robot
Ruby - ruby, rb
Rust - rust
SAS - sas
Sass (Sass) - sass
Sass (SCSS) - scss
Scala - scala
Scheme - scheme
Shell session - shell-session, sh-session, shellsession
Smali - smali
Smalltalk - smalltalk
Smarty - smarty
SML - sml, smlnj
Solidity (Ethereum) - solidity, sol
Solution file - solution-file, sln
Soy (Closure Template) - soy
SPARQL - sparql, rq
Splunk SPL - splunk-spl
SQF: Status Quo Function (Arma 3) - sqf
SQL - sql
Squirrel - squirrel
Stan - stan
Stata Ado - stata
Structured Text (IEC 61131-3) - iecst
Stylus - stylus
SuperCollider - supercollider, sclang
Swift - swift
Systemd configuration file - systemd
T4 templating - t4-templating
T4 Text Templates (C#) - t4-cs, t4
T4 Text Templates (VB) - t4-vb
TAP - tap
Tcl - tcl
Template Toolkit 2 - tt2
Textile - textile
TOML - toml
Tremor - tremor, trickle, troy
Turtle - turtle, trig
Twig - twig
TypeScript - typescript, ts
TypoScript - typoscript, tsconfig
UnrealScript - unrealscript, uscript, uc
UO Razor Script - uorazor
URI - uri, url
V - v
Vala - vala
VB.Net - vbnet
Velocity - velocity
Verilog - verilog
VHDL - vhdl
vim - vim
Visual Basic - visual-basic, vb, vba
WarpScript - warpscript
WebAssembly - wasm
Web IDL - web-idl, webidl
WGSL - wgsl
Wiki markup - wiki
Wolfram language - wolfram, mathematica, nb, wl
Wren - wren
Xeora - xeora, xeoracube
XML doc (.net) - xml-doc
Xojo (REALbasic) - xojo
XQuery - xquery
YAML - yaml, yml
YANG - yang
Zig - zig
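
A minimal usage sketch, assuming prism.js and one of its CSS themes are already loaded on the page; any alias from the list above can replace python in the class name:

<pre><code class="language-python">
def greet(name):
    return f"Hello, {name}!"
</code></pre>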
Wow! The future is here, and AI is your golden ticket to success! From mind-blowing automation to game-changing AI solutions, the opportunities are limitless. Imagine building a business that runs on innovation, scales effortlessly, and generates massive profits!

Ready to ride the AI wave and turn your vision into reality? The time to start is NOW! 
  
Visit : https://www.dappfort.com/blog/ai-business-ideas/

Instant Reach Experts:

Visit us :  https://www.dappfort.com/cryptocurrency-exchange-development-company/      
Contact : +91 8838534884
Mail : sales@dappfort.com
// Make the page always report itself as visible: on every visibilitychange event,
// shadow the read-only document.hidden / document.visibilityState properties
// with fixed "visible" values (e.g. to defeat visibility-based pausing).
document.addEventListener("visibilitychange", function(event) {
    Object.defineProperty(document, "hidden", { value: false, configurable: true });
    Object.defineProperty(document, "visibilityState", { value: "visible", configurable: true });
});
# PKG_PATH="https://cdn.NetBSD.org/pub/pkgsrc/packages/NetBSD/$(uname -p)/$(uname -r|cut -f '1 2' -d.)/All/"
# export PKG_PATH
# pkg_add pkgin
import pandas as pd
import numpy as np
from typing import List, Dict, Tuple, Optional, Union, Any
from sklearn.metrics.pairwise import cosine_similarity
import logging
import os
import time
import json
from functools import lru_cache
import hashlib

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)

class TextEmbedder:
    def __init__(self, api_key: str, cache_dir: Optional[str] = None,
                 batch_size: int = 10, max_retries: int = 3,
                 retry_delay: int = 2):
        """Embed brand/influencer text via the Gemini embedding API, with an optional disk cache."""
        # Use the caller-supplied key; a credential should never be hardcoded here
        # (the original overwrote the parameter with a literal API key).
        self.api_key = api_key
        self.model = "models/text-embedding-004"
        self.cache_dir = cache_dir
        self.embedding_cache = {}
        self.batch_size = batch_size
        self.max_retries = max_retries
        self.retry_delay = retry_delay

        # Lazily-imported google.generativeai module handle
        self._genai = None

        # Warm the on-disk cache if a cache directory was given
        if cache_dir:
            os.makedirs(cache_dir, exist_ok=True)
            self._load_cache()
    
    def _get_genai(self):
        # Import and configure the SDK on first use so the dependency stays optional
        if self._genai is None:
            import google.generativeai as genai
            genai.configure(api_key=self.api_key)
            self._genai = genai
        return self._genai
    
    def _load_cache(self):
      
        if not self.cache_dir:
            return
            
        cache_file = os.path.join(self.cache_dir, "embedding_cache.json")
        if os.path.exists(cache_file):
            try:
                with open(cache_file, 'r') as f:
                    self.embedding_cache = json.load(f)
                logger.info(f"Loaded {len(self.embedding_cache)} cached embeddings")
            except Exception as e:
                logger.error(f"Error loading cache: {str(e)}")
    
    def _save_cache(self):
       
        if not self.cache_dir:
            return
            
        cache_file = os.path.join(self.cache_dir, "embedding_cache.json")
        try:
          
            cache_subset = dict(list(self.embedding_cache.items())[-10000:])
            with open(cache_file, 'w') as f:
                json.dump(cache_subset, f)
            logger.info(f"Saved {len(cache_subset)} embeddings to cache")
        except Exception as e:
            logger.error(f"Error saving cache: {str(e)}")
    
    def _hash_text(self, text: str) -> str:
      
        return hashlib.md5(text.encode('utf-8')).hexdigest()
    
    def _combine_text_features(self, row: Union[pd.Series, Dict], text_columns: List[str]) -> str:
       
        text_values = []
        
      
        if isinstance(row, pd.Series):
            for col in text_columns:
                if col in row.index and pd.notna(row[col]):
                    text_values.append(f"{col}: {str(row[col])}")
        else:
            for col in text_columns:
                if col in row and row[col] is not None:
                    text_values.append(f"{col}: {str(row[col])}")
                    
        return " | ".join(text_values)
    
  
    def get_brand_text_features(self, brand: Union[pd.Series, Dict]) -> str:
      
        if isinstance(brand, pd.Series):
            brand_dict = brand.to_dict()
        else:
            brand_dict = brand
            
        text_columns = [
            'industry',
            'target_audience',
            'brand_messaging',
            'tone_voice',
            'category_alignment',
            'brand_alignment_keywords',
            'content_type'
        ]
        
        text = self._combine_text_features(brand_dict, text_columns)
        return text
    
  
    def get_influencer_text_features(self, influencer: Union[pd.Series, Dict]) -> str:
       
        if isinstance(influencer, pd.Series):
            influencer_dict = influencer.to_dict()
        else:
            influencer_dict = influencer
            
        text_columns = [
            'category_niche',
            'audience_demographics',
            'audience_interests',
            'content_types'
        ]
        
        text = self._combine_text_features(influencer_dict, text_columns)
        return text
    
    def get_embedding(self, text: str) -> np.ndarray:
        # text-embedding-004 returns 768-dimensional vectors; the zero-vector
        # fallbacks must match that dimensionality (the original used 1024,
        # which would break vstack/cosine_similarity when mixed with real embeddings).
        if not text or text.strip() == "":
            return np.zeros(768)

        # Serve from the in-memory cache when this text was embedded before
        text_hash = self._hash_text(text)
        if text_hash in self.embedding_cache:
            return np.array(self.embedding_cache[text_hash])

        # Call the API with a simple retry loop
        for attempt in range(self.max_retries):
            try:
                genai = self._get_genai()
                result = genai.embed_content(
                    model=self.model,
                    content=text
                )

                embedding = np.array(result['embedding'])

                self.embedding_cache[text_hash] = embedding.tolist()

                # Persist the cache periodically
                if len(self.embedding_cache) % 100 == 0:
                    self._save_cache()

                return embedding
            except Exception as e:
                logger.error(f"Error getting embedding (attempt {attempt+1}/{self.max_retries}): {str(e)}")
                if attempt < self.max_retries - 1:
                    time.sleep(self.retry_delay)

        logger.error(f"All embedding attempts failed for text: {text[:100]}...")
        return np.zeros(768)
    
    def batch_get_embeddings(self, texts: List[str]) -> List[np.ndarray]:
      
        results = []
        
       
        for i in range(0, len(texts), self.batch_size):
            batch = texts[i:i+self.batch_size]
            
         
            batch_results = []
            for text in batch:
                embedding = self.get_embedding(text)
                batch_results.append(embedding)
            
            results.extend(batch_results)
            
           
            if i + self.batch_size < len(texts):
                time.sleep(0.5) 
        
        return results
    
    def calculate_text_similarity(self, brand_text: str, influencer_text: str) -> float:
        
        brand_embedding = self.get_embedding(brand_text)
        influencer_embedding = self.get_embedding(influencer_text)
        
      
        similarity = cosine_similarity(
            brand_embedding.reshape(1, -1),
            influencer_embedding.reshape(1, -1)
        )[0][0]
        
        return float(similarity)
    
    def calculate_batch_similarities(self, brand_texts: List[str], 
                                  influencer_texts: List[str]) -> np.ndarray:
       
        brand_embeddings = self.batch_get_embeddings(brand_texts)
        influencer_embeddings = self.batch_get_embeddings(influencer_texts)
        
     
        brand_matrix = np.vstack(brand_embeddings)
        influencer_matrix = np.vstack(influencer_embeddings)
        
      
        similarity_matrix = cosine_similarity(brand_matrix, influencer_matrix)
        
        return similarity_matrix
    
    def print_detailed_match_analysis(self, brand: Union[pd.Series, Dict], 
                                   influencer: Union[pd.Series, Dict], 
                                   similarity_score: float):
  
        logger.info("=" * 80)
        
       
        brand_id = brand.name if isinstance(brand, pd.Series) else brand.get('brand_id', 'Unknown')
        brand_name = brand.get('name', 'Unknown Brand')  # .get works for both pd.Series and dict

        influencer_id = influencer.name if isinstance(influencer, pd.Series) else influencer.get('influencer_id', 'Unknown')
        influencer_name = influencer.get('name', 'Unknown Influencer')
        
      
        logger.info("Brand Details:")
        logger.info(f"  ID: {brand_id}")
        logger.info(f"  Name: {brand_name}")
        
      
        logger.info("\nInfluencer Details:")
        logger.info(f"  ID: {influencer_id}")
        logger.info(f"  Name: {influencer_name}")
        logger.info("-" * 80)
        
      
        logger.info("\nBrand Text Features:")
        brand_text = self.get_brand_text_features(brand)
        for feature in brand_text.split(" | "):
            logger.info(f"  - {feature}")
            
        logger.info("\nInfluencer Text Features:")
        influencer_text = self.get_influencer_text_features(influencer)
        for feature in influencer_text.split(" | "):
            logger.info(f"  - {feature}")
        
       
        logger.info("\nText Similarity Analysis:")
        logger.info(f"  Score: {similarity_score:.4f}")
        
       
        logger.info("\nScore Interpretation:")
        if similarity_score >= 0.8:
            logger.info("  Excellent Match (≥0.8):")
            logger.info("  - Very strong text similarity")
            logger.info("  - High potential for successful collaboration")
            logger.info("  - Strong alignment in multiple areas")
        elif similarity_score >= 0.6:
            logger.info("  Good Match (≥0.6):")
            logger.info("  - Significant text similarity")
            logger.info("  - Good potential for collaboration")
            logger.info("  - Notable alignment in key areas")
        elif similarity_score >= 0.4:
            logger.info("  Moderate Match (≥0.4):")
            logger.info("  - Some text similarity")
            logger.info("  - Potential for collaboration with careful consideration")
            logger.info("  - Partial alignment in some areas")
        else:
            logger.info("  Weak Match (<0.4):")
            logger.info("  - Limited text similarity")
            logger.info("  - May need to reconsider match")
            logger.info("  - Limited alignment in key areas")
        
        logger.info("=" * 80)
    
    def get_text_similarity_matrix(self, brands_df: pd.DataFrame, 
                                influencers_df: pd.DataFrame, 
                                batch_size: int = 10) -> np.ndarray:

        start_time = time.time()
        logger.info(f"Calculating text similarity matrix for {len(brands_df)} brands and {len(influencers_df)} influencers")
        
       
        if self.cache_dir:
            cache_path = os.path.join(self.cache_dir, f"text_similarity_{len(brands_df)}_{len(influencers_df)}.npz")
            if os.path.exists(cache_path):
                logger.info(f"Loading text similarity matrix from cache: {cache_path}")
                data = np.load(cache_path)
                matrix = data['matrix']
                logger.info(f"Loaded text similarity matrix in {time.time() - start_time:.2f} seconds")
                return matrix
        
      
        similarity_matrix = np.zeros((len(brands_df), len(influencers_df)))
        
   
        for i in range(0, len(brands_df), batch_size):
            brand_chunk = brands_df.iloc[i:i+batch_size]
            brand_texts = [self.get_brand_text_features(brand) for _, brand in brand_chunk.iterrows()]
            
            for j in range(0, len(influencers_df), batch_size):
                influencer_chunk = influencers_df.iloc[j:j+batch_size]
                influencer_texts = [self.get_influencer_text_features(influencer) for _, influencer in influencer_chunk.iterrows()]
                
             
                batch_similarities = self.calculate_batch_similarities(brand_texts, influencer_texts)
                
              
                for bi, (brand_idx, _) in enumerate(brand_chunk.iterrows()):
                    for ii, (influencer_idx, _) in enumerate(influencer_chunk.iterrows()):
                        global_brand_idx = brands_df.index.get_loc(brand_idx)
                        global_influencer_idx = influencers_df.index.get_loc(influencer_idx)
                        similarity_matrix[global_brand_idx, global_influencer_idx] = batch_similarities[bi, ii]
                
                logger.info(f"Processed batch: brands {i}-{i+len(brand_chunk)-1}, influencers {j}-{j+len(influencer_chunk)-1}")
        
      
        if self.cache_dir:
            logger.info(f"Saving text similarity matrix to cache: {cache_path}")
            np.savez_compressed(cache_path, matrix=similarity_matrix)
        
        logger.info(f"Text similarity matrix calculation completed in {time.time() - start_time:.2f} seconds")
        return similarity_matrix
    
    def save_similarity_scores(self, brands_df: pd.DataFrame, 
                            influencers_df: pd.DataFrame,
                            output_path: str):
       
        logger.info(f"Calculating and saving similarity scores to {output_path}")
        start_time = time.time()
        
        all_scores = []
        batch_size = 5  
        
     
        for i in range(0, len(brands_df), batch_size):
            brand_chunk = brands_df.iloc[i:i+batch_size]
            
            for j in range(0, len(influencers_df), batch_size):
                influencer_chunk = influencers_df.iloc[j:j+batch_size]
                
                # Calculate batch scores
                for _, brand in brand_chunk.iterrows():
                    brand_text = self.get_brand_text_features(brand)
                    
                    for _, influencer in influencer_chunk.iterrows():
                        influencer_text = self.get_influencer_text_features(influencer)
                        similarity = self.calculate_text_similarity(brand_text, influencer_text)
                        
                        all_scores.append({
                            'brand_id': brand.name,
                            'brand_name': brand.get('name', 'Unknown Brand'),
                            'influencer_id': influencer.name,
                            'influencer_name': influencer.get('name', 'Unknown Influencer'),
                            'similarity_score': similarity,
                            'brand_text': brand_text,
                            'influencer_text': influencer_text
                        })
                
                logger.info(f"Processed scores for brands {i}-{i+len(brand_chunk)-1}, influencers {j}-{j+len(influencer_chunk)-1}")
        
      
        scores_df = pd.DataFrame(all_scores)
        scores_df = scores_df.sort_values('similarity_score', ascending=False)
        
      
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        scores_df.to_csv(output_path, index=False)
        
        logger.info(f"Saved {len(scores_df)} similarity scores to {output_path} in {time.time() - start_time:.2f} seconds")
#include <iostream>
#include <vector>
using namespace std;

// Partition around a[low] (the pivot); returns the pivot's final index
int Partition(int a[], int low, int high)
  int pivot = a[low];
  int i = low;
  int j = high;
  
  while(i < j)
  {
    while(a[i] <= pivot && i <= high - 1)
      ++i;
      
    while(a[j] > pivot && j >= low + 1)
      --j;
      
    if(i < j)
      swap(a[i], a[j]);
  }
  
  swap(a[low], a[j]);
  
  return j;
}

void QuickSort(int a[], int low, int high)
{
  if(low < high)
  {
    int pivotIndex = Partition(a, low, high);
    
    QuickSort(a, low, pivotIndex - 1);
    QuickSort(a, pivotIndex + 1, high);
  }
}

int main() 
{
  int n;
  cin >> n;
  
  // std::vector instead of a variable-length array, which is not standard C++
  vector<int> a(n);
  for(int i = 0; i < n; ++i)
    cin >> a[i];
    
  QuickSort(a.data(), 0, n-1);
    
  for(int i = 0; i < n; ++i)
    cout << a[i] << " ";
    
  return 0;
}
{% if product.metafields.custom.file_url != blank %}
  <ul>
    {% for url in product.metafields.custom.file_url.value %}
      <li>
        <a href="{{ url }}" target="_blank">📄 Download PDF</a>
      </li>
    {% endfor %}
  </ul>
{% endif %}
// SWITCH-style lookup for a custom column: Conditions and Results are parallel
// lists, and List.PositionOf(Conditions, true) picks the first condition that
// evaluates to true for the current row. Note: if nothing matches, PositionOf
// returns -1 and the item access errors.
let
  Conditions = {
    [ColumnName] = "1501", 
    [ColumnName] = "1500", 
    [ColumnName] >= "1100" and [ColumnName] < "1200"
  }, 
  Results = {"FFPC", "PCFam", "Différentielle"}
in
  Results{List.PositionOf(Conditions, true)}
Make the change on one product first to see which id_tax_rules_group value you need, then do the bulk edit:
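
A lookup sketch to confirm the current values first (same ps_product table as the updates below):

SELECT `id_product`, `id_tax_rules_group` FROM `ps_product` LIMIT 10;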


UPDATE `ps_product` SET `id_tax_rules_group`='2' WHERE  `id_tax_rules_group`=1;

UPDATE `ps_product_shop` SET `id_tax_rules_group`='2' WHERE  `id_tax_rules_group`=1;
background-image: url(../images/icon-arrow.svg);
background-size: contain;
background-position: center;
White Label Cryptocurrency Exchange Script is a ready-made solution for creating crypto exchanges quickly and securely with advanced functionalities. With this script, entrepreneurs can make customizations based on their needs and start a crypto exchange business quickly.