Snippets Collections
import logging
from typing import Dict, List, Optional, Tuple

import numpy as np
import pandas as pd
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity


# Configure root logging once at import time; all loggers in this module
# share the timestamped format below.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
# Module-level logger used throughout this file.
logger = logging.getLogger(__name__)

class TextEmbedder:
    """
    Embed brand and influencer text with a sentence-transformers model and
    score brand/influencer pairs by cosine similarity.
    """

    def __init__(self, api_key: Optional[str] = None):
        """
        Initialize TextEmbedder with a sentence-transformer model.

        Args:
            api_key: Unused; kept for backward compatibility with an earlier
                API-based implementation.

        Raises:
            Exception: Re-raised if the model cannot be loaded.
        """
        try:
            # all-MiniLM-L6-v2 is a good balance of speed and performance.
            self.model = SentenceTransformer('all-MiniLM-L6-v2')
            logger.info("Successfully loaded sentence-transformers model: all-MiniLM-L6-v2")
        except Exception as e:
            logger.error(f"Error loading sentence-transformers model: {str(e)}")
            raise

    def _combine_text_features(self, row: pd.Series, text_columns: List[str]) -> str:
        """
        Combine the given columns of ``row`` into one "col: value" string
        joined by " | ". Columns that are missing or NaN are skipped.
        """
        return " | ".join(
            f"{col}: {str(row[col])}"
            for col in text_columns
            if col in row and pd.notna(row[col])
        )

    def get_brand_text_features(self, brand: pd.Series) -> str:
        """
        Extract relevant text features from brand data.
        """
        text_columns = [
            'industry',
            'target_audience',
            'brand_messaging',
            'tone_voice',
            'category_alignment',
            'brand_alignment_keywords',
            'content_type'
        ]
        return self._combine_text_features(brand, text_columns)

    def get_influencer_text_features(self, influencer: pd.Series) -> str:
        """
        Extract relevant text features from influencer data.
        """
        text_columns = [
            'category_niche',
            'audience_demographics',
            'audience_interests',
            'content_types'
        ]
        return self._combine_text_features(influencer, text_columns)

    def get_embedding(self, text: str) -> np.ndarray:
        """
        Generate an embedding for ``text`` using sentence-transformers.

        Returns a zero vector of the model's dimension for empty/whitespace
        input or on any encoding error, so callers never see an exception.
        """
        try:
            if not text or text.isspace():
                return np.zeros(self.model.get_sentence_embedding_dimension())
            return self.model.encode(text)
        except Exception as e:
            logger.error(f"Error getting embedding: {str(e)}")
            return np.zeros(self.model.get_sentence_embedding_dimension())

    @staticmethod
    def _cosine(a: np.ndarray, b: np.ndarray) -> float:
        """Cosine similarity between two 1-D embedding vectors."""
        return float(cosine_similarity(a.reshape(1, -1), b.reshape(1, -1))[0][0])

    def calculate_text_similarity(self, brand_text: str, influencer_text: str) -> float:
        """
        Calculate cosine similarity between brand and influencer text.

        Returns 0.0 (with a warning) when either text is empty.
        """
        if not brand_text or not influencer_text:
            logger.warning("Empty text provided for similarity calculation")
            return 0.0
        return self._cosine(self.get_embedding(brand_text),
                            self.get_embedding(influencer_text))

    def print_detailed_match_analysis(self, brand: pd.Series, influencer: pd.Series, similarity_score: float):
        """
        Print a detailed, human-readable analysis of one brand/influencer
        match: identities, the text features that were compared, the score,
        and a banded interpretation of the score.
        """
        print("\n" + "="*80)

        print("Brand Details:")
        print(f"  ID: {brand.name}")
        print(f"  Name: {brand.get('name', 'Unknown Brand')}")

        print("\nInfluencer Details:")
        print(f"  ID: {influencer.name}")
        print(f"  Name: {influencer.get('name', 'Unknown Influencer')}")
        print("-"*80)

        print("\nBrand Text Features:")
        brand_text = self.get_brand_text_features(brand)
        for feature in brand_text.split(" | "):
            print(f"  - {feature}")

        print("\nInfluencer Text Features:")
        influencer_text = self.get_influencer_text_features(influencer)
        for feature in influencer_text.split(" | "):
            print(f"  - {feature}")

        print("\nText Similarity Analysis:")
        print(f"  Score: {similarity_score:.4f}")

        print("\nScore Interpretation:")
        if similarity_score >= 0.8:
            print("  Excellent Match (≥0.8):")
            print("  - Very strong text similarity")
            print("  - High potential for successful collaboration")
            print("  - Strong alignment in multiple areas")
        elif similarity_score >= 0.6:
            print("  Good Match (≥0.6):")
            print("  - Significant text similarity")
            print("  - Good potential for collaboration")
            print("  - Notable alignment in key areas")
        elif similarity_score >= 0.4:
            print("  Moderate Match (≥0.4):")
            print("  - Some text similarity")
            print("  - Potential for collaboration with careful consideration")
            print("  - Partial alignment in some areas")
        else:
            print("  Weak Match (<0.4):")
            print("  - Limited text similarity")
            print("  - May need to reconsider match")
            print("  - Limited alignment in key areas")

        print("="*80)

    def _precompute(self, brands_df: pd.DataFrame,
                    influencers_df: pd.DataFrame) -> Tuple[list, list, List[str], List[str], list, list]:
        """
        Compute rows, text features, and embeddings once per entity.

        This turns the pairwise methods below from O(n*m) model.encode calls
        into O(n+m), which dominates their runtime.
        """
        brand_rows = [row for _, row in brands_df.iterrows()]
        influencer_rows = [row for _, row in influencers_df.iterrows()]
        brand_texts = [self.get_brand_text_features(b) for b in brand_rows]
        influencer_texts = [self.get_influencer_text_features(f) for f in influencer_rows]
        brand_embeddings = [self.get_embedding(t) for t in brand_texts]
        influencer_embeddings = [self.get_embedding(t) for t in influencer_texts]
        return (brand_rows, influencer_rows, brand_texts, influencer_texts,
                brand_embeddings, influencer_embeddings)

    def _pair_similarity(self, b_text: str, i_text: str,
                         b_emb: np.ndarray, i_emb: np.ndarray) -> float:
        """Score one pair from cached embeddings, mirroring calculate_text_similarity's empty-text handling."""
        if not b_text or not i_text:
            logger.warning("Empty text provided for similarity calculation")
            return 0.0
        return self._cosine(b_emb, i_emb)

    def get_text_similarity_matrix(self, brands_df: pd.DataFrame,
                                 influencers_df: pd.DataFrame) -> np.ndarray:
        """
        Calculate the text similarity matrix between all brands and influencers.

        Returns an array of shape (len(brands_df), len(influencers_df)).
        Prints a detailed per-pair analysis and the top-10 matches as it goes.
        """
        n_brands = len(brands_df)
        n_influencers = len(influencers_df)
        similarity_matrix = np.zeros((n_brands, n_influencers))

        print("\nCalculating Text Similarity Scores:")
        print("="*80)

        (brand_rows, influencer_rows, brand_texts, influencer_texts,
         brand_embeddings, influencer_embeddings) = self._precompute(brands_df, influencers_df)

        all_scores = []
        total_comparisons = n_brands * n_influencers
        completed = 0

        for bi, brand in enumerate(brand_rows):
            for ii, influencer in enumerate(influencer_rows):
                similarity = self._pair_similarity(
                    brand_texts[bi], influencer_texts[ii],
                    brand_embeddings[bi], influencer_embeddings[ii])
                # Positional indices replace the original index.get_loc lookup
                # per cell; iterrows order matches positional order.
                similarity_matrix[bi, ii] = similarity

                all_scores.append({
                    'brand_id': brand.name,
                    'brand_name': brand.get('name', 'Unknown Brand'),
                    'influencer_id': influencer.name,
                    'influencer_name': influencer.get('name', 'Unknown Influencer'),
                    'similarity_score': similarity
                })

                self.print_detailed_match_analysis(brand, influencer, similarity)

                completed += 1
                if completed % 10 == 0 or completed == total_comparisons:
                    logger.info(f"Progress: {completed}/{total_comparisons} comparisons ({(completed/total_comparisons)*100:.1f}%)")

        scores_df = pd.DataFrame(all_scores)
        scores_df = scores_df.sort_values('similarity_score', ascending=False)

        print("\nTop 10 Text Similarity Matches:")
        print("="*80)
        print(scores_df[['brand_id', 'brand_name', 'influencer_id', 'influencer_name', 'similarity_score']].head(10).to_string(index=False))
        print("="*80)

        return similarity_matrix

    def save_similarity_scores(self, brands_df: pd.DataFrame,
                             influencers_df: pd.DataFrame,
                             output_path: str):
        """
        Calculate all pairwise similarity scores and save them, sorted
        descending, to a CSV file at ``output_path``.
        """
        all_scores = []
        total_comparisons = len(brands_df) * len(influencers_df)
        completed = 0

        logger.info(f"Starting to calculate similarity scores for {total_comparisons} brand-influencer pairs")

        (brand_rows, influencer_rows, brand_texts, influencer_texts,
         brand_embeddings, influencer_embeddings) = self._precompute(brands_df, influencers_df)

        for bi, brand in enumerate(brand_rows):
            for ii, influencer in enumerate(influencer_rows):
                similarity = self._pair_similarity(
                    brand_texts[bi], influencer_texts[ii],
                    brand_embeddings[bi], influencer_embeddings[ii])

                all_scores.append({
                    'brand_id': brand.name,
                    'brand_name': brand.get('name', 'Unknown Brand'),
                    'influencer_id': influencer.name,
                    'influencer_name': influencer.get('name', 'Unknown Influencer'),
                    'similarity_score': similarity,
                    'brand_text': brand_texts[bi],
                    'influencer_text': influencer_texts[ii]
                })

                completed += 1
                if completed % 20 == 0 or completed == total_comparisons:
                    logger.info(f"Progress: {completed}/{total_comparisons} ({(completed/total_comparisons)*100:.1f}%)")

        scores_df = pd.DataFrame(all_scores)
        scores_df = scores_df.sort_values('similarity_score', ascending=False)
        scores_df.to_csv(output_path, index=False)
        logger.info(f"Saved detailed similarity scores to {output_path}")
#include <iostream>
#include <unordered_set>
using namespace std;

// For each query b[i], replace it with the smallest value > b[i] that does
// not occur in a[0..n-1], then print all q results separated by spaces.
//
// Bug fix: the original used a single `if`, so it skipped at most ONE
// occupied value. With a = {2, 3} and b[i] = 1 it returned 3 (present in a)
// instead of 4. A `while` loop advances past every occupied value.
void FindNext(int a[], int b[], int n, int q)
{
  // Hash set of the values in a[] for O(1) membership tests.
  std::unordered_set<int> seen(a, a + n);

  for (int i = 0; i < q; ++i)
  {
    int nextVal = b[i] + 1;
    while (seen.find(nextVal) != seen.end())
      ++nextVal;

    b[i] = nextVal;
  }

  for (int i = 0; i < q; ++i)
    std::cout << b[i] << " ";
}

// Reads n values into a[], q query values into b[], and delegates to
// FindNext, which rewrites and prints the answers.
int main()
{
  int n, q;
  std::cin >> n >> q;

  // Heap allocation replaces the original `int a[n]` / `int b[q]` VLAs,
  // which are a compiler extension and not standard C++.
  int* a = new int[n];
  for (int i = 0; i < n; ++i)
    std::cin >> a[i];

  int* b = new int[q];
  for (int i = 0; i < q; ++i)
    std::cin >> b[i];

  FindNext(a, b, n, q);

  delete[] a;
  delete[] b;
  return 0;
}
#include <iostream>
using namespace std;

// Hoare-style partition around pivot a[low].
// The inner scans gather elements >= pivot on the LEFT and elements < pivot
// on the RIGHT, so the enclosing QuickSort yields NON-INCREASING (descending)
// order. NOTE(review): if ascending output was intended, the two inner
// comparisons are inverted — confirm against expected output.
// Returns the pivot's final index.
int Partition(int a[], int low, int high)
{
  int pivot = a[low];
  int i = low; 
  int j = high;
  
  while(i < j)
  {
    // Advance i past elements already on the correct (>= pivot) side;
    // the i <= high - 1 bound keeps the scan inside the range.
    while(a[i] >= pivot && i <= high - 1)
      ++i;
      
    // Retreat j past elements already on the correct (< pivot) side.
    while(a[j] < pivot && j >= low + 1)
      --j;
      
    // Both scans stopped on misplaced elements: exchange them.
    if(i < j)
      swap(a[i], a[j]);
  }
  
  // j ends on the last element of the >= pivot region; put the pivot there.
  swap(a[low], a[j]);
  return j;
}

// Recursively sorts a[low..high] using Partition's pivot placement.
void QuickSort(int a[], int low, int high)
{
  // Ranges with fewer than two elements are already sorted.
  if (low >= high)
    return;

  // Partition puts the pivot at its final index p; recurse on both sides.
  const int p = Partition(a, low, high);
  QuickSort(a, low, p - 1);
  QuickSort(a, p + 1, high);
}

// Reads n values, quick-sorts them, and prints the result space-separated.
int main()
{
  int n;
  std::cin >> n;

  // Heap allocation replaces the original `int a[n]` VLA, which is a
  // compiler extension and not standard C++.
  int* a = new int[n];
  for (int i = 0; i < n; ++i)
    std::cin >> a[i];

  QuickSort(a, 0, n - 1);

  for (int i = 0; i < n; ++i)
    std::cout << a[i] << " ";

  delete[] a;
  return 0;
}
/**
 * Builds an InvoiceSapClases.InvoiceSapDocument from an Opportunity (plus its
 * Account, primary Contact, and owning User) and posts it to the HUB
 * integration service, writing the response back onto the Opportunity.
 */
public with sharing class facturaSAPToHUB {
    public String OppId {get; set;}
    private Opportunity OppObject = null;
    private Account AccObject = null;
    private Contact ContactObject = null;
    private User UserObject = null;
    // SAP tax-definition codes (0% and 16% IVA).
    private String IVAP0 = 'IVAP0';
    private String IVAP16 = 'IVAP16';

    public facturaSAPToHUB(String Id) {
        this.OppId = Id;
        // Load every record FillInvoiceSAPDocument needs. The original only
        // called FillOpportunity(), leaving AccObject/ContactObject/UserObject
        // null and causing a NullPointerException when building the document.
        FillOpportunity();
        FillAccount();
        FillContact();
        FillUser();
    }

    // Loads the Opportunity with every field the SAP document build reads.
    private void FillOpportunity(){
        List<Opportunity> opportunityList = [Select Id,AccountId ,OwnerId, Name,E_mail_para_env_o_de_Factura_Elect_nica__c,FormaDePago_o__c,CurrencyIsoCode,
                                                    FechaFactura_o__c, Comentarios__c,MembresaPagadaCon_o__c, Referencia_Numerica_del__c,InicioVigencia_o__c,
                                                    FinVigencia_o__c, Tipo_de_Facturacion__c,EstatusFactura__c,CreatedDate,TipoCambio_o__c, Importe_Descuento__c,
                                                    NumFact__c, Referencia_Bancaria__c,Orden_de_Compra__c,N_mero_de_Proveedor__c, Observaciones_SAP__c, 
                                                    Clasificaci_n_de_Factura__c,Banco__c, Importe__c, Referencia_Banc__c,NunOfertafisica_o__c,Estatus_de_pago__c,
                                                    Fecha_de_Pago_Bancario__c,Concepto_de_Cobro__c,FechaPago__c, Cambio_Datos_de_Facturaci_n__c, Clave_metodo_pago__c, 
                                                    N_mero_de_Cuenta_Pago__c, Folio_Oportunidad_sustituida__c, N_Contable_en_SAP_Sustituida__c, Sustituir_Datos__c, EjecutivoAsig_o__c,
                                                    Id_pedido__c, IdPago__c, codigo_error_SAP__c, Mensaje_error_de_SAP__c, Factura_Aprobada__c, Fecha_de_Pedido__c, 
                                                    Mensaje_de_error_pedido__c, Error_al_crear_pedido__c, IdBillingOcc__c, IsBillingOccText__c, ClienteSAP_venta_anterior__c, Hub_Sale__c, Navision_Draft_ID__c
                                                From Opportunity  where  Id =:this.OppId  LIMIT 1];
        // SOQL always returns a (possibly empty) list, never null, so the
        // original `!= null` check could not catch a missing record.
        if (!opportunityList.isEmpty()) {
            this.OppObject = opportunityList[0];
        }
    }

    // Resolves the assigned sales executive's numeric Alias.
    // Returns -1 when the user is missing or the alias is not numeric.
    private Integer getAliasVentas(String id){
        List<User> ejecutivoList = [Select Alias from User where Id in (Select EjecutivoAsig_o__c From Opportunity  where  Id =: id)];
        Integer ejecutivoName = -1;
        try {
            if (!ejecutivoList.isEmpty()) {
                ejecutivoName = Integer.valueOf(ejecutivoList[0].Alias);
            }
        } catch (System.TypeException e) {
            return -1;
        }
        return ejecutivoName;
    }

    // Loads the Account to invoice: the cross-billing override account when
    // IsBillingOccText__c is set, otherwise the Opportunity's own Account.
    private void FillAccount(){
        String AccountId;

        system.debug('Facturo a una cuenta facturadora ' + this.OppObject.isBillingOccText__c);
        system.debug('Facturo a la cuenta de la venta ' + this.OppObject.AccountId);
        if (this.OppObject.IsBillingOccText__c != null && this.OppObject.IsBillingOccText__c != '') {
            AccountId = this.OppObject.IsBillingOccText__c;
        } else {
            AccountId = this.OppObject.AccountId;
        }

        List<Account> accountList = [Select Clave_SAP__c,RazonSoc_o__c,Id, Personamf__c,Name, Phone,Telefono2_o__c,Fax,
                                            RFC_o__c,Website,Cliente_SAE__c,PaisFact_o__c, No_de_interior__c, No_de_Exterior__c, EstadoFact_o__c,
                                             DelegMunicFact_o__c, Colonia_de_Facturaci_n__c, CodigoPostalFact_o__c, CiudadFact_opcional_o__c,
                                             CalleFact_o__c, Cuenta_Virtual_Banamex__c, Correo_para_envio_factura__c, Regimen_Fiscal__c, Tipo_de_uso_de_CFDI__c
                                         From Account  where  Id =: AccountId LIMIT 1];

        if (!accountList.isEmpty()) {
            this.AccObject = accountList[0];
        }
    }

    // Loads the Opportunity's primary Contact, or builds a placeholder
    // Contact so the SAP payload always carries contact data.
    private void FillContact(){
        List<Contact> contactList = [Select Title, Phone, OtherPhone ,MobilePhone, FirstName, LastName,
                                                Fax, Email,Id From Contact c where Id in(Select ContactId 
                                            From OpportunityContactRole o where  OpportunityId =:this.OppObject.Id and IsPrimary=true )];
        if (!contactList.isEmpty()) {
            this.ContactObject = contactList[0];
        } else {
            this.ContactObject = new Contact();
            this.ContactObject.FirstName = 'Nombre';
            this.ContactObject.LastName = 'Apellido';
            this.ContactObject.Phone = '1234567890';
            this.ContactObject.Title = 'Puesto';
            this.ContactObject.MobilePhone = '9012345678';
            this.ContactObject.Email = 'correo@correo.com';
        }
    }

    // Loads the Opportunity owner's User record (for the salesperson name).
    private void FillUser(){
        List<User> usersList = [SELECT Id, Name from User where  Id =: this.OppObject.OwnerId LIMIT 1];
        this.UserObject = usersList[0];
    }

    // Assembles the full SAP invoice document from the loaded records.
    public InvoiceSapClases.InvoiceSapDocument FillInvoiceSAPDocument (){
        String comentarioVigencia = 'Vigencia del ' + Utils.formatDate(this.OppObject.InicioVigencia_o__c) + ' al '+ Utils.formatDate(this.OppObject.FinVigencia_o__c);

        List<OpportunityLineItem> oppLines = [SELECT UnitPrice, TotalPrice, Quantity, PricebookEntryId, Discount from OpportunityLineItem where  OpportunityId =: this.OppObject.Id];

        // Bulkified: one PricebookEntry query for all lines instead of one
        // SOQL query per line (the original queried inside the loop, which
        // risks hitting governor limits on large Opportunities).
        Set<Id> pricebookEntryIds = new Set<Id>();
        for (OpportunityLineItem line : oppLines) {
            pricebookEntryIds.add(line.PricebookEntryId);
        }
        Map<Id, PricebookEntry> entriesById = new Map<Id, PricebookEntry>(
            [SELECT ProductCode FROM PricebookEntry WHERE Id IN :pricebookEntryIds]);

        List<InvoiceSapClases.InvoiceSapDocumentLine> documentLines = new List<InvoiceSapClases.InvoiceSapDocumentLine>();
        for (OpportunityLineItem oppTemp : oppLines) {
            Double discount = (oppTemp.Discount == null) ? 0.0 : oppTemp.Discount;
            PricebookEntry priceBook = entriesById.get(oppTemp.PricebookEntryId);
            documentLines.add(new InvoiceSapClases.InvoiceSapDocumentLine(
                priceBook.ProductCode, oppTemp.Quantity * 1.0, oppTemp.UnitPrice, discount));
        }

        // '04' is the CFDI relation type for substitution of a prior invoice.
        String TipoRelacion = '';
        String UUIDRelacion = '';
        if (this.OppObject.Sustituir_Datos__c == true) {
            TipoRelacion = '04';
            UUIDRelacion = this.OppObject.N_Contable_en_SAP_Sustituida__c;
        }

        InvoiceSapClases.InvoiceSapBusinessPertner bp = GetInvoiceSapBusinessPartner();
        return new InvoiceSapClases.InvoiceSapDocument(this.AccObject.Clave_SAP__c, this.OppObject.FechaFactura_o__c, this.OppObject.FechaPago__c, this.OppObject.InicioVigencia_o__c,
                    this.OppObject.FinVigencia_o__c,comentarioVigencia,UtilsV2.obtainFacturaType(this.OppObject.Tipo_de_Facturacion__c), UtilsV2.obtainFacturaStatus(this.OppObject.EstatusFactura__c),
                    this.OppObject.E_mail_para_env_o_de_Factura_Elect_nica__c,this.OppObject.CreatedDate,this.UserObject.Name,'',0,Utils.getCurrency(this.OppObject.CurrencyIsoCode),this.OppObject.NumFact__c, 
                    this.OppObject.Referencia_Bancaria__c,Utils.getInvoiceType(this.OppObject.Tipo_de_Facturacion__c),this.OppObject.Orden_de_Compra__c,this.OppObject.N_mero_de_Proveedor__c,
                    TipoRelacion,UUIDRelacion,Utils.getMetodoPagoSAT(this.OppObject.Clave_metodo_pago__c),this.AccObject.Cuenta_Virtual_Banamex__c,this.OppObject.N_mero_de_Cuenta_Pago__c, 
                    Utils.getInvoiceClassification(this.OppObject.Clasificaci_n_de_Factura__c), Utils.getInvoiceStatus(this.OppObject.EstatusFactura__c), this.OppObject.Observaciones_SAP__c,
                    0, this.OppObject.ClienteSAP_venta_anterior__c, bp, documentLines);
    }

    // Builds the business-partner section (billing address, contact, tax data).
    private InvoiceSapClases.InvoiceSapBusinessPertner GetInvoiceSapBusinessPartner (){
        Integer ejecutivoName = getAliasVentas(this.OppObject.EjecutivoAsig_o__c);

        InvoiceSapClases.InvoiceSapAddress ad =
            new InvoiceSapClases.InvoiceSapAddress( this.AccObject.CalleFact_o__c,this.AccObject.Colonia_de_Facturaci_n__c,this.AccObject.CodigoPostalFact_o__c, '', 
                                                    this.AccObject.DelegMunicFact_o__c,this.AccObject.No_de_Exterior__c, this.AccObject.No_de_interior__c,
                                                    UtilsV2.getCatalogKey('Estados',this.AccObject.EstadoFact_o__c), UtilsV2.getCatalogKey('Paises',this.AccObject.PaisFact_o__c));
        system.debug('ad');
        system.debug(ad);
        InvoiceSapClases.InvoiceSapContact sc =
            new InvoiceSapClases.InvoiceSapContact(this.ContactObject.FirstName + ' ' + this.ContactObject.LastName, this.ContactObject.Title, this.ContactObject.Phone, this.ContactObject.MobilePhone, this.ContactObject.Email);
        system.debug('sc');
        system.debug(sc);
        InvoiceSapClases.InvoiceSapBusinessPertner bp =
             new InvoiceSapClases.InvoiceSapBusinessPertner(this.AccObject.Clave_SAP__c,this.AccObject.RazonSoc_o__c,this.AccObject.Name,this.AccObject.Phone,this.AccObject.RFC_o__c, ejecutivoName,
                                                            this.AccObject.Id,this.ContactObject.Id,Utils.TERRITORIOS.get(this.AccObject.EstadoFact_o__c),
                                                            calculateDefinicionImpuesto(), utils.getTaxRegime(this.AccObject.Regimen_Fiscal__c), ad, sc);
        system.debug('bp');
        system.debug(bp);
        return bp;
    }

    // 'Credito - TheNetwork' opportunities are invoiced at 0% IVA; all
    // others at 16%.
    private String calculateDefinicionImpuesto(){
        if (OppObject.Tipo_de_Facturacion__c == 'Credito - TheNetwork') {
            return IVAP0;
        }
        return IVAP16;
    }

    // Serializes the SAP document, posts it to HUB, and records the outcome
    // on the Opportunity.
    // @future(callout=true)
    public static void sendInvoiceToHUB(string oppId){
        facturaSAPToHUB instance = new facturaSAPToHUB(oppId);

        InvoiceSapClases.InvoiceSapDocument sapContract = instance.FillInvoiceSAPDocument();
        String accessToken = SalesToHub.getAccessToken();
        String bodyRequest = JSON.serialize(sapContract);

        system.debug('Petición SAP'+bodyRequest);

        HttpResponse resp = getBodyRequest(accessToken, bodyRequest);
        InvoiceSapClases.ResponseInvoiceDocument response =
            (InvoiceSapClases.ResponseInvoiceDocument)JSON.deserialize(resp.getBody(), InvoiceSapClases.ResponseInvoiceDocument.class);

        // Single else-if chain. In the original the 409 check's `else` was
        // evaluated for EVERY non-409 status, so a successful 200 response
        // also overwrote Mensaje_SAP__c with 'Error de prueba 200'.
        if (resp.getStatusCode() == 200) {
            instance.OppObject.Id_pedido__c = String.valueOf(response.Data.DocNum);
            instance.OppObject.NunOfertafisica_o__c = String.valueOf(response.Data.DocumentoRelacionado.DocNum);
            instance.OppObject.Factura_Aprobada__c = true;
            if (response.Data.DocumentoRelacionado.DocNum == null) {
                instance.OppObject.Mensaje_SAP__c = 'Factura en proceso';
            } else {
                instance.OppObject.Mensaje_SAP__c = response.Data.DocumentoRelacionado.Mensaje;
            }
        } else if (resp.getStatusCode() == 409) {
            // Duplicate request: keep the related-document data when the
            // returned DocNum matches the one already on the Opportunity.
            if (String.valueOf(response.Data.DocNum) == instance.OppObject.Id_pedido__c) {
                instance.OppObject.Mensaje_SAP__c = response.Data.Mensaje;
                instance.OppObject.NunOfertafisica_o__c = String.valueOf(response.Data.DocumentoRelacionado.DocNum);
            }
        } else {
            instance.OppObject.Mensaje_SAP__c = 'Error de prueba '+resp.getStatusCode();
        }
        instance.OppObject.Fecha_de_Pedido__c = Datetime.now();
        instance.OppObject.Fecha_de_solicitud_factura__c = Datetime.now();

        update instance.OppObject;
    }

    // Performs the authorized HTTP callout to the InvoiceToHub service using
    // the connection settings stored in the IntegrationServices__c custom
    // setting. Returns the raw HttpResponse.
    private static HttpResponse getBodyRequest(String accessToken, String bodyRequest){
        OCCMClasesV2.ServiceConnRequest conn = new OCCMClasesV2.ServiceConnRequest();
        conn.UserName = IntegrationServices__c.getValues('InvoiceToHub').UserName__c;
        conn.UrlService = IntegrationServices__c.getValues('InvoiceToHub').URLService__c;
        conn.UrlMethod = IntegrationServices__c.getValues('InvoiceToHub').URLMethod__c;
        conn.Accept = IntegrationServices__c.getValues('InvoiceToHub').Accept__c;
        conn.ContentType = IntegrationServices__c.getValues('InvoiceToHub').Content_Type__c;
        conn.IsActive = IntegrationServices__c.getValues('InvoiceToHub').IsActive__c;
        conn.NumAttempts = Integer.valueOf(IntegrationServices__c.getValues('InvoiceToHub').NumAttempts__c);
        conn.DelayMillis = Integer.valueOf(IntegrationServices__c.getValues('InvoiceToHub').DelayMillis__c);
        conn.GeneratedToken = 'Bearer '+ accessToken;

        httputils http = new httputils(conn);
        return http.MakeAuthorizedCallOut(bodyRequest, 200);
    }
}
                                           ## Output:
$PSCommandPath                             ## C:\Users\user\Documents\code\ps\test.ps1
(Get-Item $PSCommandPath ).Extension       ## .ps1
(Get-Item $PSCommandPath ).Basename        ## test
(Get-Item $PSCommandPath ).Name            ## test.ps1
(Get-Item $PSCommandPath ).DirectoryName   ## C:\Users\user\Documents\code\ps
(Get-Item $PSCommandPath ).FullName        ## C:\Users\user\Documents\code\ps\test.ps1

$_. = $PSItem

$_.Extension        ## .ps1
$_.Basename         ## test
$_.Name             ## test.ps1
$_.DirectoryName    ## C:\Users\user\Documents\code\ps
$_.FullName         ## C:\Users\user\Documents\code\ps\test.ps1

$ConfigINI = (Get-Item $PSCommandPath ).DirectoryName+"\"+(Get-Item $PSCommandPath ).BaseName+".ini"

$ConfigINI                                 ## C:\Users\user\Documents\code\ps\test.ini
[ExtensionOf(tableStr(PurchReqLine))]
final class PurchReqLine_ADI_Finance_Extension
{
    /// <summary>
    /// Total completed budget amount for this line's main account, matched on
    /// the DEPARTMENT and COSTCENTER dimensions, restricted to budget lines
    /// dated in the same year as the requisition's transaction date.
    /// </summary>
    public display real TotalBudget()
    {
        AccountingDistribution  AccountingDistribution;
        BudgetTransactionHeader BudgetTransactionHeader;
        BudgetTransactionLine   BudgetTransactionLine;
        DimensionAttributeValueCombination  ValueCombination, ValueCombinationBudget;

        // Ledger dimension of this line's accounting distribution.
        select AccountingDistribution where AccountingDistribution.SourceDocumentLine == this.SourceDocumentLine;
        select ValueCombination 
            where ValueCombination.RecId == AccountingDistribution.LedgerDimension
            && ValueCombination.LedgerDimensionType == LedgerDimensionType::Account;

        // Find the budget combination for the same main account whose
        // DEPARTMENT and COSTCENTER dimensions match.
        // NOTE(review): if no candidate matches, the loop ends with
        // ValueCombinationBudget holding the last candidate (or empty when
        // there are none) and the sum below uses that — confirm this
        // fallback is intended.
        while select ValueCombinationBudget 
            where ValueCombinationBudget.MainAccount == ValueCombination.MainAccount
            && ValueCombinationBudget.LedgerDimensionType == LedgerDimensionType::Budget
        {
            if(this.getAttr(ValueCombination, 'DEPARTMENT') == this.getAttr(ValueCombinationBudget, 'DEPARTMENT')
                && this.getAttr(ValueCombination, 'COSTCENTER') == this.getAttr(ValueCombinationBudget, 'COSTCENTER'))
                break;
        }

        // Sum completed budget transaction lines on that combination for the
        // requisition's year, in the current company.
        real amount;
        while select BudgetTransactionLine
            join BudgetTransactionHeader
            where BudgetTransactionLine.BudgetTransactionHeader == BudgetTransactionHeader.RecId
            && BudgetTransactionHeader.BudgetModelDataAreaId == curExt()
            && BudgetTransactionHeader.TransactionStatus == BudgetTransactionStatus::Completed
            && BudgetTransactionLine.LedgerDimension == ValueCombinationBudget.RecId
        {
            if(year(BudgetTransactionLine.Date) == year(this.purchReqTable().TransDate))
                amount += BudgetTransactionLine.TransactionCurrencyAmount;
        }

        return amount;
    }

    /// <summary>
    /// Returns the EntityInstance RecId of the named dimension attribute on
    /// the given value combination (0 when the attribute is not present).
    /// </summary>
    protected RecId getAttr(DimensionAttributeValueCombination _combination,
        Name _attributeName)
    {
        DimensionAttributeLevelValueView valueView;
        DimensionAttribute attribute = DimensionAttribute::findByName(_attributeName);

        // Bug fix: the original selected only DisplayValue but returned
        // EntityInstance, a field that was never fetched. Select the field
        // that is actually returned.
        select EntityInstance from valueView
            where valueView.ValueCombinationRecId == _combination.RecId
            && valueView.DimensionAttribute == attribute.RecId;
        return valueView.EntityInstance;
    }

}
<script>
  jQuery(document).ready(function($){
    var currentUrl = window.location.href;

    // Create a mapping of URLs and their respective links
    var urlMapping = {
      "https://www.basquedestination.com/en/rioja-alavesa-visita-a-la-cuna-del-vino/": "https://fareharbor.com/embeds/book/basquedestination/items/609105/?full-items=yes&flow=1338016",
      "https://www.basquedestination.com/en/tour-arquitectonico-de-bilbao-y-puente-de-bizcaia/": "https://fareharbor.com/embeds/book/basquedestination/items/609008/?full-items=yes&flow=1337994",
      "https://www.basquedestination.com/en/tour-pais-vasco-frances/": "https://fareharbor.com/embeds/book/basquedestination/items/609056/?full-items=yes&flow=1337998",
      "https://www.basquedestination.com/es/pintxotour-privado-en-bilbao-3/": "https://fareharbor.com/embeds/book/basquedestination/items/608963/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/tesoros-de-la-costa-vasca-zarautz-y-getaria-1/": "https://fareharbor.com/embeds/book/basquedestination/items/609084/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/costa-vasca-helicoptero/": "https://fareharbor.com/embeds/book/basquedestination/items/609061/?full-items=yes&flow=1337998",
      "https://www.basquedestination.com/es/entre-fogones-con-un-cocinero-sociedad-gastronomica/": "https://fareharbor.com/embeds/book/basquedestination/items/608992/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/getaria-villa-marinera/": "https://fareharbor.com/embeds/book/basquedestination/items/608997/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/pintxotour-privado-en-san-sebastian-4/": "https://fareharbor.com/embeds/book/basquedestination/items/608963/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/pintxotour-privado-en-bilbao/": "https://fareharbor.com/embeds/book/basquedestination/items/609109/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/pesca-en-la-costa-vasca-ondare-experiencias-hotel-arbaso-basque-destination/": "https://fareharbor.com/embeds/book/basquedestination/items/609087/?full-items=yes&flow=1337998",
      "https://www.basquedestination.com/es/surfea-con-locales/": "https://fareharbor.com/embeds/book/basquedestination/items/609093/?full-items=yes&flow=1337998",
      "https://www.basquedestination.com/es/la-sidra-y-el-mar/": "https://fareharbor.com/embeds/book/basquedestination/items/609064/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/miercoles-de-mercado-y-queso/": "https://fareharbor.com/embeds/book/basquedestination/items/609066/?full-items=yes&flow=1338004",
      "https://www.basquedestination.com/es/visita-cultural-privada-de-bilbao-y-museo-guggenheim/": "https://fareharbor.com/embeds/book/basquedestination/items/609004/?full-items=yes&flow=1337994",
      "https://www.basquedestination.com/es/recorrido-a-pie-por-la-costa-vasca-y-el-flysch/": "https://fareharbor.com/embeds/book/basquedestination/items/609104/?full-items=yes&flow=1337998",
      "https://www.basquedestination.com/es/rioja-alavesa-3/": "https://fareharbor.com/embeds/book/basquedestination/items/609105/?full-items=yes&flow=1338016",
      "https://www.basquedestination.com/es/tierra-de-san-ignacio-la-ruta-de-los-tres-templos-1/": "https://fareharbor.com/embeds/book/basquedestination/items/608996/?full-items=yes&flow=1335791",
      "https://www.basquedestination.com/es/visita-cultural-privada-de-donostia-san-sebastian/": "https://fareharbor.com/embeds/book/basquedestination/items/608980/?full-items=yes&flow=1335791",
      "https://www.basquedestination.com/es/rioja-alavesa-visita-a-la-cuna-del-vino/": "https://fareharbor.com/embeds/book/basquedestination/items/609112/?full-items=yes&flow=1335791",
      "https://www.basquedestination.com/es/excursion-a-bilbao-nuestra-ciudad-mas-vanguardista/": "https://fareharbor.com/embeds/book/basquedestination/items/609002/?full-items=yes&flow=1337994",
      "https://www.basquedestination.com/es/banos-de-bosque-ondare-experiencias-hotel-arbaso-basque-destinaton/": "https://fareharbor.com/embeds/book/basquedestination/items/609089/?full-items=yes&flow=1335791",
      "https://www.basquedestination.com/es/vitoria-y-el-valle-salado-de-anana-1/": "https://fareharbor.com/embeds/book/basquedestination/items/609095/?full-items=yes&flow=1338020",
      "https://www.basquedestination.com/en/vitoria-y-el-valle-salado-de-anana-6/": "https://fareharbor.com/embeds/book/basquedestination/items/609000/?full-items=yes&flow=1338020"
    };

    // Check if the current URL matches one of the keys in the mapping
    if(urlMapping[currentUrl]) {
      // Find the button using the provided selector and update its link
      $('.reservaExperiencia').attr('href', urlMapping[currentUrl]);
    }
  });
</script>
/* Full-bleed decorative background for the topics section. */
.section--topics {
    background-image: url(../images/bg-sp.jpg);
    background-position: center top;
    background-repeat: no-repeat;
    background-size: cover;
}
API URL : https://sag.sanabil.com/gateway/COA_API/1.0/COAStatus
API KEY : x-Gateway-APIKey: 86e7ad1e-c84f-438a-a309-cd1216565dab
Launch your own White-Label Crypto Exchange with advanced trading features and top-tier security. Customize your platform with seamless UI, multi-asset support, and liquidity solutions. Ensure high-speed transactions with a secure, scalable infrastructure. Empower traders with an intuitive dashboard and robust risk management tools. Opris offers cutting-edge white-label solutions for your crypto exchange success.

Visit us >> https://www.opris.exchange/white-label-cryptocurrency-exchange-software/
It's possible to work on an entire folder tree, or a particular subset of files, just using the output of some other command through a pipe. Something like:

 	C:\TrID>dir d:\recovered_drive /s /b | trid -ce -@
   
 Definitions found:  5702
 Analyzing...

 File: d:\recovered_drive\notes
 100.0% (.RTF) Rich Text Format (5000/1)

 File: d:\recovered_drive\temp\FILE0001.CHK                           
  77.8% (.OGG) OGG Vorbis Audio (14014/3)
Supported languages
This is the list of all 297 languages currently supported by Prism, with their corresponding alias, to use in place of xxxx in the language-xxxx (or lang-xxxx) class:

Markup - markup, html, xml, svg, mathml, ssml, atom, rss
CSS - css
C-like - clike
JavaScript - javascript, js
ABAP - abap
ABNF - abnf
ActionScript - actionscript
Ada - ada
Agda - agda
AL - al
ANTLR4 - antlr4, g4
Apache Configuration - apacheconf
Apex - apex
APL - apl
AppleScript - applescript
AQL - aql
Arduino - arduino, ino
ARFF - arff
ARM Assembly - armasm, arm-asm
Arturo - arturo, art
AsciiDoc - asciidoc, adoc
ASP.NET (C#) - aspnet
6502 Assembly - asm6502
Atmel AVR Assembly - asmatmel
AutoHotkey - autohotkey
AutoIt - autoit
AviSynth - avisynth, avs
Avro IDL - avro-idl, avdl
AWK - awk, gawk
Bash - bash, sh, shell
BASIC - basic
Batch - batch
BBcode - bbcode, shortcode
BBj - bbj
Bicep - bicep
Birb - birb
Bison - bison
BNF - bnf, rbnf
BQN - bqn
Brainfuck - brainfuck
BrightScript - brightscript
Bro - bro
BSL (1C:Enterprise) - bsl, oscript
C - c
C# - csharp, cs, dotnet
C++ - cpp
CFScript - cfscript, cfc
ChaiScript - chaiscript
CIL - cil
Cilk/C - cilkc, cilk-c
Cilk/C++ - cilkcpp, cilk-cpp, cilk
Clojure - clojure
CMake - cmake
COBOL - cobol
CoffeeScript - coffeescript, coffee
Concurnas - concurnas, conc
Content-Security-Policy - csp
Cooklang - cooklang
Coq - coq
Crystal - crystal
CSS Extras - css-extras
CSV - csv
CUE - cue
Cypher - cypher
D - d
Dart - dart
DataWeave - dataweave
DAX - dax
Dhall - dhall
Diff - diff
Django/Jinja2 - django, jinja2
DNS zone file - dns-zone-file, dns-zone
Docker - docker, dockerfile
DOT (Graphviz) - dot, gv
EBNF - ebnf
EditorConfig - editorconfig
Eiffel - eiffel
EJS - ejs, eta
Elixir - elixir
Elm - elm
Embedded Lua templating - etlua
ERB - erb
Erlang - erlang
Excel Formula - excel-formula, xlsx, xls
F# - fsharp
Factor - factor
False - false
Firestore security rules - firestore-security-rules
Flow - flow
Fortran - fortran
FreeMarker Template Language - ftl
GameMaker Language - gml, gamemakerlanguage
GAP (CAS) - gap
G-code - gcode
GDScript - gdscript
GEDCOM - gedcom
gettext - gettext, po
Gherkin - gherkin
Git - git
GLSL - glsl
GN - gn, gni
GNU Linker Script - linker-script, ld
Go - go
Go module - go-module, go-mod
Gradle - gradle
GraphQL - graphql
Groovy - groovy
Haml - haml
Handlebars - handlebars, hbs, mustache
Haskell - haskell, hs
Haxe - haxe
HCL - hcl
HLSL - hlsl
Hoon - hoon
HTTP - http
HTTP Public-Key-Pins - hpkp
HTTP Strict-Transport-Security - hsts
IchigoJam - ichigojam
Icon - icon
ICU Message Format - icu-message-format
Idris - idris, idr
.ignore - ignore, gitignore, hgignore, npmignore
Inform 7 - inform7
Ini - ini
Io - io
J - j
Java - java
JavaDoc - javadoc
JavaDoc-like - javadoclike
Java stack trace - javastacktrace
Jexl - jexl
Jolie - jolie
JQ - jq
JSDoc - jsdoc
JS Extras - js-extras
JSON - json, webmanifest
JSON5 - json5
JSONP - jsonp
JS stack trace - jsstacktrace
JS Templates - js-templates
Julia - julia
Keepalived Configure - keepalived
Keyman - keyman
Kotlin - kotlin, kt, kts
KuMir (КуМир) - kumir, kum
Kusto - kusto
LaTeX - latex, tex, context
Latte - latte
Less - less
LilyPond - lilypond, ly
Liquid - liquid
Lisp - lisp, emacs, elisp, emacs-lisp
LiveScript - livescript
LLVM IR - llvm
Log file - log
LOLCODE - lolcode
Lua - lua
Magma (CAS) - magma
Makefile - makefile
Markdown - markdown, md
Markup templating - markup-templating
Mata - mata
MATLAB - matlab
MAXScript - maxscript
MEL - mel
Mermaid - mermaid
METAFONT - metafont
Mizar - mizar
MongoDB - mongodb
Monkey - monkey
MoonScript - moonscript, moon
N1QL - n1ql
N4JS - n4js, n4jsd
Nand To Tetris HDL - nand2tetris-hdl
Naninovel Script - naniscript, nani
NASM - nasm
NEON - neon
Nevod - nevod
nginx - nginx
Nim - nim
Nix - nix
NSIS - nsis
Objective-C - objectivec, objc
OCaml - ocaml
Odin - odin
OpenCL - opencl
OpenQasm - openqasm, qasm
Oz - oz
PARI/GP - parigp
Parser - parser
Pascal - pascal, objectpascal
Pascaligo - pascaligo
PATROL Scripting Language - psl
PC-Axis - pcaxis, px
PeopleCode - peoplecode, pcode
Perl - perl
PHP - php
PHPDoc - phpdoc
PHP Extras - php-extras
PlantUML - plant-uml, plantuml
PL/SQL - plsql
PowerQuery - powerquery, pq, mscript
PowerShell - powershell
Processing - processing
Prolog - prolog
PromQL - promql
.properties - properties
Protocol Buffers - protobuf
Pug - pug
Puppet - puppet
Pure - pure
PureBasic - purebasic, pbfasm
PureScript - purescript, purs
Python - python, py
Q# - qsharp, qs
Q (kdb+ database) - q
QML - qml
Qore - qore
R - r
Racket - racket, rkt
Razor C# - cshtml, razor
React JSX - jsx
React TSX - tsx
Reason - reason
Regex - regex
Rego - rego
Ren'py - renpy, rpy
ReScript - rescript, res
reST (reStructuredText) - rest
Rip - rip
Roboconf - roboconf
Robot Framework - robotframework, robot
Ruby - ruby, rb
Rust - rust
SAS - sas
Sass (Sass) - sass
Sass (SCSS) - scss
Scala - scala
Scheme - scheme
Shell session - shell-session, sh-session, shellsession
Smali - smali
Smalltalk - smalltalk
Smarty - smarty
SML - sml, smlnj
Solidity (Ethereum) - solidity, sol
Solution file - solution-file, sln
Soy (Closure Template) - soy
SPARQL - sparql, rq
Splunk SPL - splunk-spl
SQF: Status Quo Function (Arma 3) - sqf
SQL - sql
Squirrel - squirrel
Stan - stan
Stata Ado - stata
Structured Text (IEC 61131-3) - iecst
Stylus - stylus
SuperCollider - supercollider, sclang
Swift - swift
Systemd configuration file - systemd
T4 templating - t4-templating
T4 Text Templates (C#) - t4-cs, t4
T4 Text Templates (VB) - t4-vb
TAP - tap
Tcl - tcl
Template Toolkit 2 - tt2
Textile - textile
TOML - toml
Tremor - tremor, trickle, troy
Turtle - turtle, trig
Twig - twig
TypeScript - typescript, ts
TypoScript - typoscript, tsconfig
UnrealScript - unrealscript, uscript, uc
UO Razor Script - uorazor
URI - uri, url
V - v
Vala - vala
VB.Net - vbnet
Velocity - velocity
Verilog - verilog
VHDL - vhdl
vim - vim
Visual Basic - visual-basic, vb, vba
WarpScript - warpscript
WebAssembly - wasm
Web IDL - web-idl, webidl
WGSL - wgsl
Wiki markup - wiki
Wolfram language - wolfram, mathematica, nb, wl
Wren - wren
Xeora - xeora, xeoracube
XML doc (.net) - xml-doc
Xojo (REALbasic) - xojo
XQuery - xquery
YAML - yaml, yml
YANG - yang
Zig - zig
Wow! The future is here, and AI is your golden ticket to success! From mind-blowing automation to game-changing AI solutions, the opportunities are limitless. Imagine building a business that runs on innovation, scales effortlessly, and generates massive profits!

Ready to ride the AI wave and turn your vision into reality? The time to start is NOW! 
  
Visit : https://www.dappfort.com/blog/ai-business-ideas/

Instant Reach Experts:

Visit us :  https://www.dappfort.com/cryptocurrency-exchange-development-company/      
Contact : +91 8838534884
Mail : sales@dappfort.com
// Spoof the Page Visibility API: whenever a visibilitychange fires, redefine
// document.hidden / document.visibilityState so the page always reports itself
// as visible (defeats tab-switch detection). `configurable: true` allows the
// properties to be redefined again on subsequent events.
document.addEventListener("visibilitychange", (event) => {
    const redefinable = { configurable: true };
    Object.defineProperty(document, "hidden", { ...redefinable, value: false });
    Object.defineProperty(document, "visibilityState", { ...redefinable, value: "visible" });
});
# PKG_PATH="https://cdn.NetBSD.org/pub/pkgsrc/packages/NetBSD/$(uname -p)/$(uname -r|cut -f '1 2' -d.)/All/"
# export PKG_PATH
# pkg_add pkgin
import pandas as pd
import numpy as np
from typing import List, Dict, Tuple, Optional, Union, Any
from sklearn.metrics.pairwise import cosine_similarity
import logging
import os
import time
import json
from functools import lru_cache
import hashlib

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)

class TextEmbedder:
    """Text-embedding helper built on Google's Generative AI embedding API.

    Combines brand/influencer text columns into feature strings, embeds them
    with ``models/text-embedding-004`` (with disk-backed caching and retries),
    and computes cosine-similarity scores and matrices between the two sides.
    """

    # models/text-embedding-004 returns 768-dimensional vectors; the same size
    # is used for zero-vector fallbacks so failed/empty embeddings can still be
    # stacked alongside real ones (the previous 1024 fallback would break
    # np.vstack / cosine_similarity).
    EMBEDDING_DIM = 768

    def __init__(self, api_key: str, cache_dir: Optional[str] = None,
                batch_size: int = 10, max_retries: int = 3,
                retry_delay: int = 2):
        """
        Args:
            api_key: Google Generative AI API key. SECURITY: the key must come
                from the caller (env/config) — never hard-code credentials.
            cache_dir: Optional directory for embedding/matrix caches.
            batch_size: Number of texts embedded per batch.
            max_retries: API attempts per text before giving up.
            retry_delay: Seconds to sleep between retry attempts.
        """
        # Use the caller-supplied key (the previous version overwrote it with a
        # hard-coded, leaked key, silently ignoring this parameter).
        self.api_key = api_key
        self.model = "models/text-embedding-004"
        self.cache_dir = cache_dir
        self.embedding_cache = {}  # md5(text) -> embedding (stored as list)
        self.batch_size = batch_size
        self.max_retries = max_retries
        self.retry_delay = retry_delay

        # google.generativeai is imported lazily on first API use so that
        # constructing the object never requires the SDK.
        self._genai = None

        if cache_dir:
            os.makedirs(cache_dir, exist_ok=True)
            self._load_cache()

    def _get_genai(self):
        """Lazily import and configure the google.generativeai SDK once."""
        if self._genai is None:
            import google.generativeai as genai
            genai.configure(api_key=self.api_key)
            self._genai = genai
        return self._genai

    def _load_cache(self):
        """Load previously saved embeddings from ``cache_dir`` (best effort)."""
        if not self.cache_dir:
            return

        cache_file = os.path.join(self.cache_dir, "embedding_cache.json")
        if os.path.exists(cache_file):
            try:
                with open(cache_file, 'r') as f:
                    self.embedding_cache = json.load(f)
                logger.info(f"Loaded {len(self.embedding_cache)} cached embeddings")
            except Exception as e:
                # A corrupt cache should not prevent startup.
                logger.error(f"Error loading cache: {str(e)}")

    def _save_cache(self):
        """Persist (at most the newest 10k) cached embeddings to disk."""
        if not self.cache_dir:
            return

        cache_file = os.path.join(self.cache_dir, "embedding_cache.json")
        try:
            # Cap the on-disk cache size; dicts preserve insertion order, so
            # this keeps the most recently added entries.
            cache_subset = dict(list(self.embedding_cache.items())[-10000:])
            with open(cache_file, 'w') as f:
                json.dump(cache_subset, f)
            logger.info(f"Saved {len(cache_subset)} embeddings to cache")
        except Exception as e:
            logger.error(f"Error saving cache: {str(e)}")

    def _hash_text(self, text: str) -> str:
        """Return a stable md5 hex digest of *text* for use as a cache key."""
        return hashlib.md5(text.encode('utf-8')).hexdigest()

    def _combine_text_features(self, row: Union[pd.Series, Dict], text_columns: List[str]) -> str:
        """Join the present, non-null columns into a 'col: value | ...' string."""
        text_values = []

        if isinstance(row, pd.Series):
            for col in text_columns:
                if col in row.index and pd.notna(row[col]):
                    text_values.append(f"{col}: {str(row[col])}")
        else:
            for col in text_columns:
                # pd.notna also filters NaN values that survive Series.to_dict()
                # (the previous `is not None` check let "col: nan" through).
                if col in row and pd.notna(row[col]):
                    text_values.append(f"{col}: {str(row[col])}")

        return " | ".join(text_values)

    def get_brand_text_features(self, brand: Union[pd.Series, Dict]) -> str:
        """Build the brand-side feature string from the relevant text columns."""
        brand_dict = brand.to_dict() if isinstance(brand, pd.Series) else brand

        text_columns = [
            'industry',
            'target_audience',
            'brand_messaging',
            'tone_voice',
            'category_alignment',
            'brand_alignment_keywords',
            'content_type'
        ]

        return self._combine_text_features(brand_dict, text_columns)

    def get_influencer_text_features(self, influencer: Union[pd.Series, Dict]) -> str:
        """Build the influencer-side feature string from the relevant columns."""
        influencer_dict = influencer.to_dict() if isinstance(influencer, pd.Series) else influencer

        text_columns = [
            'category_niche',
            'audience_demographics',
            'audience_interests',
            'content_types'
        ]

        return self._combine_text_features(influencer_dict, text_columns)

    def get_embedding(self, text: str) -> np.ndarray:
        """Embed *text*, using the cache and retrying on API errors.

        Returns a zero vector of EMBEDDING_DIM for empty input or when every
        attempt fails, so callers can always stack the result.
        """
        if not text or text.strip() == "":
            return np.zeros(self.EMBEDDING_DIM)

        text_hash = self._hash_text(text)
        if text_hash in self.embedding_cache:
            return np.array(self.embedding_cache[text_hash])

        for attempt in range(self.max_retries):
            try:
                genai = self._get_genai()
                result = genai.embed_content(
                    model=self.model,
                    content=text
                )

                embedding = np.array(result['embedding'])

                # Cache as a plain list so the cache stays JSON-serializable.
                self.embedding_cache[text_hash] = embedding.tolist()

                # Periodically flush the cache to disk.
                if len(self.embedding_cache) % 100 == 0:
                    self._save_cache()

                return embedding
            except Exception as e:
                logger.error(f"Error getting embedding (attempt {attempt+1}/{self.max_retries}): {str(e)}")
                if attempt < self.max_retries - 1:
                    time.sleep(self.retry_delay)

        logger.error(f"All embedding attempts failed for text: {text[:100]}...")
        return np.zeros(self.EMBEDDING_DIM)

    def batch_get_embeddings(self, texts: List[str]) -> List[np.ndarray]:
        """Embed *texts* in batches of ``self.batch_size``, pausing between batches."""
        results = []

        for i in range(0, len(texts), self.batch_size):
            batch = texts[i:i + self.batch_size]
            results.extend(self.get_embedding(text) for text in batch)

            # Light rate limiting between batches (skip after the last one).
            if i + self.batch_size < len(texts):
                time.sleep(0.5)

        return results

    def calculate_text_similarity(self, brand_text: str, influencer_text: str) -> float:
        """Return the cosine similarity between two texts' embeddings."""
        brand_embedding = self.get_embedding(brand_text)
        influencer_embedding = self.get_embedding(influencer_text)

        similarity = cosine_similarity(
            brand_embedding.reshape(1, -1),
            influencer_embedding.reshape(1, -1)
        )[0][0]

        return float(similarity)

    def calculate_batch_similarities(self, brand_texts: List[str],
                                  influencer_texts: List[str]) -> np.ndarray:
        """Return the (len(brand_texts), len(influencer_texts)) cosine matrix."""
        brand_embeddings = self.batch_get_embeddings(brand_texts)
        influencer_embeddings = self.batch_get_embeddings(influencer_texts)

        brand_matrix = np.vstack(brand_embeddings)
        influencer_matrix = np.vstack(influencer_embeddings)

        return cosine_similarity(brand_matrix, influencer_matrix)

    def print_detailed_match_analysis(self, brand: Union[pd.Series, Dict],
                                   influencer: Union[pd.Series, Dict],
                                   similarity_score: float):
        """Log a human-readable breakdown of one brand/influencer match."""
        logger.info("=" * 80)

        # Series expose their row label via .name; dicts carry explicit ids.
        brand_id = brand.name if isinstance(brand, pd.Series) else brand.get('brand_id', 'Unknown')
        # .get works identically on Series and dict, so no branching is needed.
        brand_name = brand.get('name', 'Unknown Brand')

        influencer_id = influencer.name if isinstance(influencer, pd.Series) else influencer.get('influencer_id', 'Unknown')
        influencer_name = influencer.get('name', 'Unknown Influencer')

        logger.info("Brand Details:")
        logger.info(f"  ID: {brand_id}")
        logger.info(f"  Name: {brand_name}")

        logger.info("\nInfluencer Details:")
        logger.info(f"  ID: {influencer_id}")
        logger.info(f"  Name: {influencer_name}")
        logger.info("-" * 80)

        logger.info("\nBrand Text Features:")
        brand_text = self.get_brand_text_features(brand)
        for feature in brand_text.split(" | "):
            logger.info(f"  - {feature}")

        logger.info("\nInfluencer Text Features:")
        influencer_text = self.get_influencer_text_features(influencer)
        for feature in influencer_text.split(" | "):
            logger.info(f"  - {feature}")

        logger.info("\nText Similarity Analysis:")
        logger.info(f"  Score: {similarity_score:.4f}")

        logger.info("\nScore Interpretation:")
        if similarity_score >= 0.8:
            logger.info("  Excellent Match (≥0.8):")
            logger.info("  - Very strong text similarity")
            logger.info("  - High potential for successful collaboration")
            logger.info("  - Strong alignment in multiple areas")
        elif similarity_score >= 0.6:
            logger.info("  Good Match (≥0.6):")
            logger.info("  - Significant text similarity")
            logger.info("  - Good potential for collaboration")
            logger.info("  - Notable alignment in key areas")
        elif similarity_score >= 0.4:
            logger.info("  Moderate Match (≥0.4):")
            logger.info("  - Some text similarity")
            logger.info("  - Potential for collaboration with careful consideration")
            logger.info("  - Partial alignment in some areas")
        else:
            logger.info("  Weak Match (<0.4):")
            logger.info("  - Limited text similarity")
            logger.info("  - May need to reconsider match")
            logger.info("  - Limited alignment in key areas")

        logger.info("=" * 80)

    def get_text_similarity_matrix(self, brands_df: pd.DataFrame,
                                influencers_df: pd.DataFrame,
                                batch_size: int = 10) -> np.ndarray:
        """Compute (or load from cache) the full brands x influencers matrix.

        The matrix is cached as a compressed .npz keyed by the frame sizes.
        NOTE(review): the cache key only encodes the frame lengths, so stale
        results can be returned if the frames' contents change — confirm this
        is acceptable for the calling pipeline.
        """
        start_time = time.time()
        logger.info(f"Calculating text similarity matrix for {len(brands_df)} brands and {len(influencers_df)} influencers")

        if self.cache_dir:
            cache_path = os.path.join(self.cache_dir, f"text_similarity_{len(brands_df)}_{len(influencers_df)}.npz")
            if os.path.exists(cache_path):
                logger.info(f"Loading text similarity matrix from cache: {cache_path}")
                data = np.load(cache_path)
                matrix = data['matrix']
                logger.info(f"Loaded text similarity matrix in {time.time() - start_time:.2f} seconds")
                return matrix

        similarity_matrix = np.zeros((len(brands_df), len(influencers_df)))

        # Process in chunk pairs so API batching and rate limiting apply.
        for i in range(0, len(brands_df), batch_size):
            brand_chunk = brands_df.iloc[i:i + batch_size]
            brand_texts = [self.get_brand_text_features(brand) for _, brand in brand_chunk.iterrows()]

            for j in range(0, len(influencers_df), batch_size):
                influencer_chunk = influencers_df.iloc[j:j + batch_size]
                influencer_texts = [self.get_influencer_text_features(influencer) for _, influencer in influencer_chunk.iterrows()]

                batch_similarities = self.calculate_batch_similarities(brand_texts, influencer_texts)

                # Scatter the chunk's scores into their global positions.
                for bi, (brand_idx, _) in enumerate(brand_chunk.iterrows()):
                    for ii, (influencer_idx, _) in enumerate(influencer_chunk.iterrows()):
                        global_brand_idx = brands_df.index.get_loc(brand_idx)
                        global_influencer_idx = influencers_df.index.get_loc(influencer_idx)
                        similarity_matrix[global_brand_idx, global_influencer_idx] = batch_similarities[bi, ii]

                logger.info(f"Processed batch: brands {i}-{i+len(brand_chunk)-1}, influencers {j}-{j+len(influencer_chunk)-1}")

        if self.cache_dir:
            logger.info(f"Saving text similarity matrix to cache: {cache_path}")
            np.savez_compressed(cache_path, matrix=similarity_matrix)

        logger.info(f"Text similarity matrix calculation completed in {time.time() - start_time:.2f} seconds")
        return similarity_matrix

    def save_similarity_scores(self, brands_df: pd.DataFrame,
                            influencers_df: pd.DataFrame,
                            output_path: str):
        """Score every brand/influencer pair and write a sorted CSV to *output_path*."""
        logger.info(f"Calculating and saving similarity scores to {output_path}")
        start_time = time.time()

        all_scores = []
        batch_size = 5  # small chunks keep memory and API bursts bounded

        for i in range(0, len(brands_df), batch_size):
            brand_chunk = brands_df.iloc[i:i + batch_size]

            for j in range(0, len(influencers_df), batch_size):
                influencer_chunk = influencers_df.iloc[j:j + batch_size]

                for _, brand in brand_chunk.iterrows():
                    brand_text = self.get_brand_text_features(brand)

                    for _, influencer in influencer_chunk.iterrows():
                        influencer_text = self.get_influencer_text_features(influencer)
                        similarity = self.calculate_text_similarity(brand_text, influencer_text)

                        all_scores.append({
                            'brand_id': brand.name,
                            'brand_name': brand.get('name', 'Unknown Brand'),
                            'influencer_id': influencer.name,
                            'influencer_name': influencer.get('name', 'Unknown Influencer'),
                            'similarity_score': similarity,
                            'brand_text': brand_text,
                            'influencer_text': influencer_text
                        })

                logger.info(f"Processed scores for brands {i}-{i+len(brand_chunk)-1}, influencers {j}-{j+len(influencer_chunk)-1}")

        scores_df = pd.DataFrame(all_scores)
        scores_df = scores_df.sort_values('similarity_score', ascending=False)

        # os.makedirs('') raises, so only create a directory when one is present
        # in the output path.
        out_dir = os.path.dirname(output_path)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)
        scores_df.to_csv(output_path, index=False)

        logger.info(f"Saved {len(scores_df)} similarity scores to {output_path} in {time.time() - start_time:.2f} seconds")
#include <iostream>
#include <vector>
using namespace std;

// Partition a[low..high] around the pivot a[low] (Hoare-style two-pointer
// sweep). On return, the pivot sits at the returned index, everything to its
// left is <= pivot and everything to its right is > pivot.
int Partition(int a[], int low, int high)
{
  int pivot = a[low];
  int i = low;   // scans right over elements that belong left of the pivot
  int j = high;  // scans left over elements that belong right of the pivot
  
  while(i < j)
  {
    // Advance i while elements are <= pivot (bounded to stay inside the range).
    while(a[i] <= pivot && i <= high - 1)
      ++i;
      
    // Retreat j while elements are > pivot (bounded to stay inside the range).
    while(a[j] > pivot && j >= low + 1)
      --j;
      
    // Pointers have not crossed: both elements are on the wrong side, swap them.
    if(i < j)
      swap(a[i], a[j]);
  }
  
  // j now indexes the rightmost element <= pivot; place the pivot there.
  swap(a[low], a[j]);
  
  return j;
}

// Sort a[low..high] in place by recursively partitioning around a pivot.
void QuickSort(int a[], int low, int high)
{
  // Guard clause: ranges of zero or one element are already sorted.
  if(low >= high)
    return;

  int pivotIndex = Partition(a, low, high);

  QuickSort(a, low, pivotIndex - 1);   // left side, excluding the pivot
  QuickSort(a, pivotIndex + 1, high);  // right side, excluding the pivot
}

int main() 
{
  int n;
  cin >> n;
  
  int a[n];
  for(int i = 0; i < n; ++i)
    cin >> a[i];
    
  QuickSort(a, 0, n-1);
    
  for(int i = 0; i < n; ++i)
    cout << a[i] << " ";
    
  return 0;
}
{% comment %}
  Render one download link per URL in the product's custom.file_url metafield
  (a list-type metafield accessed via .value); nothing is output when the
  metafield is blank.
{% endcomment %}
{% if product.metafields.custom.file_url != blank %}
  <ul>
    {% for url in product.metafields.custom.file_url.value %}
      <li>
        <a href="{{ url }}" target="_blank">📄 Download PDF</a>
      </li>
    {% endfor %}
  </ul>
{% endif %}
// Map [ColumnName] to a label: "1501" -> "FFPC", "1500" -> "PCFam",
// values in ["1100", "1200") -> "Différentielle". The first true condition's
// position selects the matching entry in Results.
// NOTE(review): when no condition is true, List.PositionOf returns -1 and the
// Results{...} lookup errors — confirm an unmatched row cannot occur, or add
// a fallback. Also note the range test compares strings, not numbers.
let
  Conditions = {
    [ColumnName] = "1501", 
    [ColumnName] = "1500", 
    [ColumnName] >= "1100" and [ColumnName] < "1200"
  }, 
  Results = {"FFPC", "PCFam", "Différentielle"}
in
  Results{List.PositionOf(Conditions, true)}
Make the change on a single product first to confirm the id_tax_rules_group value, then run the bulk edit:


-- Move every product from tax rules group 1 to group 2. Both tables must be
-- updated so PrestaShop's per-shop data stays in sync with the product table.
UPDATE `ps_product` SET `id_tax_rules_group`='2' WHERE  `id_tax_rules_group`=1;

UPDATE `ps_product_shop` SET `id_tax_rules_group`='2' WHERE  `id_tax_rules_group`=1;
/* Arrow icon as a centered, fully-contained background image
   (declaration fragment — paste inside a selector block). */
background-image: url(../images/icon-arrow.svg);
background-size: contain;
background-position: center;
White Label Cryptocurrency Exchange Script is a ready-made solution for creating crypto exchanges quickly and securely with advanced functionalities. With this script, entrepreneurs can make customizations based on their needs and start a crypto exchange business quickly.
A Rental Script is a set of software solutions developed to facilitate the effective management of rental services. These scripts are commonly used by on-demand app https://appticz.com/on-demand-app-development businesses engaged in providing services such as car rentals, equipment rentals, vacation rentals, and even event planning services.
from crewai import Agent, Task, Crew ,LLM
from langchain_openai import ChatOpenAI
from crewai_tools import ScrapeWebsiteTool
import os
from dotenv import load_dotenv
from langchain_google_genai import ChatGoogleGenerativeAI

# Load environment variables from a local .env file so secrets stay out of
# source control.
load_dotenv()

# SECURITY: read the Gemini API key from the environment instead of
# hard-coding it in the script (the previous version embedded a leaked key).
api_key = os.getenv("GEMINI_API_KEY")
if not api_key:
    raise RuntimeError(
        "GEMINI_API_KEY is not set; add it to your environment or .env file"
    )

# Gemini model wrapped for CrewAI.
llm = LLM(
    api_key=api_key,
    model="gemini/gemini-1.5-flash",
)

# Tool that scrapes the target site's pages.
site = 'https://www.simplifymoney.in/'
web_scrape_tool = ScrapeWebsiteTool(website_url=site)

# Agent that performs the scraping with the tool above.
web_scraper_agent = Agent(
    role='Web Scraper',
    goal='Effectively Scrape data on the websites for your company',
    backstory='''You are expert web scraper, your job is to scrape all the data for
                your company from a given website.
                ''',
    tools=[web_scrape_tool],
    verbose=True,
    llm=llm
)

# Task definition: scrape everything and also persist it to data.txt.
web_scraper_task = Task(
    description='Scrape all the  data on the site so your company can use for decision making.',
    expected_output='All the content of the website.',
    agent=web_scraper_agent,
    output_file='data.txt'
)

# Assemble a single-agent crew.
crew = Crew(
    agents=[web_scraper_agent],
    tasks=[web_scraper_task],
    verbose=True,
)

# Execute the task and persist the raw output.
result = crew.kickoff()
print(result.raw)

with open('results.txt', 'w', encoding='utf-8') as f:
    f.write(result.raw)
import pandas as pd
import numpy as np
from typing import List, Dict, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
import google.generativeai as genai
import logging
from functools import lru_cache
import re


# Module-wide logging: INFO and above, with a timestamped single-line format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)



class TextEmbedder:
    """Compute and compare Gemini text embeddings for brand/influencer matching.

    Wraps the ``google.generativeai`` embedding API and adds text
    preprocessing, per-instance caching, batched embedding, similarity
    scoring and human-readable match reporting.
    """

    def __init__(self, api_key: str, model_name: str = "models/text-embedding-004", batch_size: int = 50):
        """Configure the Gemini client.

        Args:
            api_key: Google Generative AI API key. (A previous revision
                ignored this parameter and used a hard-coded key — a
                security leak; the caller-supplied key is now used.)
            model_name: Embedding model identifier.
            batch_size: Number of texts sent per batched embedding request.
        """
        genai.configure(api_key=api_key)
        self.model = model_name
        self.batch_size = batch_size
        # Per-instance embedding cache. functools.lru_cache on a method
        # would key on `self` and keep the instance alive forever (B019),
        # so a bounded plain dict is used instead.
        self._embedding_cache: Dict[str, np.ndarray] = {}
        self._cache_maxsize = 5000
        self.embedding_dim = self._get_model_dimension()
        logger.info(f"Initialized with embedding dimension: {self.embedding_dim}")

    def _get_model_dimension(self) -> int:
        """Probe the model once to discover its embedding dimensionality.

        Falls back to 768 when the probe request fails (e.g. no network).
        """
        try:
            test_embedding = genai.embed_content(
                model=self.model,
                content="dimension test",
                task_type="RETRIEVAL_DOCUMENT"
            )['embedding']
            return len(test_embedding)
        except Exception as e:
            logger.error(f"Failed to get model dimension: {str(e)}")
            logger.info("Defaulting to 768 dimensions")
            return 768

    def _preprocess_text(self, text: str) -> str:
        """Lowercase, collapse whitespace, and drop punctuation except ``| : -``."""
        text = text.lower().strip()
        text = re.sub(r'\s+', ' ', text)
        text = re.sub(r'[^\w\s|:-]', '', text)
        return text

    def _combine_text_features(self, row: pd.Series, text_columns: List[str]) -> str:
        """Join the non-null columns of `row` into one ``col:value | ...`` string.

        ``|`` inside a value is replaced so it cannot collide with the
        feature separator used by analyze_feature_alignment().
        """
        features = []
        for col in text_columns:
            if col in row and pd.notna(row[col]):
                value = str(row[col])
                if "|" in value:
                    value = value.replace("|", ",")
                features.append(f"{col}:{value}")
        return self._preprocess_text(" | ".join(features))

    def get_brand_text_features(self, brand: pd.Series) -> str:
        """Extract the brand columns relevant for matching as one text blob."""
        text_columns = [
            'industry', 'target_audience', 'brand_messaging',
            'tone_voice', 'category_alignment',
            'brand_alignment_keywords', 'content_type'
        ]
        return self._combine_text_features(brand, text_columns)

    def get_influencer_text_features(self, influencer: pd.Series) -> str:
        """Extract the influencer columns relevant for matching as one text blob."""
        text_columns = [
            'category_niche', 'audience_demographics',
            'audience_interests', 'content_types'
        ]
        return self._combine_text_features(influencer, text_columns)

    def get_embedding(self, text: str) -> np.ndarray:
        """Return an L2-normalized embedding for `text`.

        Returns a zero vector for blank input or on API failure, so callers
        always get a vector of ``self.embedding_dim`` components.
        """
        if not text.strip():
            return np.zeros(self.embedding_dim)

        cached = self._embedding_cache.get(text)
        if cached is not None:
            return cached

        try:
            result = genai.embed_content(
                model=self.model,
                content=text,
                task_type="RETRIEVAL_DOCUMENT"
            )
            embedding = normalize(np.array(result['embedding']).reshape(1, -1))[0]
        except Exception as e:
            logger.error(f"Embedding error: {str(e)} | Text: {text[:100]}...")
            embedding = np.zeros(self.embedding_dim)

        if len(self._embedding_cache) < self._cache_maxsize:
            self._embedding_cache[text] = embedding
        return embedding

    def batch_get_embeddings(self, texts: List[str]) -> np.ndarray:
        """Embed `texts` in batches; returns an (n, dim) row-normalized array.

        Uses ``genai.embed_content`` with a list of contents — the previously
        called ``genai.batch_embed_texts`` does not exist in the SDK, so every
        batch silently fell into the error path and produced zero vectors.
        Failed batches still degrade to zero vectors rather than aborting.
        """
        if not texts:
            # normalize() on an empty array raises; short-circuit instead.
            return np.zeros((0, self.embedding_dim))

        embeddings: List[np.ndarray] = []
        for i in range(0, len(texts), self.batch_size):
            batch = texts[i:i + self.batch_size]
            try:
                response = genai.embed_content(
                    model=self.model,
                    content=batch,
                    task_type="RETRIEVAL_DOCUMENT"
                )
                embeddings.extend(np.array(e) for e in response['embedding'])
            except Exception as e:
                logger.error(f"Batch embedding failed: {str(e)}")
                embeddings.extend([np.zeros(self.embedding_dim)] * len(batch))
        return normalize(np.array(embeddings))

    def calculate_text_similarity(self, brand_text: str, influencer_text: str) -> float:
        """Cosine similarity of the two texts' embeddings, clipped to [0, 1]."""
        brand_embedding = self.get_embedding(brand_text)
        influencer_embedding = self.get_embedding(influencer_text)

        similarity = cosine_similarity(
            brand_embedding.reshape(1, -1),
            influencer_embedding.reshape(1, -1)
        )[0][0]

        return float(np.clip(similarity, 0, 1))

    def get_similarity_matrix(self, brands_df: pd.DataFrame, influencers_df: pd.DataFrame) -> np.ndarray:
        """Pairwise (brands x influencers) cosine-similarity matrix in [0, 1]."""
        brand_texts = [self.get_brand_text_features(row) for _, row in brands_df.iterrows()]
        influencer_texts = [self.get_influencer_text_features(row) for _, row in influencers_df.iterrows()]

        brand_embeddings = self.batch_get_embeddings(brand_texts)
        influencer_embeddings = self.batch_get_embeddings(influencer_texts)

        similarity_matrix = cosine_similarity(brand_embeddings, influencer_embeddings)
        return np.clip(similarity_matrix, 0, 1)

    def analyze_feature_alignment(self, brand_text: str, influencer_text: str) -> Dict:
        """Set-compare the ``|``-separated feature strings of both sides.

        Returns common/unique feature lists plus the share of brand features
        that also appear on the influencer side (0 when the brand has none).
        """
        brand_features = set(brand_text.split(" | "))
        influencer_features = set(influencer_text.split(" | "))

        common_features = brand_features & influencer_features
        unique_brand = brand_features - influencer_features
        unique_influencer = influencer_features - brand_features

        return {
            'common_features': list(common_features),
            'unique_brand_features': list(unique_brand),
            'unique_influencer_features': list(unique_influencer),
            'feature_overlap_ratio': len(common_features) / max(len(brand_features), 1)
        }

    def print_detailed_match_analysis(self, brand: pd.Series, influencer: pd.Series, similarity_score: float):
        """Pretty-print a feature-alignment report for one brand/influencer pair."""
        brand_text = self.get_brand_text_features(brand)
        influencer_text = self.get_influencer_text_features(influencer)
        alignment = self.analyze_feature_alignment(brand_text, influencer_text)

        print("\n" + "="*80)
        print(f"Match Analysis - Brand: {brand.get('name', 'Unknown')} vs Influencer: {influencer.get('name', 'Unknown')}")
        print("-"*80)

        print("\nFeature Alignment:")
        print(f"Common Features ({len(alignment['common_features'])}):")
        for feat in alignment['common_features'][:5]:
            print(f"  - {feat}")

        print(f"\nBrand Unique Features ({len(alignment['unique_brand_features'])}):")
        for feat in alignment['unique_brand_features'][:3]:
            print(f"  - {feat}")

        print(f"\nInfluencer Unique Features ({len(alignment['unique_influencer_features'])}):")
        for feat in alignment['unique_influencer_features'][:3]:
            print(f"  - {feat}")

        print("\n" + "-"*80)
        print(f"Text Similarity Score: {similarity_score:.4f}")
        print("Score Interpretation:")
        self._print_score_interpretation(similarity_score)
        print("="*80)

    def _print_score_interpretation(self, score: float):
        """Print the first (highest) threshold band that `score` reaches."""
        thresholds = [
            (0.9, "Exceptional Match", "Near-perfect alignment in brand/influencer characteristics"),
            (0.7, "Strong Match", "High potential for successful collaboration"),
            (0.5, "Moderate Match", "Potential with some adjustments needed"),
            (0.3, "Weak Match", "Limited alignment - consider carefully"),
            (0.0, "Poor Match", "Unlikely to be a good fit")
        ]

        for threshold, title, description in thresholds:
            if score >= threshold:
                print(f"{title} (≥{threshold:.1f}): {description}")
                return

    def save_embeddings(self, df: pd.DataFrame, output_path: str, entity_type: str = "brand"):
        """Embed every row of `df` (as brand or influencer) and save with np.save."""
        texts = []
        for _, row in df.iterrows():
            if entity_type == "brand":
                texts.append(self.get_brand_text_features(row))
            else:
                texts.append(self.get_influencer_text_features(row))

        embeddings = self.batch_get_embeddings(texts)
        np.save(output_path, embeddings)
        logger.info(f"Saved {entity_type} embeddings to {output_path}")

    def load_embeddings(self, input_path: str) -> np.ndarray:
        """Load a previously saved embedding matrix from `input_path`."""
        return np.load(input_path)
<!DOCTYPE html>
<!-- Standalone greeting-card page: a single centered white card on a pink
     background with a Portuguese love message. No scripts or external
     dependencies; all styling is inlined below. -->
<html lang="pt-br">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Para Você ❤️</title>
    <style>
        /* Page background and centering for the card. */
        body {
            font-family: Arial, sans-serif;
            text-align: center;
            background-color: #ffe6e6;
            padding: 50px;
        }
        /* The card itself: white, rounded, softly shadowed. */
        .card {
            background: white;
            padding: 20px;
            border-radius: 10px;
            box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.1);
            display: inline-block;
        }
        h1 {
            color: #ff4d4d;
        }
    </style>
</head>
<body>
    <div class="card">
        <h1>Para Minha Pessoa Especial ❤️</h1>
        <p>Oi, amor! Você é a melhor coisa que já aconteceu na minha vida.</p>
        <p>Cada momento ao seu lado é incrível, e eu sou muito grato por ter você.</p>
        <p>Te amo demais! 💕</p>
    </div>
</body>
</html>
<script>
// auto loading tabs
// Auto-rotating Webflow tabs: every 5 seconds the next tab in
// .acc_tabs-menu is clicked; a manual click on any .acc_tab restarts the
// 5-second timer. Relies on jQuery ($) and the Webflow runtime queue.
  var Webflow = Webflow || [];
  Webflow.push(function () {
    // Fix for Safari
    // Safari scrolls the page when an element receives programmatic focus.
    // Each tab's focus() is wrapped so the pre-focus scroll position is
    // restored one tick after the focus event fires.
    // NOTE(review): the wrapper is installed per-tab via an own-property
    // `focus` that delegates to HTMLElement.prototype.focus — statement
    // order here is deliberate; do not reorder.
    if (navigator.userAgent.includes("Safari")) {
      document.querySelectorAll(".acc_tab").forEach((t) => (t.focus = function () {
        const x = window.scrollX, y = window.scrollY;
        const f = () => {
          // Restore the scroll position captured before focus, then detach.
          setTimeout(() => window.scrollTo(x, y), 1);
          t.removeEventListener("focus", f);
        };
        t.addEventListener("focus", f);
        HTMLElement.prototype.focus.apply(this, arguments);
      }));
    }
    // Start Tabs
    function startTabs() {
      var tabTimeout;
      clearTimeout(tabTimeout);
      tabLoop();
      // Connect your class names to elements.
      function tabLoop() {
        tabTimeout = setTimeout(function () {
          // Advance to the tab after the currently active one; wrap to the
          // first tab when the active tab is the last.
          var $next = $('.acc_tabs-menu').children('.w--current:first').next();
          if ($next.length) {
            $next.click(); // user click resets timeout
          } else {
            $('.acc_tab:first').click();
          }
        }, 5000); // 5 Second Rotation
      }
      // Reset Loops
      // A manual tab click cancels the pending rotation and re-arms it.
      $('.acc_tab').click(function () {
        clearTimeout(tabTimeout);
        tabLoop();
      });
    }
    // Run tabs function
    startTabs();
  });
</script>
INTEGRITY :

องค์กรที่ตั้งอยู่บนความชอบธรรม
POST /logs HTTP/1.1
Host: example.com
Content-Type: application/logplex-1
Logplex-Msg-Count: 10
User-Agent: log-shuttle/x (y; z; w; v)
Content-Length: 2660

266 <190>1 2017-03-15T16:10:27.008803+00:00 11844141-29d5-4425-9fd8-894759ae0dc7 heroku web.1 - - source=web.1 dyno=heroku.11844141-29d5-4425-9fd8-894759ae0dc7.91bd5d7c-b12c-47a0-9d67-daef0e4aca96 sample#load_avg_1m=0.00 sample#load_avg_5m=0.00 sample#load_avg_15m=0.00
391 <190>1 2017-03-15T16:10:27.009270+00:00 11844141-29d5-4425-9fd8-894759ae0dc7 heroku web.1 - - source=web.1 dyno=heroku.11844141-29d5-4425-9fd8-894759ae0dc7.91bd5d7c-b12c-47a0-9d67-daef0e4aca96 sample#memory_total=8.15MB sample#memory_rss=3.95MB sample#memory_cache=2.66MB sample#memory_swap=0.00MB sample#memory_pgpgin=2603pages sample#memory_pgpgout=1423pages sample#memory_quota=2560.00MB
266 <190>1 2017-03-15T16:10:57.008773+00:00 11844141-29d5-4425-9fd8-894759ae0dc7 heroku web.1 - - source=web.1 dyno=heroku.11844141-29d5-4425-9fd8-894759ae0dc7.91bd5d7c-b12c-47a0-9d67-daef0e4aca96 sample#load_avg_1m=0.00 sample#load_avg_5m=0.00 sample#load_avg_15m=0.00
391 <190>1 2017-03-15T16:10:57.009215+00:00 11844141-29d5-4425-9fd8-894759ae0dc7 heroku web.1 - - source=web.1 dyno=heroku.11844141-29d5-4425-9fd8-894759ae0dc7.91bd5d7c-b12c-47a0-9d67-daef0e4aca96 sample#memory_total=8.15MB sample#memory_rss=3.95MB sample#memory_cache=2.66MB sample#memory_swap=0.00MB sample#memory_pgpgin=2603pages sample#memory_pgpgout=1423pages sample#memory_quota=2560.00MB
266 <190>1 2017-03-15T16:11:27.008840+00:00 11844141-29d5-4425-9fd8-894759ae0dc7 heroku web.1 - - source=web.1 dyno=heroku.11844141-29d5-4425-9fd8-894759ae0dc7.91bd5d7c-b12c-47a0-9d67-daef0e4aca96 sample#load_avg_1m=0.00 sample#load_avg_5m=0.00 sample#load_avg_15m=0.00
391 <190>1 2017-03-15T16:11:27.009436+00:00 11844141-29d5-4425-9fd8-894759ae0dc7 heroku web.1 - - source=web.1 dyno=heroku.11844141-29d5-4425-9fd8-894759ae0dc7.91bd5d7c-b12c-47a0-9d67-daef0e4aca96 sample#memory_total=8.15MB sample#memory_rss=3.95MB sample#memory_cache=2.66MB sample#memory_swap=0.00MB sample#memory_pgpgin=2603pages sample#memory_pgpgout=1423pages sample#memory_quota=2560.00MB
266 <190>1 2017-03-15T16:11:57.008775+00:00 11844141-29d5-4425-9fd8-894759ae0dc7 heroku web.1 - - source=web.1 dyno=heroku.11844141-29d5-4425-9fd8-894759ae0dc7.91bd5d7c-b12c-47a0-9d67-daef0e4aca96 sample#load_avg_1m=0.00 sample#load_avg_5m=0.00 sample#load_avg_15m=0.00
391 <190>1 2017-03-15T16:11:57.009221+00:00 11844141-29d5-4425-9fd8-894759ae0dc7 heroku web.1 - - source=web.1 dyno=heroku.11844141-29d5-4425-9fd8-894759ae0dc7.91bd5d7c-b12c-47a0-9d67-daef0e4aca96 sample#memory_total=8.15MB sample#memory_rss=3.95MB sample#memory_cache=2.66MB sample#memory_swap=0.00MB sample#memory_pgpgin=2603pages sample#memory_pgpgout=1423pages sample#memory_quota=2560.00MB
<div align="center">
  <img src="https://s2.loli.net/2025/02/21/KFh9pSTxQWcDJwO.png" height="100">
  <div>&nbsp;</div>
  <img src="https://readme-typing-svg.herokuapp.com?font=Fira+Code&pause=1000&color=2EA2F7&center=true&vCenter=true&width=435&lines=YOU+FOCUS+YOUR+WALK" alt="Typing SVG" />
</div>

<!-- # Introduction
YOU FOCUS YOUR WALK is a pedestrian cell phone usage detection system. Once deployed on the streets, it detects the postures and the hand images of pedestrians to determine whether they are using a cell phone. The face of those who are using the cell phone will be announced. -->
$ heroku logs --dyno router
2012-02-07T09:43:06.123456+00:00 heroku[router]: at=info method=GET path="/stylesheets/dev-center/library.css" host=devcenter.heroku.com fwd="204.204.204.204" dyno=web.5 connect=1ms service=18ms status=200 bytes=13
2012-02-07T09:43:06.123456+00:00 heroku[router]: at=info method=GET path="/articles/bundler" host=devcenter.heroku.com fwd="204.204.204.204" dyno=web.6 connect=1ms service=18ms status=200 bytes=20375

$ heroku logs --source app
2012-02-07T09:45:47.123456+00:00 app[web.1]: Rendered shared/_search.html.erb (1.0ms)
2012-02-07T09:45:47.123456+00:00 app[web.1]: Completed 200 OK in 83ms (Views: 48.7ms | ActiveRecord: 32.2ms)
2012-02-07T09:45:47.123456+00:00 app[worker.1]: [Worker(host:465cf64e-61c8-46d3-b480-362bfd4ecff9 pid:1)] 1 jobs processed at 23.0330 j/s, 0 failed ...
2012-02-07T09:46:01.123456+00:00 app[web.6]: Started GET "/articles/buildpacks" for 4.1.81.209 at 2012-02-07 09:46:01 +0000

$ heroku logs --source app --dyno worker
2012-02-07T09:47:59.123456+00:00 app[worker.1]: [Worker(host:260cf64e-61c8-46d3-b480-362bfd4ecff9 pid:1)] Article#record_view_without_delay completed after 0.0221
2012-02-07T09:47:59.123456+00:00 app[worker.1]: [Worker(host:260cf64e-61c8-46d3-b480-362bfd4ecff9 pid:1)] 5 jobs processed at 31.6842 j/s, 0 failed ...
​

      <div class="testimonial-card">

        <div class="testimonial-author">Jeanette</div>

        <div class="star-rating">★★★★★</div>

        <div class="testimonial-text">I'm thrilled to have a yoga studio in the Excelsior neighborhood! The studio is very welcoming with a variety of classes.</div>

      </div>

​

      <div class="testimonial-card">

        <div class="testimonial-author">Thomas</div>

        <div class="star-rating">★★★★★</div>

        <div class="testimonial-text">A beautiful yoga studio with knowledgeable teachers. It's great to have a space like this in the community.</div>

      </div>

​

      <div class="testimonial-card">

        <div class="testimonial-author">Michelle</div>

        <div class="star-rating">★★★★★</div>

        <div class="testimonial-text">So glad this great yoga studio has recently opened up in the Excelsior. I have been taking a week of classes so far and I love it.</div>

      </div>

    </div>

  </div>

</div>

​
# Request an embedding vector for the prompt from a local Ollama server
# (nomic-embed-text model) via its REST API on the default port 11434.
curl http://localhost:11434/api/embeddings -d '{
  "model": "nomic-embed-text",
  "prompt": "The sky is blue because of Rayleigh scattering"
}'
// Keep the anchor selector lists in variables so every link rule stays in
// sync when a pseudo-class is added or removed.
$a-tags: 'a, a:active, a:hover, a:visited';
$a-tags-hover: 'a:active, a:hover';
 
// Interpolation (#{...}) is required so Sass emits the string as a
// selector list rather than a literal variable name.
#{$a-tags} {
  color: red;
  text-decoration: none;
}
#{$a-tags-hover} {
  color: blue;
}
star

Wed Feb 26 2025 06:20:27 GMT+0000 (Coordinated Universal Time) https://www.coinsclone.com/white-label-tokenization-platform/

@LilianAnderson #whitelabeltokenization #tokenizationplatform #blockchainforstartups #digitalassetsolutions #tokenizeyourbusiness

star

Wed Feb 26 2025 06:16:46 GMT+0000 (Coordinated Universal Time)

@piyushkumar121 #python

star

Wed Feb 26 2025 03:03:11 GMT+0000 (Coordinated Universal Time)

@Rohan@99

star

Wed Feb 26 2025 01:38:24 GMT+0000 (Coordinated Universal Time)

@Rohan@99

star

Tue Feb 25 2025 18:55:24 GMT+0000 (Coordinated Universal Time)

@jesus

star

Tue Feb 25 2025 16:04:49 GMT+0000 (Coordinated Universal Time)

@baamn #powershell #filename

star

Tue Feb 25 2025 14:18:27 GMT+0000 (Coordinated Universal Time)

@MinaTimo

star

Tue Feb 25 2025 12:32:46 GMT+0000 (Coordinated Universal Time)

@Shira

star

Tue Feb 25 2025 10:34:02 GMT+0000 (Coordinated Universal Time)

@erika

star

Tue Feb 25 2025 10:07:11 GMT+0000 (Coordinated Universal Time) https://www.coinsclone.com/cryptocurrency-exchange-business-plan/

@CharleenStewar ##cryptocurrencyexchange business plan

star

Tue Feb 25 2025 08:54:31 GMT+0000 (Coordinated Universal Time)

@MinaTimo

star

Tue Feb 25 2025 08:19:25 GMT+0000 (Coordinated Universal Time) https://www.opris.exchange/white-label-cryptocurrency-exchange-development/

@oprisexchange #cryptoexchange #cryptocurrency #bitcoin #binanceclone #opris #white_label_software

star

Tue Feb 25 2025 08:08:54 GMT+0000 (Coordinated Universal Time)

@baamn #trid #stdin

star

Tue Feb 25 2025 07:53:08 GMT+0000 (Coordinated Universal Time) https://prismjs.com/

@baamn #markdown #codeblock

star

Tue Feb 25 2025 06:34:26 GMT+0000 (Coordinated Universal Time)

@reiddd #javascript

star

Tue Feb 25 2025 06:33:26 GMT+0000 (Coordinated Universal Time) https://pkgsrc.org/

@sercantas

star

Tue Feb 25 2025 05:50:31 GMT+0000 (Coordinated Universal Time) https://www.addustechnologies.com/crypto-forex-trading-with-mt4-mt5-development

@Seraphina

star

Tue Feb 25 2025 05:43:41 GMT+0000 (Coordinated Universal Time)

@piyushkumar121 #python

star

Tue Feb 25 2025 03:02:41 GMT+0000 (Coordinated Universal Time)

@Rohan@99

star

Tue Feb 25 2025 02:03:09 GMT+0000 (Coordinated Universal Time)

@procodefinder

star

Mon Feb 24 2025 15:54:33 GMT+0000 (Coordinated Universal Time)

@davidb2107 #power_query

star

Mon Feb 24 2025 15:15:59 GMT+0000 (Coordinated Universal Time) https://www.prestashop.com/forums/topic/633322-solvedhow-to-set-a-taxrule-for-all-products/

@caovillanueva ##mysql

star

Mon Feb 24 2025 14:27:47 GMT+0000 (Coordinated Universal Time)

@erika

star

Mon Feb 24 2025 14:17:12 GMT+0000 (Coordinated Universal Time)

@erika

star

Mon Feb 24 2025 13:50:58 GMT+0000 (Coordinated Universal Time)

@erika

star

Mon Feb 24 2025 13:39:17 GMT+0000 (Coordinated Universal Time)

@erika

star

Mon Feb 24 2025 13:35:21 GMT+0000 (Coordinated Universal Time) undefined

@пп

star

Mon Feb 24 2025 13:34:13 GMT+0000 (Coordinated Universal Time) https://yandex.ru/search/?text

@пп

star

Mon Feb 24 2025 13:26:10 GMT+0000 (Coordinated Universal Time)

@erika

star

Mon Feb 24 2025 12:55:22 GMT+0000 (Coordinated Universal Time) https://www.trioangle.com/bybit-clone-script/

@Johnhendrick #java #javascript #django #react.js #angular #android #asp.net

star

Mon Feb 24 2025 10:15:56 GMT+0000 (Coordinated Universal Time) https://appticz.com/cryptocurrency-exchange-script

@nithivandhana #crypto #cryptocurrency #cryptoexchangescript #bitcoinexchangescript #cryptocurrencyexchangescript

star

Mon Feb 24 2025 10:05:36 GMT+0000 (Coordinated Universal Time) https://appticz.com/rental-script

@nithivandhana #ondemand #rental #script #rentalscript

star

Mon Feb 24 2025 09:57:31 GMT+0000 (Coordinated Universal Time) https://appticz.com/on-demand-app-development

@nithivandhana #ondemand #ondemandappdevelopmentcompany #ondemandapps ##ondemandappdevelopment #appticz

star

Mon Feb 24 2025 09:39:36 GMT+0000 (Coordinated Universal Time) https://www.coinsclone.com/how-to-start-a-cryptocurrency-exchange/

@CharleenStewar #howto start a cryptocurrency exchange business #howto start a cryptocurrency exchange #starta cryptocurrency exchange

star

Mon Feb 24 2025 08:55:17 GMT+0000 (Coordinated Universal Time)

@piyushkumar121

star

Sun Feb 23 2025 17:23:53 GMT+0000 (Coordinated Universal Time)

@piyushkumar121

star

Sun Feb 23 2025 15:52:43 GMT+0000 (Coordinated Universal Time)

@Lpss

star

Sun Feb 23 2025 11:52:33 GMT+0000 (Coordinated Universal Time) https://www.promise.co.th/blog/how-to-check-scam.html

@kiritokato

star

Sun Feb 23 2025 09:31:14 GMT+0000 (Coordinated Universal Time)

@erika

star

Sun Feb 23 2025 07:45:03 GMT+0000 (Coordinated Universal Time) https://hyperbound.design.webflow.com/

@pradeep188

star

Sun Feb 23 2025 07:08:29 GMT+0000 (Coordinated Universal Time) https://www.cib.go.th/

@kiritokato

star

Sun Feb 23 2025 03:50:11 GMT+0000 (Coordinated Universal Time) https://devcenter.heroku.com/articles/private-space-logging#logging-format

@cholillo18

star

Sun Feb 23 2025 02:59:20 GMT+0000 (Coordinated Universal Time)

@hyzzzz #python

star

Sun Feb 23 2025 01:34:31 GMT+0000 (Coordinated Universal Time) https://devcenter.heroku.com/articles/logging#view-logs

@cholillo18 #term

star

Sun Feb 23 2025 01:34:17 GMT+0000 (Coordinated Universal Time) https://devcenter.heroku.com/articles/logging#view-logs

@cholillo18 #term

star

Sat Feb 22 2025 22:15:21 GMT+0000 (Coordinated Universal Time) https://codepen.io/pen/

@sanchezarmando4 #undefined

star

Sat Feb 22 2025 14:01:19 GMT+0000 (Coordinated Universal Time) https://ollama.com/library/nomic-embed-text

@sercantas

star

Sat Feb 22 2025 09:18:54 GMT+0000 (Coordinated Universal Time) https://css-tricks.com/snippets/sass/use-sass-variable-selector/

@Sebhart #sass

Save snippets that work with our extensions

Available in the Chrome Web Store Get Firefox Add-on Get VS Code extension