Snippets Collections
#include <stdio.h>

/* Reads cost price and selling price, then reports the profit,
 * the loss, or break-even. Exactly one branch fires per run. */
int main() {

    float costPrice, sellPrice;

    printf("Enter The Cost Price : ");
    scanf("%f", &costPrice);

    printf("Enter The Selling Price : ");
    scanf("%f", &sellPrice);

    float profit = sellPrice - costPrice;
    float loss = costPrice - sellPrice;

    if (costPrice > sellPrice) {
        printf("\nLoss = %f", loss);
    }
    if (sellPrice > costPrice) {
        printf("\nProfit = %f ", profit);
    }
    if (sellPrice == costPrice) {
        printf("\nNo Loss , No Profit");
    }
    return 0;
}
#include <stdio.h>

/* Reads an integer and prints its absolute value.
 * Bug fix: the original tested only a>0 and a<0, so an input of 0
 * produced no output at all; a>=0 now covers that case. */
int main() {

    int a;

    printf("Enter The Value Of Integer : ");
    scanf("%d",&a);

    int b;
    b = a*(-1);   /* negation of a, used when a is negative */

    if(a>=0){
        printf("\n %d Is The Absolute Value Of That Integer.",a);
    }
    else{
        printf("\n %d Is The Absolute Value Of That Integer.",b);
    }
    return 0;
}
/**
 * C program to find all roots of a quadratic equation
 */

#include <stdio.h>
#include <math.h> /* Used for sqrt() */

int main()
{
    float a, b, c;          /* coefficients of aX^2 + bX + c      */
    float root1, root2;     /* real parts of the two roots        */
    float imaginary;        /* imaginary part for the complex case */
    float discriminant;     /* b^2 - 4ac decides the root nature  */

    printf("Enter values of a, b, c of quadratic equation (aX^2 + bX + c): ");
    scanf("%f%f%f", &a, &b, &c);

    discriminant = b * b - 4 * a * c;

    if (discriminant > 0)
    {
        /* Two different real roots. */
        float sqrtD = sqrt(discriminant);
        root1 = (-b + sqrtD) / (2 * a);
        root2 = (-b - sqrtD) / (2 * a);

        printf("Two distinct and real roots exists: %.2f and %.2f", root1, root2);
    }
    else if (discriminant == 0)
    {
        /* One repeated real root. */
        root1 = root2 = -b / (2 * a);

        printf("Two equal and real roots exists: %.2f and %.2f", root1, root2);
    }
    else
    {
        /* Complex conjugate pair: root1 +/- i*imaginary. */
        root1 = root2 = -b / (2 * a);
        imaginary = sqrt(-discriminant) / (2 * a);

        printf("Two distinct complex roots exists: %.2f + i%.2f and %.2f - i%.2f",
                root1, imaginary, root2, imaginary);
    }

    return 0;
}
/**
 * C program to count minimum number of notes in an amount
 */

#include <stdio.h>

int main()
{
    int amount;
    /* Denominations in descending order with a counter per note. */
    const int denom[8] = {500, 100, 50, 20, 10, 5, 2, 1};
    int count[8] = {0};

    /* Input amount from user */
    printf("Enter amount: ");
    scanf("%d", &amount);

    /* Greedy pass: take as many of each denomination as fits,
     * then carry the remainder down to the next smaller note. */
    for (int i = 0; i < 8; i++)
    {
        count[i] = amount / denom[i];
        amount -= count[i] * denom[i];
    }

    /* Print required notes */
    printf("Total number of notes = \n");
    for (int i = 0; i < 8; i++)
    {
        printf("%d = %d\n", denom[i], count[i]);
    }

    return 0;
}
#include <stdio.h>

/* Prints the number of days in a month, given the month number 1-12.
 * Bug fix: the original main() was missing its closing brace, which
 * made the file syntactically invalid. The if/else ladder is also
 * collapsed into a switch with grouped cases; output is unchanged. */
int main()
{
    int month;

    /* Input month number from user */
    printf("Enter month number (1-12): ");
    scanf("%d", &month);

    switch (month)
    {
        case 1: case 3: case 5: case 7: case 8: case 10: case 12:
            printf("31 days");
            break;

        case 4: case 6: case 9: case 11:
            printf("30 days");
            break;

        case 2:
            /* February depends on leap year, which is not known here. */
            printf("28 or 29 days");
            break;

        default:
            printf("Invalid input! Please enter month number between (1-12).");
            break;
    }

    return 0;
}
#include <stdio.h>

/* Classifies a single character as vowel, consonant, or non-alphabetic. */
int main()
{
    char ch;

    printf("Enter any character: ");
    scanf("%c", &ch);

    /* Fold uppercase to lowercase so the vowel test needs one case only. */
    char low = (ch >= 'A' && ch <= 'Z') ? (char)(ch - 'A' + 'a') : ch;
    int isAlpha = (low >= 'a' && low <= 'z');
    int isVowel = (low == 'a' || low == 'e' || low == 'i' ||
                   low == 'o' || low == 'u');

    if (isVowel)
    {
        printf("\n'%c' is Vowel.", ch);
    }
    else if (isAlpha)
    {
        printf("\n'%c' is Consonant.", ch);
    }
    else
    {
        printf("\n'%c' is not an alphabet.", ch);
    }

    return 0;
}
#include <stdio.h>

/* Reads a year and reports whether it is a leap year.
 * Bug fix: the original used only (year % 4 == 0). Under the
 * Gregorian calendar, century years must also be divisible by 400
 * (1900 is not a leap year, 2000 is). */
int main() {

    int a;

    printf("Enter A Year : ");
    scanf("%d",&a);

    if((a%4==0 && a%100!=0) || a%400==0){
        printf("\n %d Is A Leap Year",a);
    }
    else{
        printf("\n %d Is Not A Leap Year",a);
    }

    return 0;
}
<!-- HEADER TOP AREA -->
        <div class="rts-ht rts-ht__bg">
            <div class="container">
                <div class="row">
                    <div class="rts-ht__wrapper">
                        <div class="rts-ht__email">
                            <a href="mailto:hallo@virtupar.com"><img src="assets/images/icon/email.svg" alt="Kontakt per Mail" class="icon"> E-Mail</a>
                        </div>
                        <div class="login-btn-has-dropdown"> 
                            <div class="live__chat">
                                <a href="https://wa.link/b8m69r" class="live__chat"><img src="assets/images/icon/WhatsApp-Digital-Glyph-Green-25x25.png" alt="WhatsApp-Chat" height="20" width="20" class="icon"> WhatsApp</a>
                            </div>
                        </div>
                        <div class="rts-ht__promo">
                            <p><img class="icon" src="assets/images/icon/tag--group.svg" alt="Virtupar Hosting Sonderangebot"> Hosting Angebote bereits ab <strong>3,99€ pro Monat</strong>. Wechseln Sie jetzt!</p>
                        </div>
                    </div>
                </div>
            </div>
        </div>
        <!-- HEADER TOP AREA END -->
          
          
          
          
          
 <!-- ERINNERUNG GRÖSSE FÜR IMAGES -->   style="width:128px;height:128px"
// Fetch the Add_POD_Person record selected on the form.
newDm = Add_POD_Person[ID == input.New_CRM1];
	// Start with the developments chosen on the form...
	devlist = List();
	for each  rec in input.Developments1
	{
		devlist.add(rec);
	}
	// ...then append the developments already stored on the record.
	for each rec2 in newDm.Dev_vis
    {
   devlist.add(rec2);
    }
    // Write the merged list back to the multi-select field.
    newDm.Dev_vis= devlist;
// Aura: navigate to a Visualforce page, passing a record id in the query string.
$A.get("e.force:navigateToURL").setParams(
    {"url": "/apex/pageName?id=00141000004jkU0AAI"}).fire();
// Aura: navigate to the same Visualforce page without parameters.
$A.get("e.force:navigateToURL").setParams({"url": "/apex/pageName"}).fire();
-- Reverses the digits of n into rn and sets b to TRUE when n is a
-- palindrome (equal to its own digit reversal).
-- Contract: n is consumed (ends at 0); callers must pass rn in as 0.
create or replace procedure rev(n in out integer,rn in out integer,b in out boolean )
is
temp integer:=0;  -- keeps the original value of n for the final compare
di integer:=0;    -- current least-significant digit
 
begin
temp:=n; 
while n !=0 loop
di:=n mod 10;
rn:=rn*10+di;
n:=trunc(n/10);
end loop;
if temp=rn then
b:=true;
else
b:=false;
end if;



end;
/







-- Driver: reads &a from the user and reports whether it is a
-- palindrome using the rev(n, rn, b) procedure.
declare
 a integer :=&a;
b integer:=0 ;      -- unused
c integer:=0;       -- unused
d integer:=0;       -- receives the reversed number (must start at 0)
f boolean :=false;  -- palindrome flag set by rev
begin
rev(a,d,f);
if f=true then
dbms_output.put_line('it is a palindrome');
else
dbms_output.put_line('it  is not a palindrome');
end if;

end;
/
-- Reverses the digits of n into rn; di is scratch storage for the
-- current digit. n is consumed (ends at 0); pass rn in as 0.
create or replace procedure rev(n in out integer,di in out integer,rn in out integer)
is
 
begin
 
while n !=0 loop
di:=n mod 10;
rn:=rn*10+di;
n:=trunc(n/10);
end loop;
end;
/



-- Driver: reads &a and prints its digit reversal via rev.
declare
 a integer :=&a;
b integer:=0 ;  -- receives the reversed number (must start at 0)
c integer:=0;   -- scratch digit slot passed to rev
begin
 
rev(a,c,b);
dbms_output.put_line(b);
end;
/
<!-- Minimal Visualforce page: renders a static heading. -->
<apex:page>
    <h1>Hello World</h1>
</apex:page>
import React, { useState, useEffect } from 'react';

// Roster table of office CS 1.6 players, topped by a title whose
// letters bob on a sine wave driven by a timer.
const App = () => {
  // Animation phase; incremented on a timer to advance the wave.
  const [wave, setWave] = useState(0);

  // Static player roster data
  const players = [
    {
      id: 1,
      name: 'Азамат',
      nickname: 'Player1',
      hoursPlayed: 500,
      role: 'Sniper',
      elo: 1500,
    },
    {
      id: 2,
      name: 'Олжас',
      nickname: 'Player2',
      hoursPlayed: 450,
      role: 'Riffle',
      elo: 1400,
    },
    {
      id: 3,
      name: 'Темер',
      nickname: 'Player3',
      hoursPlayed: 600,
      role: 'Riffle',
      elo: 1600,
    },
    {
      id: 4,
      name: 'Нурбек',
      nickname: 'Player4',
      hoursPlayed: 550,
      role: 'Riffle',
      elo: 1550,
    },
    // Add more players here as desired
  ];

  // Advance the wave phase on a fixed interval; cleared on unmount.
  useEffect(() => {
    const interval = setInterval(() => {
      setWave((prevWave) => prevWave + 1);
    }, 200); // 200 ms tick

    return () => clearInterval(interval);
  }, []);

  // Inline styles for the table and the animated title.
  const styles = {
    table: {
      borderCollapse: 'collapse',
      width: '80%',
      margin: '20px auto',
      border: '1px solid #ddd',
      fontFamily: 'Arial, sans-serif',
    },
    th: {
      padding: '12px',
      textAlign: 'left',
      backgroundColor: '#f2f2f2',
      border: '1px solid #ddd',
    },
    td: {
      padding: '12px',
      border: '1px solid #ddd',
    },
    wavyTitle: {
      textAlign: 'center', // center the heading text
    },
    waveEffect: {
      display: 'inline-block',
      transition: 'transform 0.2s ease-in-out',
    },
  };

  // Renders the title with each character vertically offset along a
  // sine wave; the offset shifts each tick as `wave` increments.
  const renderWavyTitle = () => {
    const title = 'Таблица офисных игроков CS 1.6';
    return (
      <h1 style={styles.wavyTitle}>
        {title.split('').map((char, index) => (
          <span
            key={index}
            style={{
              ...styles.waveEffect,
              transform: `translateY(${Math.sin((index + wave) * 0.3) * 10}px)`,
            }}
          >
            {char}
          </span>
        ))}
      </h1>
    );
  };

  return (
    <div>
      {renderWavyTitle()}
      <table style={styles.table}>
        <thead>
          <tr>
            <th style={styles.th}>ID</th>
            <th style={styles.th}>Имя</th>
            <th style={styles.th}>Никнейм</th>
            <th style={styles.th}>Часов сыграно</th>
            <th style={styles.th}>Роль в игре</th>
            <th style={styles.th}>ELO</th>
          </tr>
        </thead>
        <tbody>
          {players.map((player) => (
            <tr key={player.id}>
              <td style={styles.td}>{player.id}</td>
              <td style={styles.td}>{player.name}</td>
              <td style={styles.td}>{player.nickname}</td>
              <td style={styles.td}>{player.hoursPlayed}</td>
              <td style={styles.td}>{player.role}</td>
              <td style={styles.td}>{player.elo}</td>
            </tr>
          ))}
        </tbody>
      </table>
    </div>
  );
};

export default App;
import React, { useState, useEffect } from 'react';

const App = () => {
  const [wave, setWave] = useState(0);

  // Эффект для изменения волнообразной анимации
  useEffect(() => {
    const interval = setInterval(() => {
      setWave((prevWave) => prevWave + 1);
    }, 100); // Интервал в 100 мс для более плавной анимации

    return () => clearInterval(interval);
  }, []);

  return (
    <div style={styles.container}>
      <h1 style={styles.wavyText}>
        {['H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd'].map((char, index) => (
          <span
            key={index}
            style={{
              display: 'inline-block',
              transform: `translateY(${Math.sin((index + wave) * 0.3) * 15}px)`,
              transition: 'transform 0.2s ease-in-out'
            }}
          >
            {char}
          </span>
        ))}
      </h1>
    </div>
  );
};

// Стили в виде JavaScript объектов
const styles = {
  container: {
    display: 'flex',
    justifyContent: 'center',
    alignItems: 'center',
    height: '100vh',
    backgroundColor: '#f0f0f0'
  },
  wavyText: {
    fontSize: '3rem',
    color: '#333',
    fontFamily: 'Arial, sans-serif'
  }
};

export default App;
-- Driver: reads &a and prints a! via the fact procedure.
-- NOTE(review): this anonymous block appears before the
-- "create or replace procedure fact" statement below; run the
-- procedure creation first or the call fails with an undefined name.
declare
 a integer :=&a;
b integer:=0 ;  -- receives the factorial
begin
fact(a,b);
dbms_output.put_line(b);
end;
/

-- Computes n! iteratively into b (b is overwritten; caller's
-- initial value does not matter).
create or replace procedure fact(n in integer,b  in out integer)
is 
 
begin
b:=1;
for i in 1..n loop
b:=b*i;

end loop;

end;
/

// Power Query (M): pull pending task-detail rows from the SQL Server
// view SCE.vw_TASKDETAIL_Pending on USORASPSQLINT / CLARINS_AUDIT,
// normalizing the datetime columns to DATE.
= Sql.Database("USORASPSQLINT", "CLARINS_AUDIT",
               [Query="SELECT #(lf)    TASKDETAILKEY,#(lf)    TASKTYPE,#(lf)    DESCRIPTION,#(lf)    STORERKEY,#(lf)    SKU,#(lf)    UOM,#(lf)    UOMQTY,#(lf)    FROMLOC,#(lf)    FROMID,#(lf)    TOLOC,#(lf)    TOID,#(lf)    ORDERKEY,#(lf)    ORDERLINENUMBER,#(lf)    WAVEKEY,#(lf)    PRIORITY,#(lf)    STATUS,#(lf)    ROUTE,#(lf)    CONVERT(DATE, ADDDATE) AS ADDDATE,#(lf)    ADDWHO,#(lf)    CONVERT(DATE, EDITDATE) AS EDITDATE,#(lf)    EDITWHO,#(lf)    STARTTIME,#(lf)    CONVERT(DATE, ENDTIME) AS ENDTIME#(lf)
               FROM CLARINS_AUDIT.SCE.vw_TASKDETAIL_Pending;#(lf)"])
                
import java.util.*;
import java.lang.*;
import java.io.*;

class Codechef
{
    /**
     * Sorts data[start..end] (inclusive) in place using top-down merge sort.
     */
    public static void mergeSort(int[] data, int start, int end) {
        if(start < end) {
            int mid = (start + end) / 2;
            mergeSort(data, start, mid);
            mergeSort(data, mid + 1, end);
            merge(data, start, mid, end);
        }
    }

    /**
     * Merges the sorted runs data[start..mid] and data[mid+1..end].
     * Fix: removed a leftover debug println that printed the run
     * boundaries to stdout on every merge step.
     */
    public static void merge(int[] data, int start, int mid, int end) {
        int[] temp = new int[end - start + 1];

        // i -> left run cursor, j -> right run cursor, k -> temp cursor
        int i = start, j = mid + 1, k = 0;

        // Take the smaller head while both runs still have elements
        // (<= keeps the sort stable).
        while(i <= mid && j <= end) {
            if(data[i] <= data[j])
                temp[k++] = data[i++];
            else
                temp[k++] = data[j++];
        }

        // Drain whatever remains of the left run...
        while(i <= mid) {
            temp[k++] = data[i++];
        }

        // ...and of the right run (at most one of these loops runs).
        while(j <= end) {
            temp[k++] = data[j++];
        }

        // Copy the merged run back into the original array.
        for(i = start; i <= end; i++) {
            data[i] = temp[i - start];
        }
    }

	public static void main (String[] args) throws java.lang.Exception
	{
	    int data[] = {38, 27, 43, 3, 9, 82, 10};
		mergeSort(data, 0 , data.length - 1);
		for(int num : data) {
		    System.out.print(num +" ");
		}
	}
}
import React, { useState, useEffect } from 'react';
import axios from 'axios';

const Main = () => {
    const [contentType, setContentType] = useState('movies'); // Default to movies
    const [data, setData] = useState([]);

    useEffect(() => {
        const fetchData = async () => {
            const API_KEY = process.env.REACT_APP_OMDB_API_KEY;
            const url = `https://www.omdbapi.com/?apikey=${API_KEY}&s=batman&type=${contentType}`;
            try {
                const response = await axios.get(url);
                setData(response.data.Search);
            } catch (error) {
                console.error('Error fetching data:', error);
            }
        };

        fetchData();
    }, [contentType]);

    return (
        <div>
            <button onClick={() => setContentType('movies')}>Movies</button>
            <button onClick={() => setContentType('series')}>TV Shows</button>
            {data.map(item => (
                <div key={item.imdbID}>
                    <h3>{item.Title}</h3>
                    <img src={item.Poster} alt={item.Title} />
                </div>
            ))}
        </div>
    );
};

export default Main;



import React from 'react';
import Navbar from './components/Navbar';
import Main from './components/Main';

// Root component: renders the navigation bar above the main content.
const App = () => {
    return (
        <>
            <Navbar />
            <Main />
        </>
    );
};

export default App;

 <div class="container">
      <div class="item1">
        <div class="txt"></div>
        <div class="img">
        </div>
      </div>
      <div class="item2"></div>
      <div class="item3"></div>
      <div class="item4"></div>
      <div class="item5"></div>
      <div class="item6"></div>
      <div class="item7"></div>
      <div class="item8"></div>
      <div class="item9"></div>
      <div class="item10"></div>
    </div>
 int[] numbers = {10,20,30,40,50};
        
        
        // Pair each index with its element as "index:value" strings.
        String[] indexElements = IntStream.range(0, numbers.length).mapToObj(i -> i + ":" + numbers[i]).toArray(String[]::new);

        System.out.println(" "+ Arrays.toString(indexElements));


------------------------------------------------------------------------------

 
        int[] numbers = {10,20,30,40,50};
    
      // Sum all elements with an IntStream reduction.
      int sum = IntStream.of(numbers).sum();

        System.out.println(" "+ sum);
# Initialize session state for earnings_df so the DataFrame survives
# Streamlit's script reruns between interactions.
if 'earnings_df' not in st.session_state:
    st.session_state['earnings_df'] = pd.DataFrame()

# Get the filename from user input
filename = st.text_input("Enter the CSV filename:", 'msftq2.csv')

# Load the data only after pressing the 'Load File' button
# (st.button is True only on the rerun triggered by the click).
if st.button('Load File', key='load_file'):
    st.session_state['earnings_df'] = load_data(filename)
    st.success("File loaded successfully!")
#include <stdio.h>

/* Reads five whole numbers, echoes them, then sums or multiplies
 * them at the user's choice.
 *
 * Bug fixes vs the original:
 *  - loops indexed 1..5 into int number[5], writing and reading
 *    number[5] out of bounds (undefined behavior); now 0..4,
 *  - sum and product were used uninitialized (undefined behavior);
 *    they now start at 0 and 1 respectively,
 *  - the switch gained a default branch for invalid choices. */
int main()
{
    int number[5];
    char choice;
    int sum = 0;
    int product = 1;

    printf("Input five whole numbers:\n");

    printf("--------------------------\n\n");

    for(int i = 0; i < 5; i++)
    {
        printf("Enter number %d:\n", i + 1);
        scanf("%d", &number[i]);
    }

    printf("\nThe five numbers are: {");

    for(int i = 0; i < 5; i++)
    {
        printf(" %d", number[i]);

        if(i != 4)
        {
            printf(", ");
        }
    }

    printf(" }");

    printf("\n\nWould you like to sum or multiply the elements (s/m)?\n");
    scanf(" %c", &choice);

    switch(choice)
    {
        case 's':
            printf("The sum of ");
            for(int i = 0; i < 5; i++)
            {
                printf("%d ", number[i]);
                if(i != 4)
                {
                    printf("+ ");
                }

                sum += number[i];
            }
            printf("is %d", sum);
            break;

        case 'm':
            printf("The multiplication of ");
            for(int i = 0; i < 5; i++)
            {
                printf("%d ", number[i]);
                if(i != 4)
                {
                    printf("* ");
                }

                product *= number[i];
            }
            printf("is %d", product);
            break;

        default:
            printf("Unknown choice '%c'", choice);
            break;
    }

    return 0;
}
#include <stdio.h>

/* Prints a comma-separated countdown from start to end, stepping by
 * step_size. The starting value is always printed, even when it is
 * already below end. */
int main()
{
    int start, end, step_size;

    printf("Count down start?\n");
    scanf("%d", &start);
    printf("Count down end?\n");
    scanf("%d", &end);
    printf("Step size?\n");
    scanf("%d", &step_size);

    /* First value carries no separator; the rest print as ", n". */
    printf("%d", start);
    for (int value = start - step_size; value >= end; value -= step_size)
    {
        printf(", %d", value);
    }

    return 0;
}
#include <stdio.h>

/* Computes n! for a non-negative integer read from stdin.
 * Negative input is rejected with exit status 1. */
int main(void)
{
    int number;
    int result = 1;

    printf("Enter a non-negative whole number: ");
    scanf("%d", &number);

    /* Reject negative input before computing. */
    if (number < 0)
    {
        printf("\nBad Input! %d is negative...\n", number);
        return 1;
    }

    /* Multiply 2..n counting up — equivalent to the count-down form;
     * 0! and 1! both leave result at 1. */
    for (int factor = 2; factor <= number; factor++)
    {
        result *= factor;
    }

    printf("\n%d! is %d\n", number, result);

    return 0;
}
#include <stdio.h>

/* Steps from start to stop by step and reports how many iterations
 * the loop performed.
 * Bug fix: `iteration` was incremented and printed without ever
 * being initialized — reading an uninitialized automatic variable
 * is undefined behavior. It now starts at 0. */
int main()
{
    int start;
    int stop;
    int step;
    int iteration = 0;

    printf("Starting number:\n");
    scanf("%d",&start);
    printf("Stopping number:\n");
    scanf("%d",&stop);
    printf("Step size:\n");
    scanf("%d",&step);

    printf("Using a for loop:\n");
    printf("\nStarting at %d...\n\n",start);

    for(int i = start; i<=stop;i+=step)
    {
        printf("In loop: %d...\n",i);
        iteration++;
    }

    printf("\nStopping at %d...\n",stop);
    printf("\nThis loop did %d iterations.\n",iteration);

    printf("\n");
    printf("\n");

    return 0;
}
/* Pill-shaped "Choose file" button for file inputs. */
input::file-selector-button{
  border-radius: 100px;
  padding: 8px 30px;
  margin-right: 12px;
  background: #F4F5F0;
  border: none;
}
/* On small screens, stack flex-column children at full width. */
@media (max-width: 640px) {
    .flex-col>* {
        width: 100%;
    }
}
/* Cap long descriptions and let them scroll vertically. */
.scrollable-description {
  max-height: 500px; /* Adjust this value as needed */
  overflow-y: auto;
}
/* Fade from transparent into the page background color. */
.grad{
  background: linear-gradient(176deg, rgba(244, 245, 240, 0.00) -5.82%, #F4F5F0 97.33%);
}
#include <stdio.h>

/* Reads a suit and a rank character and prints the full card name,
 * e.g. "Queen of Hearts". Invalid input exits with status 1.
 *
 * Fixes vs the original:
 *  - rank 'A' printed "A of" while every other rank was spelled out;
 *    it now prints "Ace of",
 *  - suit 'h' printed "Heart"; it is now "Hearts", matching the
 *    plural form of the other suits,
 *  - the sprintf-into-oversized-buffers pattern is replaced by
 *    pointers to string literals (no copying needed). */
int main()
{
    char rank;
    char suit;
    const char *rank_name;
    const char *suit_name;

    printf("Suit (d/h/s/c):\n");
    scanf(" %c", &suit);

    printf("Rank (A/2/3/4/5/6/7/8/9/T/J/Q/K):\n");
    scanf(" %c", &rank);

    switch (rank)
    {
        case 'A': rank_name = "Ace of";   break;
        case '2': rank_name = "Two of";   break;
        case '3': rank_name = "Three of"; break;
        case '4': rank_name = "Four of";  break;
        case '5': rank_name = "Five of";  break;
        case '6': rank_name = "Six of";   break;
        case '7': rank_name = "Seven of"; break;
        case '8': rank_name = "Eight of"; break;
        case '9': rank_name = "Nine of";  break;
        case 'T': rank_name = "Ten of";   break;
        case 'J': rank_name = "Jack of";  break;
        case 'Q': rank_name = "Queen of"; break;
        case 'K': rank_name = "King of";  break;
        default:
            printf("Invalid Rank\n");
            return 1;
    }

    switch (suit)
    {
        case 'd': suit_name = " Diamonds"; break;
        case 'h': suit_name = " Hearts";   break;
        case 's': suit_name = " Spades";   break;
        case 'c': suit_name = " Clubs";    break;
        default:
            printf("Invalid Suit\n");
            return 1;
    }

    printf("%s%s\n", rank_name, suit_name);

    return 0;
}
// Demo: inspect the String prototype that backs primitive strings.
const s = 'Hello World';

console.log(s);
console.log(s.__proto__); // view in console to get all properties to use on a string

// or use console.dir to get an expandable view of the same object
console.dir(s)
.element {
  width: min(90%, 1200px); /* 90% of the container, capped at 1200px — min() picks the SMALLER value */
}
// Declaring multiple variables at once
let a, b, c;
a = "days of summer";

const d = 10,
  e = 20,
  f = 30; // cannot be reassigned but used as an initial value for something

const baseNumber = d;

// Returns "<number> <string>" via a template literal.
function getNumber(baseNumber, str) {
  return `${baseNumber} ${str}`;
}

console.log("get Number => ", getNumber(d, a));
git pull origin [branch name]
git add .
git commit -m ""
git push origin [branch name]
git remote -v
git remote add origin [repository url]
git remote remove origin
git checkout -b (create a branch)
git checkout (switch branch)
git rebase --continue 
git merge --continue
git push origin [branch name] --force-with-lease
<script setup lang="ts">
import { ref } from "vue";

// One slide of the carousel.
type CarouselElement = {
  link: string;   // navigation target when the slide is clicked
  image: string;  // slide image URL
  label: string;  // caption shown with the slide
  countryCode?: string;
};

interface Props {
  carouselElements: CarouselElement[];
}

// Fall back to three placeholder Phenom 100 slides when no elements
// are provided by the parent.
const props = withDefaults(defineProps<Props>(), {
  carouselElements: () => [
    {
      link: "/",
      image: "/assets/images/100.webp",
      label: "Phenom 100",
    },
    {
      link: "/",
      image: "/assets/images/100.webp",
      label: "Phenom 100",
    },
    {
      link: "/",
      image: "/assets/images/100.webp",
      label: "Phenom 100",
    },
  ],
});
// Refs to the arrow buttons and the scrollable strip; showArrows
// toggles arrow visibility (presumably on hover — confirm in template).
const leftArrowRef = ref<HTMLButtonElement | null>(null);
const rightArrowRef = ref<HTMLButtonElement | null>(null);
const showArrows = ref(false);
const scrollableContainerRef = ref<HTMLDivElement | null>(null);

// Smooth-scrolls the strip 300px in the requested direction.
const scroll = (direction: "left" | "right") => {
  if (scrollableContainerRef.value) {
    const scrollAmount = 300;

    if (direction === "left") {
      scrollableContainerRef.value.scrollTo({
        left: scrollableContainerRef.value.scrollLeft - scrollAmount,
        behavior: "smooth",
      });
    } else {
      scrollableContainerRef.value.scrollTo({
        left: scrollableContainerRef.value.scrollLeft + scrollAmount,
        behavior: "smooth",
      });
    }
  }
};
</script>
const http = require("http");

// Static page served at "/": a minimal urlencoded POST form.
const html = `
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Document</title>
  </head>
  <body>
    <form action="/submit-form" enctype="application/x-www-form-urlencoded" method="POST">
      <label> Enter Name: 
        <input type="text" autocomplete="name" name="name" required />
      </label>
      <input type="submit" />
    </form>
  </body>
</html>
`;

// GET /            -> the form page
// POST /submit-form -> echoes the raw request body as JSON
// anything else     -> 404 / 405
const server = http.createServer((req, res) => {
  const { method, url } = req;

  if (method === "GET") {
    if (url === "/") {
      res.writeHead(200, { "Content-Type": "text/html" });
      res.end(html);
    } else {
      res.writeHead(404, { "Content-Type": "text/plain" });
      res.end("Page not found");
    }
  } else if (method === "POST") {
    if (url === "/submit-form") {
      // Accumulate the request body chunks as they stream in.
      const chunks = [];
      req.on("data", (chunk) => chunks.push(chunk));

      req.on("end", () => {
        const body = chunks.join("");
        console.log("Request body:  " + body);
        // Parse, validate, and sanitize
        res.writeHead(200, { "Content-Type": "application/json" });
        res.end(JSON.stringify({ body }));
      });
    } else {
      res.writeHead(404, { "Content-Type": "text/plain" });
      res.end("Page not found");
    }
  } else {
    res.writeHead(405, { "Content-Type": "text/plain" });
    res.end("Method not supported");
  }
});

const PORT = process.env.PORT || 3000;

server.listen(PORT, () => {
  console.log(`Your app is listening on PORT ${PORT}`);
});
from PIL import Image, ImageDraw, ImageOps

def convert_image_to_circle(image_path, output_path='out.png'):
    """Crop the image at ``image_path`` to a circle and save it as PNG.

    Args:
        image_path: Path of the source image.
        output_path: Destination file. Defaults to ``'out.png'``,
            matching the previously hard-coded name, so existing
            callers are unaffected.
    """
    my_image = Image.open(image_path)
    # Greyscale ('L') mask, fully black (transparent) by default.
    mask = Image.new('L', my_image.size)
    draw = ImageDraw.Draw(mask)
    # White filled ellipse spanning the whole mask = the visible circle.
    draw.ellipse((0, 0) + mask.size, fill=255)

    # Centre-crop the source to the mask size, then use the mask as
    # the alpha channel so everything outside the circle is transparent.
    output = ImageOps.fit(my_image, mask.size, centering=(0.5, 0.5))
    output.putalpha(mask)
    output.save(output_path)



convert_image_to_circle('test.png')
# 引入必要的库
import os
import sys
import torch
import logging
import math
import numpy as np
from typing import Dict
import transformers
from transformers import (
    AutoConfig,
    AutoTokenizer,
    LlamaForCausalLM,
    LlamaTokenizer,
    Trainer,
    DataCollatorWithPadding,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
)

# 将上级目录添加到系统路径中,这样可以引用上级目录中的模块
sys.path.append("..")

# 引入自定义模块,包括模型配置、任务类型定义等
from peft import LoraConfig, PeftModel, TaskType, get_peft_model
from pathlib import Path
from datasets import load_dataset, concatenate_datasets
from itertools import chain
from utils.parser_args import parser_arguments
from utils.metrics import compute_metrics_for_pair
from utils.trainer import PeftTrainer, RMPeftTrainer
from trl import AutoModelForCausalLMWithValueHead
from utils.data_collator import PairDataCollatorWithPadding
from utils.utils import PROMPT_TEMPLATE

# 设置日志记录器
logger = logging.getLogger(__name__)

# 定义一个忽略索引常量,通常用于计算交叉熵时忽略某些特定的标签
IGNORE_INDEX = -100

# 定义模型类别的字典,便于后续根据类型创建模型和分词器
MODEL_CLASSES = {
    "llama": (AutoConfig, LlamaTokenizer, LlamaForCausalLM),
    "auto": (AutoConfig, AutoTokenizer, AutoModelForCausalLM),
}


# 打印模型的可训练参数数量的函数
def print_trainable_params(model: torch.nn.Module) -> None:
    """Print the trainable vs. total parameter counts of ``model``.

    Adapted from LLaMA-Efficient-Tuning (src/utils/other.py).
    """
    trainable, total = 0, 0
    for param in model.parameters():
        count = param.numel()
        # Under DeepSpeed ZeRO-3 a parameter may be empty on this rank;
        # ds_numel then carries the true element count.
        if count == 0 and hasattr(param, "ds_numel"):
            count = param.ds_numel
        total += count
        if param.requires_grad:
            trainable += count
    print(f"可训练参数数量: {trainable} || 总参数数量: {total} || 可训练参数百分比: {100 * trainable / total:.4f}")


# 创建模型的函数
# Build the PEFT/LoRA reward model and its tokenizer.
def create_model(model_args, data_args, training_args):
    """Create a LoRA-wrapped causal LM with a value head, plus its tokenizer.

    Returns:
        (model, tokenizer): the AutoModelForCausalLMWithValueHead-wrapped
        model and the configured tokenizer.
    """
    # Resolve the config/tokenizer/model classes for the requested type.
    config_class, tokenizer_class, model_class = MODEL_CLASSES[model_args.model_type]
    # Load the tokenizer from its own path if given, else from the model path.
    if model_args.tokenizer_name_or_path is None:
        tokenizer = tokenizer_class.from_pretrained(model_args.model_name_or_path, use_fast=model_args.use_fast_tokenizer)
    else:
        tokenizer = tokenizer_class.from_pretrained(model_args.tokenizer_name_or_path, use_fast=model_args.use_fast_tokenizer)
    # Default the pad token id to 0 when the tokenizer does not define one.
    tokenizer.pad_token_id = 0 if tokenizer.pad_token_id is None else tokenizer.pad_token_id

    # Keyword arguments for model loading.
    config_kwargs = {
        "trust_remote_code": True,
        "torch_dtype": model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype),
        "low_cpu_mem_usage": True,
    }
    # Optional 4-bit (NF4) quantized loading via bitsandbytes.
    if model_args.load_in_4bit:
        config_kwargs["load_in_4bit"] = True
        config_kwargs["quantization_config"] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.bfloat16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
        )

    # Load the base pretrained model.
    model = model_class.from_pretrained(
        pretrained_model_name_or_path=model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        **config_kwargs
    )

    # Resume from an existing PEFT adapter when a path is provided...
    if model_args.peft_path is not None:
        logger.info(f"加载预训练模型: {model_args.peft_path}")
        model = PeftModel.from_pretrained(model, model_args.peft_path, is_trainable=True)

    else:
        logger.info("初始化新的PEFT模型")
        # ...otherwise initialize a fresh LoRA adapter.
        lora_config = LoraConfig(
            task_type=TaskType.CAUSAL_LM,
            inference_mode=False,              # training mode (not inference)
            target_modules=training_args.lora_target.split(','),
            r=training_args.lora_rank,          # LoRA rank: trades capacity vs. parameter count
            lora_alpha=training_args.lora_alpha,    # scaling factor for the LoRA updates
            lora_dropout=training_args.lora_dropout, # dropout applied inside the adapters
        )
        # Wrap the base model; only the adapter weights are trainable.
        model = get_peft_model(model, peft_config=lora_config)

    # Wrap with a value head: in addition to next-token logits the model
    # outputs a scalar estimate per sequence, as needed for reward
    # modeling / RLHF-style training.
    model = AutoModelForCausalLMWithValueHead.from_pretrained(model)

    # When resuming, also restore the value-head weights saved alongside
    # the adapter ('adapter_model.bin').
    if model_args.peft_path is not None:
        lora_state_dict = torch.load(os.path.join(model_args.peft_path, 'adapter_model.bin'))
        model.v_head.load_state_dict({
            "summary.weight": lora_state_dict["v_head.summary.weight"],
            "summary.bias": lora_state_dict["v_head.summary.bias"],
        })

    # Report parameter counts.
    print('*********************模型*******************')
    print_trainable_params(model)

    # Trade compute for memory during backprop.
    model.gradient_checkpointing_enable()
    # Disable the KV cache (incompatible with gradient checkpointing).
    model.config.use_cache = False

    return model, tokenizer

# Data-processing function: builds pairwise (chosen/rejected) training examples.
def process_data(model_args, data_args, training_args, tokenizer):
    # Tokenize and preprocess one batch of raw examples into paired sequences.
    def process_tokenize(examples):
        # Initialize the model inputs
        model_inputs = {"input_ids": [], "label_ids": []}
        # Get the column names of the dataset
        columns = list(examples.keys())
        # logger.info(f"column names: {columns}")
        # Fetch the prompt template selected by the data arguments
        template = PROMPT_TEMPLATE[data_args.template]

        # Iterate over every example in the batch
        for index in range(len(examples[columns[0]])):
            # Two supported schemas: instruction/input/output, or prompt/chosen/rejected
            if 'chosen' not in columns or 'rejected' not in columns:
                # The dataset must then provide instruction, input and output columns
                assert 'instruction' in columns and 'input' in columns and 'output' in columns

                # Extract instruction, input and output for this example
                instruction, input, output = examples['instruction'][index], examples['input'][index], examples['output'][index]
                # If input is non-empty, append it to the instruction
                if input is not None and input != "":
                    instruction = instruction + '\n' + input
                # output must hold at least two candidates (chosen first, rejected second)
                assert len(output) > 1
                # Split into prompt, chosen and rejected
                prompt, chosen, rejected = instruction, output[0], output[1]
            else:
                # The dataset must provide prompt, rejected and chosen columns
                assert 'prompt' in columns and 'rejected' in columns and 'chosen' in columns
                prompt, chosen, rejected = examples['prompt'][index], examples['chosen'][index], examples['rejected'][index]

            # Format the prompt through the template
            source = template.format_map({'instruction': prompt})
            # Encode source, chosen and rejected without special tokens
            source_ids = tokenizer.encode(text=source, add_special_tokens=False)
            accepts_ids = tokenizer.encode(text=chosen, add_special_tokens=False)
            rejects_ids = tokenizer.encode(text=rejected, add_special_tokens=False)

            # Truncate each piece to its maximum length (one slot reserved for bos/eos)
            if len(source_ids) > training_args.max_prompt_length - 1:
                source_ids = source_ids[:training_args.max_prompt_length - 1]
            if len(accepts_ids) > training_args.max_response_length - 1:
                accepts_ids = accepts_ids[:training_args.max_response_length - 1]
            if len(rejects_ids) > training_args.max_response_length - 1:
                rejects_ids = rejects_ids[:training_args.max_response_length - 1]

            # Build accepted/rejected sequences and labels; prompt tokens are masked
            # with IGNORE_INDEX so only the response contributes to the LM loss.
            source_accepts_ids = source_ids + [tokenizer.bos_token_id] + accepts_ids + [tokenizer.eos_token_id]
            source_accepts_labels = [IGNORE_INDEX] * len(source_ids) + [tokenizer.bos_token_id] + accepts_ids + [tokenizer.eos_token_id]
            source_rejects_ids = source_ids + [tokenizer.bos_token_id] + rejects_ids + [tokenizer.eos_token_id]
            source_rejects_labels = [IGNORE_INDEX] * len(source_ids) + [tokenizer.bos_token_id] + rejects_ids + [tokenizer.eos_token_id]

            # Compute both lengths and the common padded length
            source_accepts_length, source_rejects_length = len(source_accepts_ids), len(source_rejects_ids)
            max_length = max(source_accepts_length, source_rejects_length)

            # Pad both sequences (and labels) to the same length
            source_accepts_ids = source_accepts_ids + [tokenizer.pad_token_id] * (max_length - source_accepts_length)
            source_accepts_labels = source_accepts_labels + [IGNORE_INDEX] * (max_length - source_accepts_length)
            source_rejects_ids = source_rejects_ids + [tokenizer.pad_token_id] * (max_length - source_rejects_length)
            source_rejects_labels = source_rejects_labels + [IGNORE_INDEX] * (max_length - source_rejects_length)

            # Concatenate accepted + rejected; presumably split apart again by the
            # pair data collator — confirm against PairDataCollatorWithPadding.
            inputs_ids = source_accepts_ids + source_rejects_ids
            labels = source_accepts_labels + source_rejects_labels

            # Append to the batch outputs
            model_inputs["input_ids"].append(inputs_ids)
            model_inputs["label_ids"].append(labels)

        return model_inputs

    # Process the dataset(s)
    logger.info("处理数据集")
    with training_args.main_process_first(desc="处理数据集"):
        # Case 1: a dataset directory was provided
        if data_args.dataset_dir is not None:
            all_datasets = []
            path = Path(data_args.dataset_dir)
            files = [file.name for file in path.glob("*.json")]
            for file in files:
                data_path = os.path.join(path, file)
                # Load the dataset from the json file
                raw_dataset = load_dataset(
                    "json",
                    data_files=data_path,
                )
                columns = list(raw_dataset.column_names.values())[0]
                # Tokenize the dataset with the closure above
                tokenized_data = raw_dataset.map(
                    process_tokenize,
                    batched=True,
                    num_proc=training_args.dataloader_num_workers,
                    remove_columns=columns,
                    load_from_cache_file=True
                )
                # Collect the processed 'train' split
                all_datasets.append(tokenized_data['train'])
            # Use a single dataset directly, otherwise concatenate them
            if len(all_datasets) == 1:
                all_datasets = all_datasets[0]
            else:
                all_datasets = concatenate_datasets(all_datasets)

            # Split into train and test sets
            all_datasets = all_datasets.train_test_split(test_size=data_args.split_ratio)
        # Case 2: explicit train and validation files were provided
        elif data_args.train_file is not None and data_args.validation_file is not None:
            all_datasets = {}
            # Load the training dataset from json
            raw_train_datasets = load_dataset(
                "json",
                data_files=data_args.train_file,
                cache_dir=data_args.data_cache_dir
            )
            columns = list(raw_train_datasets.column_names.values())[0]
            # Tokenize the training dataset
            all_datasets['train'] = raw_train_datasets.map(
                process_tokenize,
                batched=True,
                num_proc=training_args.dataloader_num_workers,
                remove_columns=columns,
                load_from_cache_file=True
            )['train']
            # Load the validation dataset from json
            raw_valid_datasets = load_dataset(
                "json",
                data_files=data_args.validation_file,
                cache_dir=data_args.data_cache_dir
            )
            # Tokenize the validation dataset
            # NOTE(review): reuses `columns` from the train file — assumes both
            # files share a schema; confirm.
            all_datasets['test'] = raw_valid_datasets.map(
                process_tokenize,
                batched=True,
                num_proc=training_args.dataloader_num_workers,
                remove_columns=columns,
                load_from_cache_file=True
            )['train']
        else:
            # Neither input style was provided — fail loudly
            raise ValueError(
                "数据集文件路径不正确。 "
                "您可以提供 --dataset_dir 或提供两个文件 --train_file 和 --validation_file。 "
            )

    return all_datasets


def main():
    """Program entry point: parse args, build the model, prepare data, train."""
    # Parse command-line arguments into model / data / training configs.
    model_args, data_args, training_args = parser_arguments(logger)
    # Seed all RNGs for reproducibility.
    transformers.set_seed(training_args.seed)

    # Build model + tokenizer, then tokenize the pairwise dataset.
    model, tokenizer = create_model(model_args, data_args, training_args)
    datasets = process_data(model_args, data_args, training_args, tokenizer)

    train_split = datasets['train'] if training_args.do_train else None
    eval_split = datasets['test'] if training_args.do_eval else None

    # Assemble the reward-model trainer.
    trainer = RMPeftTrainer(
        model=model,
        args=training_args,
        train_dataset=train_split,
        eval_dataset=eval_split,
        tokenizer=tokenizer,
        data_collator=PairDataCollatorWithPadding(tokenizer=tokenizer),
        compute_metrics=compute_metrics_for_pair,
    )

    if not training_args.do_train:
        return

    # Train, then record metrics and persist model + trainer state.
    result = trainer.train()
    trainer.log_metrics("train", result.metrics)
    trainer.save_metrics("train", result.metrics)
    trainer.save_state()
    trainer.save_model()

# Program entry point
if __name__ == "__main__":
    main()






class RMPeftTrainer(PeftTrainer):
    ...
    def compute_loss(self, model, inputs, return_outputs=False):
        # Forward pass on the accepted (chosen) sequences: returns the causal-LM
        # loss and per-token value estimates from the value head.
        _, accepts_clm_loss, accepts_value = model(
            input_ids=inputs["accepts_input_ids"], 
            attention_mask=inputs["accepts_attention_mask"], 
            labels=inputs["accepts_labels"], 
            return_dict=True
        )

        # Forward pass on the rejected sequences: only value estimates are used
        # (no labels passed, so no LM loss is computed for this branch).
        _, _, rejects_value = model(
            input_ids=inputs["rejects_input_ids"], 
            attention_mask=inputs["rejects_attention_mask"], 
            return_dict=True
        )
        # Rationale: chosen responses are the positive samples, so they also
        # contribute a language-modeling loss (keeping generation quality high),
        # while rejected responses only need value predictions to supply the
        # contrastive signal for the pairwise ranking loss below.
        
        # Labels for both branches
        accepts_labels, rejects_labels = inputs["accepts_labels"], inputs["rejects_labels"]
        
        # Action masks: 1 where the label is a real token, 0 where it is IGNORE_INDEX
        accepts_action_masks = accepts_labels.ne(IGNORE_INDEX).long()
        rejects_action_masks = rejects_labels.ne(IGNORE_INDEX).long()
        
        # Zero out value estimates at masked (prompt / padding) positions
        accepts_value = accepts_value * accepts_action_masks
        rejects_value = rejects_value * rejects_action_masks
        
        # Batch size of the paired inputs
        batch_size = accepts_value.shape[0]

        # Index of the last non-padding token of each sequence
        accepts_seq_lengths = (torch.ne(inputs["accepts_input_ids"], self.tokenizer.pad_token_id).sum(-1) - 1).to(accepts_value.device)
        rejects_seq_lengths = (torch.ne(inputs["rejects_input_ids"], self.tokenizer.pad_token_id).sum(-1) - 1).to(rejects_value.device)
        
        # Value estimate at the final valid token of each sequence
        accepts_end_token_value = accepts_value[torch.arange(batch_size, device=accepts_value.device), accepts_seq_lengths]
        rejects_end_token_value = rejects_value[torch.arange(batch_size, device=rejects_value.device), rejects_seq_lengths]
        
        # Pairwise ranking loss: chosen should score higher than rejected
        if self.args.use_last_reward:
            # Use only the last-token values
            loss1 = -torch.nn.functional.logsigmoid(accepts_end_token_value - rejects_end_token_value).mean()
        else:
            # Use the full (masked) per-token value sequences.
            # NOTE(review): positions masked in BOTH branches contribute
            # logsigmoid(0) terms to the mean — confirm this is intended.
            loss1 = -torch.nn.functional.logsigmoid(accepts_value - rejects_value).mean()
        
        # Weighted causal-LM loss on the chosen responses
        loss2 = self.args.clm_loss_weight * accepts_clm_loss

        # Combined loss: ranking loss + weighted LM loss
        loss = loss1 + loss2 
        
        # Expose per-example end-token values for metric computation
        outputs = dict(
            accepts_end_token_value=accepts_end_token_value,    # shape: (batch_size,)
            rejects_end_token_value=rejects_end_token_value,    # shape: (batch_size,)
        )

        # Return (loss, outputs) or just the loss, matching Trainer's contract
        return (loss, outputs) if return_outputs else loss
// !-------------------------  model table row count ------------------------------------------------------

  // Cached NodeList of <tr> elements found on the last count (set by count_row).
  var trElements;

  // Index of the last row visited by count_row's loop.
  // NOTE(review): this ends up as rowCount - 1, not the row count — confirm intent.
  var tr_count;

  const count_row = () => {
    // console.log()

    // Locate the table body by its CSS class (assumes class "tbody", not the tag).
    var tbodyElement = document.querySelector(".tbody");

    trElements = tbodyElement.querySelectorAll("tr");

    var numberOfTr = trElements.length;

    console.log("Number of <tr> elements in tbody: " + numberOfTr);

    // Walk the rows, pushing each index into state.
    // NOTE(review): setTr_countt is defined elsewhere (presumably a React state
    // setter); calling it once per row triggers repeated updates — confirm.
    for (let i = 0; i < trElements.length; i++) {
      tr_count = i;

      setTr_countt(tr_count);

      console.log("i is ==>", i);
    }

    // document.querySelector(".td_data").innerHTML = 1;
  };

  // console.log(trElements);

  // NOTE(review): logged before count_row ever runs, so this prints undefined.
  console.log("first", tr_count);

  // console.log(numberOfTr);

  // count_row();


# 引入必要的库
import os
import sys
import torch
import logging
import math
import numpy as np
from typing import Dict
import transformers
from transformers import (
    AutoConfig,
    AutoTokenizer,
    LlamaForCausalLM,
    LlamaTokenizer,
    Trainer,
    DataCollatorWithPadding,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
)

# 将上级目录添加到系统路径中,这样可以引用上级目录中的模块
sys.path.append("..")

# 引入自定义模块,包括模型配置、任务类型定义等
from peft import LoraConfig, PeftModel, TaskType, get_peft_model
from pathlib import Path
from datasets import load_dataset, concatenate_datasets
from itertools import chain
from utils.parser_args import parser_arguments
from utils.metrics import compute_metrics_for_pair
from utils.trainer import PeftTrainer, RMPeftTrainer
from trl import AutoModelForCausalLMWithValueHead
from utils.data_collator import PairDataCollatorWithPadding
from utils.utils import PROMPT_TEMPLATE

# Set up the module-level logger
logger = logging.getLogger(__name__)

# Label value ignored by the cross-entropy loss (masks prompt/padding tokens)
IGNORE_INDEX = -100

# Registry mapping a model type to its (config, tokenizer, model) classes
MODEL_CLASSES = {
    "llama": (AutoConfig, LlamaTokenizer, LlamaForCausalLM),
    "auto": (AutoConfig, AutoTokenizer, AutoModelForCausalLM),
}


# Print the number of trainable parameters of a model
def print_trainable_params(model: torch.nn.Module) -> None:
    """Print trainable / total parameter counts and the trainable percentage.

    Adapted from: https://github.com/LLaMA-Efficient-Tuning-main/src/utils/other.py
    """
    trainable_params, all_param = 0, 0
    for param in model.parameters():
        num_params = param.numel()
        # Under DeepSpeed ZeRO-3 the weights are partitioned and numel() can be 0;
        # fall back to the DS-provided full element count.
        if num_params == 0 and hasattr(param, "ds_numel"):
            num_params = param.ds_numel
        all_param += num_params
        if param.requires_grad:
            trainable_params += num_params
    # Guard against ZeroDivisionError for models with no parameters.
    pct = 100 * trainable_params / all_param if all_param else 0.0
    print(f"可训练参数数量: {trainable_params} || 总参数数量: {all_param} || 可训练参数百分比: {pct:.4f}")


# Model-creation function: returns (model-with-value-head, tokenizer)
def create_model(model_args, data_args, training_args):
    # Resolve config, tokenizer and model classes from the model type
    config_class, tokenizer_class, model_class = MODEL_CLASSES[model_args.model_type]
    # Load the tokenizer from its own path if given, otherwise from the model path
    if model_args.tokenizer_name_or_path is None:
        tokenizer = tokenizer_class.from_pretrained(model_args.model_name_or_path, use_fast=model_args.use_fast_tokenizer)
    else:
        tokenizer = tokenizer_class.from_pretrained(model_args.tokenizer_name_or_path, use_fast=model_args.use_fast_tokenizer)
    # Default the pad token id to 0 when the tokenizer does not define one
    tokenizer.pad_token_id = 0 if tokenizer.pad_token_id is None else tokenizer.pad_token_id

    # Keyword arguments for loading the base model
    config_kwargs = {
        "trust_remote_code": True,
        "torch_dtype": model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype),
        "low_cpu_mem_usage": True,
    }
    # Configure 4-bit loading (nf4, double quantization, bf16 compute) when requested
    if model_args.load_in_4bit:
        config_kwargs["load_in_4bit"] = True
        config_kwargs["quantization_config"] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.bfloat16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
        )

    # Load the pretrained base model
    model = model_class.from_pretrained(
        pretrained_model_name_or_path=model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        **config_kwargs
    )

    # If a PEFT adapter path was given, load the existing adapter in trainable mode
    if model_args.peft_path is not None:
        logger.info(f"加载预训练模型: {model_args.peft_path}")
        model = PeftModel.from_pretrained(model, model_args.peft_path, is_trainable=True)

    else:
        logger.info("初始化新的PEFT模型")
        # Configure LoRA (Low-Rank Adaptation) hyper-parameters
        lora_config = LoraConfig(
            task_type=TaskType.CAUSAL_LM,
            inference_mode=False,
            target_modules=training_args.lora_target.split(','),
            r=training_args.lora_rank,
            lora_alpha=training_args.lora_alpha,
            lora_dropout=training_args.lora_dropout,
        )
        # Wrap the base model with the LoRA adapters
        model = get_peft_model(model, peft_config=lora_config)

    # Attach a value head (reward/value estimation) on top of the causal LM
    model = AutoModelForCausalLMWithValueHead.from_pretrained(model)

    # When resuming from an adapter path, also restore the saved value-head weights
    if model_args.peft_path is not None:
        lora_state_dict = torch.load(os.path.join(model_args.peft_path, 'adapter_model.bin'))
        model.v_head.load_state_dict({
            "summary.weight": lora_state_dict["v_head.summary.weight"],
            "summary.bias": lora_state_dict["v_head.summary.bias"],
        })

    # Print model information
    print('*********************模型*******************')
    print_trainable_params(model)

    # Enable gradient checkpointing to save memory during training
    model.gradient_checkpointing_enable()
    # Disable the transformers decoding cache during training
    model.config.use_cache = False

    return model, tokenizer

# Data-processing function: builds pairwise (chosen/rejected) training examples.
def process_data(model_args, data_args, training_args, tokenizer):
    # Tokenize and preprocess one batch of raw examples into paired sequences.
    def process_tokenize(examples):
        # Initialize the model inputs
        model_inputs = {"input_ids": [], "label_ids": []}
        # Get the column names of the dataset
        columns = list(examples.keys())
        # logger.info(f"column names: {columns}")
        # Fetch the prompt template selected by the data arguments
        template = PROMPT_TEMPLATE[data_args.template]

        # Iterate over every example in the batch
        for index in range(len(examples[columns[0]])):
            # Two supported schemas: instruction/input/output, or prompt/chosen/rejected
            if 'chosen' not in columns or 'rejected' not in columns:
                # The dataset must then provide instruction, input and output columns
                assert 'instruction' in columns and 'input' in columns and 'output' in columns

                # Extract instruction, input and output for this example
                instruction, input, output = examples['instruction'][index], examples['input'][index], examples['output'][index]
                # If input is non-empty, append it to the instruction
                if input is not None and input != "":
                    instruction = instruction + '\n' + input
                # output must hold at least two candidates (chosen first, rejected second)
                assert len(output) > 1
                # Split into prompt, chosen and rejected
                prompt, chosen, rejected = instruction, output[0], output[1]
            else:
                # The dataset must provide prompt, rejected and chosen columns
                assert 'prompt' in columns and 'rejected' in columns and 'chosen' in columns
                prompt, chosen, rejected = examples['prompt'][index], examples['chosen'][index], examples['rejected'][index]

            # Format the prompt through the template
            source = template.format_map({'instruction': prompt})
            # Encode source, chosen and rejected without special tokens
            source_ids = tokenizer.encode(text=source, add_special_tokens=False)
            accepts_ids = tokenizer.encode(text=chosen, add_special_tokens=False)
            rejects_ids = tokenizer.encode(text=rejected, add_special_tokens=False)

            # Truncate each piece to its maximum length (one slot reserved for bos/eos)
            if len(source_ids) > training_args.max_prompt_length - 1:
                source_ids = source_ids[:training_args.max_prompt_length - 1]
            if len(accepts_ids) > training_args.max_response_length - 1:
                accepts_ids = accepts_ids[:training_args.max_response_length - 1]
            if len(rejects_ids) > training_args.max_response_length - 1:
                rejects_ids = rejects_ids[:training_args.max_response_length - 1]

            # Build accepted/rejected sequences and labels; prompt tokens are masked
            # with IGNORE_INDEX so only the response contributes to the LM loss.
            source_accepts_ids = source_ids + [tokenizer.bos_token_id] + accepts_ids + [tokenizer.eos_token_id]
            source_accepts_labels = [IGNORE_INDEX] * len(source_ids) + [tokenizer.bos_token_id] + accepts_ids + [tokenizer.eos_token_id]
            source_rejects_ids = source_ids + [tokenizer.bos_token_id] + rejects_ids + [tokenizer.eos_token_id]
            source_rejects_labels = [IGNORE_INDEX] * len(source_ids) + [tokenizer.bos_token_id] + rejects_ids + [tokenizer.eos_token_id]

            # Compute both lengths and the common padded length
            source_accepts_length, source_rejects_length = len(source_accepts_ids), len(source_rejects_ids)
            max_length = max(source_accepts_length, source_rejects_length)

            # Pad both sequences (and labels) to the same length
            source_accepts_ids = source_accepts_ids + [tokenizer.pad_token_id] * (max_length - source_accepts_length)
            source_accepts_labels = source_accepts_labels + [IGNORE_INDEX] * (max_length - source_accepts_length)
            source_rejects_ids = source_rejects_ids + [tokenizer.pad_token_id] * (max_length - source_rejects_length)
            source_rejects_labels = source_rejects_labels + [IGNORE_INDEX] * (max_length - source_rejects_length)

            # Concatenate accepted + rejected; presumably split apart again by the
            # pair data collator — confirm against PairDataCollatorWithPadding.
            inputs_ids = source_accepts_ids + source_rejects_ids
            labels = source_accepts_labels + source_rejects_labels

            # Append to the batch outputs
            model_inputs["input_ids"].append(inputs_ids)
            model_inputs["label_ids"].append(labels)

        return model_inputs

    # Process the dataset(s)
    logger.info("处理数据集")
    with training_args.main_process_first(desc="处理数据集"):
        # Case 1: a dataset directory was provided
        if data_args.dataset_dir is not None:
            all_datasets = []
            path = Path(data_args.dataset_dir)
            files = [file.name for file in path.glob("*.json")]
            for file in files:
                data_path = os.path.join(path, file)
                # Load the dataset from the json file
                raw_dataset = load_dataset(
                    "json",
                    data_files=data_path,
                )
                columns = list(raw_dataset.column_names.values())[0]
                # Tokenize the dataset with the closure above
                tokenized_data = raw_dataset.map(
                    process_tokenize,
                    batched=True,
                    num_proc=training_args.dataloader_num_workers,
                    remove_columns=columns,
                    load_from_cache_file=True
                )
                # Collect the processed 'train' split
                all_datasets.append(tokenized_data['train'])
            # Use a single dataset directly, otherwise concatenate them
            if len(all_datasets) == 1:
                all_datasets = all_datasets[0]
            else:
                all_datasets = concatenate_datasets(all_datasets)

            # Split into train and test sets
            all_datasets = all_datasets.train_test_split(test_size=data_args.split_ratio)
        # Case 2: explicit train and validation files were provided
        elif data_args.train_file is not None and data_args.validation_file is not None:
            all_datasets = {}
            # Load the training dataset from json
            raw_train_datasets = load_dataset(
                "json",
                data_files=data_args.train_file,
                cache_dir=data_args.data_cache_dir
            )
            columns = list(raw_train_datasets.column_names.values())[0]
            # Tokenize the training dataset
            all_datasets['train'] = raw_train_datasets.map(
                process_tokenize,
                batched=True,
                num_proc=training_args.dataloader_num_workers,
                remove_columns=columns,
                load_from_cache_file=True
            )['train']
            # Load the validation dataset from json
            raw_valid_datasets = load_dataset(
                "json",
                data_files=data_args.validation_file,
                cache_dir=data_args.data_cache_dir
            )
            # Tokenize the validation dataset
            # NOTE(review): reuses `columns` from the train file — assumes both
            # files share a schema; confirm.
            all_datasets['test'] = raw_valid_datasets.map(
                process_tokenize,
                batched=True,
                num_proc=training_args.dataloader_num_workers,
                remove_columns=columns,
                load_from_cache_file=True
            )['train']
        else:
            # Neither input style was provided — fail loudly
            raise ValueError(
                "数据集文件路径不正确。 "
                "您可以提供 --dataset_dir 或提供两个文件 --train_file 和 --validation_file。 "
            )

    return all_datasets


def main():
    """Program entry point: parse args, build the model, prepare data, train."""
    # Parse command-line arguments into model / data / training configs.
    model_args, data_args, training_args = parser_arguments(logger)
    # Seed all RNGs for reproducibility.
    transformers.set_seed(training_args.seed)

    # Build model + tokenizer, then tokenize the pairwise dataset.
    model, tokenizer = create_model(model_args, data_args, training_args)
    datasets = process_data(model_args, data_args, training_args, tokenizer)

    train_split = datasets['train'] if training_args.do_train else None
    eval_split = datasets['test'] if training_args.do_eval else None

    # Assemble the reward-model trainer.
    trainer = RMPeftTrainer(
        model=model,
        args=training_args,
        train_dataset=train_split,
        eval_dataset=eval_split,
        tokenizer=tokenizer,
        data_collator=PairDataCollatorWithPadding(tokenizer=tokenizer),
        compute_metrics=compute_metrics_for_pair,
    )

    if not training_args.do_train:
        return

    # Train, then record metrics and persist model + trainer state.
    result = trainer.train()
    trainer.log_metrics("train", result.metrics)
    trainer.save_metrics("train", result.metrics)
    trainer.save_state()
    trainer.save_model()

# Program entry point
if __name__ == "__main__":
    main()
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from statsmodels.formula.api import ols

# One-way ANOVA: does the mean total_bill differ across days (tips dataset)?
tips=sns.load_dataset('tips')
tips

model=ols('total_bill~day',data=tips).fit()
one_anova_table=sm.stats.anova_lm(model,typ=1)
print("\n One way anova result based on day and time:")
print(one_anova_table)

# p-value for the day factor, copied by hand from the table above.
# NOTE(review): hard-coded; re-read it from one_anova_table if the data changes.
p_val=0.042454

# BUG FIX: the original tested the undefined name `p_val1`, raising NameError;
# the value assigned above is `p_val`.
if p_val>0.05:
    print('Accept H0')
else:
    print('Reject H0')
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from statsmodels.formula.api import ols

# Two-way ANOVA: effect of day and time on total_bill (tips dataset).
tips=sns.load_dataset('tips')
tips

model=ols('total_bill~day+time',data=tips).fit()
two_anova_table=sm.stats.anova_lm(model,typ=2)
print("\n Two anova result based on day and time:")
print(two_anova_table)

# p-values copied by hand from the table above (day, then time).
# NOTE(review): hard-coded; re-read them from two_anova_table if the data changes.
p_val1=0.510480
p_val2=0.127347

# Decision for the day factor at the 5% significance level
if p_val1>0.05:
    print('Accept H0')
else:
    print('Reject H0')

# Decision for the time factor at the 5% significance level
if p_val2>0.05:
    print('Accept H0')
else:
    print('Reject H0')
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.formula.api import ols

# Completely Randomized Design (CRD) ANOVA: 30 yields, 5 varieties x 6 replicates.
y=(320,428,353,331,358,400,372,455,375,328,383,308,350,417,400,325, 378,275,340, 360, 356,370,395,375,398,358,334,340,320,430)
y

# Treatment labels: six observations for each of varieties v1..v5
t=('v1','v1','v1','v1','v1','v1','v2','v2','v2','v2','v2','v2','v3','v3','v3','v3','v3','v3','v4','v4','v4','v4','v4','v4','v5','v5','v5','v5','v5','v5',)
t
   
df=pd.DataFrame({'Yield':y,'Treatment':t})
print(df)                 

model=ols('Yield~Treatment',data=df).fit()
crd_table=sm.stats.anova_lm(model,typ=1)
print(crd_table)

# p-value for the treatment factor, copied by hand from the table above.
# NOTE(review): hard-coded; re-read it from crd_table if the data changes.
p_val= 0.990685

# Decision at the 5% significance level
if p_val>0.05:
    print('Accept H0 ')
else:
    print('Reject H0 ')
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.formula.api import ols

# Randomized Block Design (RBD) ANOVA: 4 blocks (A-D) x 6 treatments (1-6).
y=(24.7,20.6,27.7,16.2,16.2,24.9,27.3,28.8,22.7,15,13,22.5,38.5,39.5,36.8,19.5,15.4,26.3,28.5,31,34.9,14.1,17.7,22.6)
print(y)

# BUG FIX: the original block labels contained data-entry errors ('8' instead of
# 'B', and 'C ' with a trailing space), which created spurious block levels and
# corrupted the ANOVA. Each block is exactly 'A'..'D' with six observations.
b=('A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'C', 'C', 'C', 'C', 'C', 'C', 'D', 'D', 'D', 'D', 'D', 'D')
print(b)

t=('1', '2', '3', '4', '5', '6', '1', '2', '3', '4', '5', '6', '1', '2', '3', '4', '5', '6', '1', '2', '3', '4', '5', '6', )
print(t)

df=pd.DataFrame({'Yield':y,'Treatment':t,'Blocks':b})
print(df)

model=ols('Yield~Treatment+Blocks',data=df).fit()
rbd_table=sm.stats.anova_lm(model,typ=2)
print(rbd_table)

# p-values copied by hand from the ANOVA table (treatments, then blocks).
# NOTE(review): these were produced with the corrupted labels above —
# re-read them from rbd_table now that the data is fixed.
p_val_t=0.000545
p_val_b=0.019082

if p_val_t>0.05:
    print('Accept H0 for treatments')
else:
    print('Reject H0 for treatments')

if p_val_b>0.05:
    print('Accept H0 for blocks')
else:
    print('Reject H0 for blocks')    

<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <link rel="stylesheet" href="css/style.css" />
    <link
      rel="stylesheet"
      href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.2.1/css/all.min.css"
    />
    <script defer src="js/script.js"></script>
    <title>Music Player | OnlineITtuts Tutorials</title>
  </head>
  <body>
    <!-- Full-page backdrop image behind the player card -->
    <div class="background">
      <img src="imgs_audio/img-1.jpg" id="bg_img" />
    </div>
<div class="container">
      <!--===============Player Image================= -->
      <div class="player_img">
        <img src="imgs_audio/img-1.jpg" id="cover" class="active" />
      </div>
      <!--============Player Content -->
      <h2 id="music_title">Capital Letters</h2>
      <!-- NOTE(review): id "musric_artist" looks like a typo for "music_artist";
           confirm how js/script.js selects this element before renaming it. -->
      <h3 id="musric_artist">Lorem, ipsum dolor.</h3>
      <!--==============Player Progress & Timmer -->
      <div class="player_progress" id="player_progress">
        <div class="progress" id="progress">
          <div class="music_duration">
            <span id="current_time">0:00</span>
            <span id="duration">0:00</span>
          </div>
        </div>
      </div>
      <!--==============Player Controllers -->
      <div class="player_controls">
        <i class="fa-solid fa-backward" title="Previous" id="prev"></i>
        <i class="fa-solid fa-play play-button" title="Play" id="play"></i>
        <i class="fa-solid fa-forward" title="Next" id="next"></i>
      </div>
    </div>
  </body>
</html>
<script src="https://cloud.ccm19.de/app.js?apiKey=8adfabfc1f648282356beb71e69d8d58ec227cbcced11fb7&amp;domain=6622ff47d0829a86d9046e82" referrerpolicy="origin"></script>
<script src="https://cloud.ccm19.de/app.js?apiKey=8adfabfc1f648282356beb71e69d8d58ec227cbcced11fb7&amp;domain=6622ff47d0829a86d9046e82" referrerpolicy="origin"></script>
 https://gist.github.com/aalexandr21/23794f7473a46de2e024a2542afb3155

I added it here: https://prnt.sc/5Oc1TgkXsaGc
I also added the CSS class hide_when_use_filter to the listings that need to be hidden when a filter is active: https://prnt.sc/XaE35kAG5Ddp

/**Snippet**/


jQuery(document).ready(function ($) {
    // Once JetSmartFilters has initialised, watch the active-filter set and
    // toggle the visibility of listings marked with .hide_when_use_filter.
    document.addEventListener('jet-smart-filters/inited', function (initEvent) {
        JetSmartFilters.events.subscribe('activeItems/change', function (activeItems) {
            var hasActiveFilters = Boolean(activeItems && activeItems.length >= 1);
            var $hiddenListings = $(".hide_when_use_filter");
            if (hasActiveFilters) {
                $hiddenListings.slideUp();
            } else {
                $hiddenListings.slideDown();
            }
        });
    });
});
import os

# Define a list of known malware signatures
# Known malware signatures to search for inside scanned files.
malware_signatures = [
    "malware_signature_1",
    "malware_signature_2",
    # Add more signatures as needed
]

def scan_file(file_path):
    """Return True if the file at ``file_path`` contains any known signature."""
    with open(file_path, "rb") as handle:
        data = handle.read()
    # Signatures are stored as text; compare against the raw bytes.
    return any(sig.encode() in data for sig in malware_signatures)

def scan_directory(directory):
    """Recursively scan ``directory``; return paths of files matching a signature."""
    return [
        os.path.join(root, name)
        for root, _dirs, file_names in os.walk(directory)
        for name in file_names
        if scan_file(os.path.join(root, name))
    ]

if __name__ == "__main__":
    # Directory to scan (placeholder path — replace before running)
    target_directory = "/path/to/directory"

    # Scan the directory tree for files containing known signatures
    malware_files = scan_directory(target_directory)

    # Report the findings
    if malware_files:
        print("Malware detected in the following files:")
        for file_path in malware_files:
            print(file_path)
    else:
        print("No malware detected.")
star

Mon Apr 22 2024 19:40:54 GMT+0000 (Coordinated Universal Time)

@Amlan #c

star

Mon Apr 22 2024 19:21:37 GMT+0000 (Coordinated Universal Time)

@Amlan #c

star

Mon Apr 22 2024 18:37:29 GMT+0000 (Coordinated Universal Time)

@Amlan #c

star

Mon Apr 22 2024 18:34:34 GMT+0000 (Coordinated Universal Time)

@Amlan #c

star

Mon Apr 22 2024 18:31:26 GMT+0000 (Coordinated Universal Time)

@Amlan #c

star

Mon Apr 22 2024 16:29:09 GMT+0000 (Coordinated Universal Time)

@Amlan #c

star

Mon Apr 22 2024 16:20:29 GMT+0000 (Coordinated Universal Time)

@Amlan #c

star

Mon Apr 22 2024 14:53:54 GMT+0000 (Coordinated Universal Time)

@Angel

star

Mon Apr 22 2024 10:14:20 GMT+0000 (Coordinated Universal Time)

@nishpod

star

Mon Apr 22 2024 09:11:20 GMT+0000 (Coordinated Universal Time) https://trailhead.salesforce.com/content/learn/modules/visualforce_fundamentals/visualforce_standard_controllers?trailmix_creator_id

@Mannan2105 #java

star

Mon Apr 22 2024 09:10:05 GMT+0000 (Coordinated Universal Time) https://trailhead.salesforce.com/content/learn/modules/visualforce_fundamentals/visualforce_standard_controllers?trailmix_creator_id

@Mannan2105 #java

star

Mon Apr 22 2024 06:26:16 GMT+0000 (Coordinated Universal Time)

@signup

star

Mon Apr 22 2024 06:03:54 GMT+0000 (Coordinated Universal Time)

@signup

star

Mon Apr 22 2024 06:03:16 GMT+0000 (Coordinated Universal Time) https://trailhead.salesforce.com/content/learn/modules/visualforce_fundamentals/visualforce_creating_pages?trailmix_creator_id

@Mannan2105

star

Mon Apr 22 2024 05:55:37 GMT+0000 (Coordinated Universal Time)

@shirogan3x

star

Mon Apr 22 2024 05:55:12 GMT+0000 (Coordinated Universal Time)

@shirogan3x

star

Mon Apr 22 2024 05:37:58 GMT+0000 (Coordinated Universal Time)

@signup

star

Mon Apr 22 2024 03:16:12 GMT+0000 (Coordinated Universal Time)

@darshcode #sql

star

Sun Apr 21 2024 20:36:39 GMT+0000 (Coordinated Universal Time)

@harunmunjal #java

star

Sun Apr 21 2024 15:33:49 GMT+0000 (Coordinated Universal Time)

@absarhan

star

Sun Apr 21 2024 13:05:26 GMT+0000 (Coordinated Universal Time)

@Divya ##array

star

Sun Apr 21 2024 12:45:49 GMT+0000 (Coordinated Universal Time)

@kubo56

star

Sun Apr 21 2024 12:12:38 GMT+0000 (Coordinated Universal Time) https://codevalidator.aut.ac.nz/autmoodle1/mod/quiz/attempt.php?attempt

@meanaspotato

star

Sun Apr 21 2024 11:23:25 GMT+0000 (Coordinated Universal Time)

@meanaspotato #c

star

Sun Apr 21 2024 11:23:02 GMT+0000 (Coordinated Universal Time)

@meanaspotato #c

star

Sun Apr 21 2024 11:20:42 GMT+0000 (Coordinated Universal Time)

@meanaspotato #c

star

Sun Apr 21 2024 08:03:42 GMT+0000 (Coordinated Universal Time)

@mubashar

star

Sun Apr 21 2024 06:59:35 GMT+0000 (Coordinated Universal Time)

@meanaspotato #c

star

Sat Apr 20 2024 22:31:44 GMT+0000 (Coordinated Universal Time)

@davidmchale #width #min()

star

Sat Apr 20 2024 22:09:02 GMT+0000 (Coordinated Universal Time)

@davidmchale #variable #declaration

star

Sat Apr 20 2024 18:11:50 GMT+0000 (Coordinated Universal Time)

@Paloma #bash

star

Sat Apr 20 2024 17:03:31 GMT+0000 (Coordinated Universal Time)

@Paloma #typescript

star

Sat Apr 20 2024 14:14:25 GMT+0000 (Coordinated Universal Time) https://darkwebmarketbuyer.com/product/acxion-phentermine-c-30-mg-30-caps/

@darkwebmarket

star

Sat Apr 20 2024 13:17:46 GMT+0000 (Coordinated Universal Time) https://blog.logrocket.com/5-ways-make-http-requests-node-js/

@USFAkbari #javascript

star

Sat Apr 20 2024 10:35:22 GMT+0000 (Coordinated Universal Time)

@freepythoncode ##python #coding #python #image #pil #pillow

star

Sat Apr 20 2024 06:23:38 GMT+0000 (Coordinated Universal Time) https://paste.openstack.org/show/b7duBLP15n0fCjivoqER/

@yichaojoey

star

Sat Apr 20 2024 06:04:55 GMT+0000 (Coordinated Universal Time)

@codeing

star

Sat Apr 20 2024 05:53:34 GMT+0000 (Coordinated Universal Time) https://paste.openstack.org/show/bTCdkp0FxYQoa1f9JnRT/

@yichaojoey

star

Sat Apr 20 2024 02:26:31 GMT+0000 (Coordinated Universal Time)

@mrv

star

Sat Apr 20 2024 02:25:43 GMT+0000 (Coordinated Universal Time)

@mrv

star

Sat Apr 20 2024 01:42:26 GMT+0000 (Coordinated Universal Time)

@mrv

star

Sat Apr 20 2024 01:39:53 GMT+0000 (Coordinated Universal Time)

@mrv

star

Sat Apr 20 2024 00:33:52 GMT+0000 (Coordinated Universal Time) https://arhaanali.medium.com/build-a-music-player-with-html-css-and-javascript-53ff3e168e1e

@Chijunior

star

Fri Apr 19 2024 23:42:19 GMT+0000 (Coordinated Universal Time) https://cloud.ccm19.de/domains/6622ff47d0829a86d9046e82/dashboard

@Angel

star

Fri Apr 19 2024 23:37:05 GMT+0000 (Coordinated Universal Time) https://cloud.ccm19.de/domains/6622ff47d0829a86d9046e82/dashboard

@Angel

star

Fri Apr 19 2024 22:18:17 GMT+0000 (Coordinated Universal Time) https://learn.microsoft.com/en-us/windows/wsl/install

@dw

star

Fri Apr 19 2024 20:03:25 GMT+0000 (Coordinated Universal Time)

@Y@sir #filter #jetsmartfilters

star

Fri Apr 19 2024 19:33:05 GMT+0000 (Coordinated Universal Time) z

@Cyberspider #python

Save snippets that work with our extensions

Available in the Chrome Web Store Get Firefox Add-on Get VS Code extension