Snippets Collections
#include <stdio.h>

int main(void)
{
    int number;
    int result = 1;

    // Prompt the user to input a non-negative whole number
    printf("Enter a non-negative whole number: ");
    scanf("%d", &number);

    // Check if the input is negative
    if (number < 0)
    {
        printf("\nBad Input! %d is negative...\n", number);
        return 1; // Terminate the program with an error code
    }
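
    // Note: a 32-bit int overflows past 12! (13! = 6,227,020,800 > INT_MAX),
    // so for larger inputs consider unsigned long long and the %llu format.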

    // Compute the factorial
    for (int i = number; i >= 1; i--)
    {
        result *= i;
    }

    // Display the factorial result
    printf("\n%d! is %d\n", number, result);

    return 0;
}
#include <stdio.h>

int main()
{
    int start;
    int stop;
    int step;
    int iteration = 0;
    
    printf("Starting number:\n");
    scanf("%d",&start);
    printf("Stopping number:\n");
    scanf("%d",&stop);
    printf("Step size:\n");
    scanf("%d",&step);
    
    printf("Using a for loop:\n");
    printf("\nStarting at %d...\n\n",start);
    
    for(int i = start; i<=stop;i+=step)
    {
        printf("In loop: %d...\n",i);
        iteration++;
    }
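
    // Sanity check: for start <= stop and step > 0 the loop runs
    // (stop - start) / step + 1 times (integer division).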
    
    
    printf("\nStopping at %d...\n",stop);
    printf("\nThis loop did %d iterations.\n",iteration);

    printf("\n\n");

    return 0;
}
input::file-selector-button{
  border-radius: 100px;
  padding: 8px 30px;
  margin-right: 12px;
  background: #F4F5F0;
  border: none;
}
@media (max-width: 640px) {
    .flex-col>* {
        width: 100%;
    }
}
.scrollable-description {
  max-height: 500px; /* Adjust this value as needed */
  overflow-y: auto;
}
.grad{
  background: linear-gradient(176deg, rgba(244, 245, 240, 0.00) -5.82%, #F4F5F0 97.33%);
}
#include <stdio.h>

int main()
{
    char rank;
    char suit;
    char valid_rank[100];
    char valid_suit[100];

    printf("Suit (d/h/s/c):\n");
    scanf(" %c", &suit);

    printf("Rank (A/2/3/4/5/6/7/8/9/T/J/Q/K):\n");
    scanf(" %c", &rank);

    switch(rank)
    {
        case 'A':
            sprintf(valid_rank, "Ace of");
            break;

        case '2':
            sprintf(valid_rank, "Two of");
            break;

        case '3':
            sprintf(valid_rank, "Three of");
            break;

        case '4':
            sprintf(valid_rank, "Four of");
            break;

        case '5':
            sprintf(valid_rank, "Five of");
            break;

        case '6':
            sprintf(valid_rank, "Six of");
            break;

        case '7':
            sprintf(valid_rank, "Seven of");
            break;

        case '8':
            sprintf(valid_rank, "Eight of");
            break;

        case '9':
            sprintf(valid_rank, "Nine of");
            break;

        case 'T':
            sprintf(valid_rank, "Ten of");
            break;

        case 'J':
            sprintf(valid_rank, "Jack of");
            break;

        case 'Q':
            sprintf(valid_rank, "Queen of");
            break;

        case 'K':
            sprintf(valid_rank, "King of");
            break;

        default:
            printf("Invalid Rank\n");
            return 1;
    }

    switch(suit)
    {
        case 'd':
            sprintf(valid_suit, " Diamonds");
            break;

        case 'h':
            sprintf(valid_suit, " Hearts");
            break;

        case 's':
            sprintf(valid_suit, " Spades");
            break;

        case 'c':
            sprintf(valid_suit, " Clubs");
            break;

        default:
            printf("Invalid Suit\n");
            return 1;
    }

    printf("%s%s\n", valid_rank, valid_suit);

    return 0;
}
const s = 'Hello World';

console.log(s);
console.log(s.__proto__); // inspect in the console to list every property available on a string
                          // (Object.getPrototypeOf(s) is the standards-compliant equivalent)

// or use
console.dir(s);
.element {
  width: min(90%, 1200px); /* use 90% of the container width, capped at 1200px */
}
// declaring multiple variables at once
let a, b, c;
a = "days of summer";

const d = 10,
  e = 20,
  f = 30; // cannot be reassigned but used as an initial value for something

const baseNumber = d;

function getNumber(baseNumber, str) {
  return `${baseNumber} ${str}`;
}

console.log("get Number => ", getNumber(d, a));
git pull origin [branch name]
git add .
git commit -m "[commit message]"
git push origin [branch name]
git remote -v                                    # list configured remotes
git remote add origin [remote URL]
git remote remove origin
git checkout -b [branch name]                    # create a new branch and switch to it
git checkout [branch name]                       # switch to an existing branch
git rebase --continue
git merge --continue
git push origin [branch name] --force-with-lease
<script setup lang="ts">
import { ref } from "vue";

type CarouselElement = {
  link: string;
  image: string;
  label: string;
  countryCode?: string;
};

interface Props {
  carouselElements: CarouselElement[];
}

const props = withDefaults(defineProps<Props>(), {
  carouselElements: () => [
    {
      link: "/",
      image: "/assets/images/100.webp",
      label: "Phenom 100",
    },
    {
      link: "/",
      image: "/assets/images/100.webp",
      label: "Phenom 100",
    },
    {
      link: "/",
      image: "/assets/images/100.webp",
      label: "Phenom 100",
    },
  ],
});
const leftArrowRef = ref<HTMLButtonElement | null>(null);
const rightArrowRef = ref<HTMLButtonElement | null>(null);
const showArrows = ref(false);
const scrollableContainerRef = ref<HTMLDivElement | null>(null);

const scroll = (direction: "left" | "right") => {
  if (scrollableContainerRef.value) {
    const scrollAmount = 300;

    if (direction === "left") {
      scrollableContainerRef.value.scrollTo({
        left: scrollableContainerRef.value.scrollLeft - scrollAmount,
        behavior: "smooth",
      });
    } else {
      scrollableContainerRef.value.scrollTo({
        left: scrollableContainerRef.value.scrollLeft + scrollAmount,
        behavior: "smooth",
      });
    }
  }
};
</script>
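
<!-- A minimal template sketch (added, hypothetical markup): wires the refs and
     scroll() above to two arrow buttons and a horizontally scrollable track. -->
<template>
  <div @mouseenter="showArrows = true" @mouseleave="showArrows = false">
    <button v-show="showArrows" ref="leftArrowRef" @click="scroll('left')">&lsaquo;</button>
    <div ref="scrollableContainerRef" style="display: flex; overflow-x: auto">
      <a v-for="(el, i) in props.carouselElements" :key="i" :href="el.link">
        <img :src="el.image" :alt="el.label" />
      </a>
    </div>
    <button v-show="showArrows" ref="rightArrowRef" @click="scroll('right')">&rsaquo;</button>
  </div>
</template>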
const http = require("http");

const html = `
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Document</title>
  </head>
  <body>
    <form action="/submit-form" enctype="application/x-www-form-urlencoded" method="POST">
      <label> Enter Name: 
        <input type="text" autocomplete="name" name="name" required />
      </label>
      <input type="submit" />
    </form>
  </body>
</html>
`;
const server = http.createServer((req, res) => {
  switch (req.method) {
    case "GET":
      if (req.url === "/") {
        res.writeHead(200, { "Content-Type": "text/html" });
        res.end(html);
      } else {
        res.writeHead(404, { "Content-Type": "text/plain" });
        res.end("Page not found");
      }
      break;
    case "POST":
      if (req.url === "/submit-form") {
        let body = "";
        req.on("data", (data) => {
          body += data;
        });

        req.on("end", () => {
          console.log("Request body:  " + body);
          // Parse, validate, and sanitize
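          // A minimal parsing sketch (added): the form posts
          // application/x-www-form-urlencoded, so Node's global URLSearchParams
          // can decode it before validation.
          const params = new URLSearchParams(body);
          const name = params.get("name"); // still untrusted input -- validate before use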
          res.writeHead(200, { "Content-Type": "application/json" });
          res.end(JSON.stringify({ body }));
        });
      } else {
        res.writeHead(404, { "Content-Type": "text/plain" });
        res.end("Page not found");
      }
      break;
    default:
      res.writeHead(405, { "Content-Type": "text/plain" });
      res.end("Method not supported");
  }
});

const PORT = process.env.PORT || 3000;

server.listen(PORT, () => {
  console.log(`Your app is listening on PORT ${PORT}`);
});
from PIL import Image, ImageDraw, ImageOps

def convert_image_to_circle(image_path):
    my_image = Image.open(image_path)
    # use a square mask (min of width/height) so the cut-out is a circle,
    # not an ellipse, for non-square inputs
    size = (min(my_image.size),) * 2
    mask = Image.new('L', size)
    draw = ImageDraw.Draw(mask)
    # draw a white (opaque) circle on the black (transparent) mask
    draw.ellipse((0, 0) + size, fill=255)

    # center-crop the image to the mask size, then apply the mask as the alpha channel
    output = ImageOps.fit(my_image, size, centering=(0.5, 0.5))
    output.putalpha(mask)
    output.save('out.png')


convert_image_to_circle('test.png')
# Import the required libraries
import os
import sys
import torch
import logging
import math
import numpy as np
from typing import Dict
import transformers
from transformers import (
    AutoConfig,
    AutoTokenizer,
    LlamaForCausalLM,
    LlamaTokenizer,
    Trainer,
    DataCollatorWithPadding,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
)

# Add the parent directory to the system path so its modules can be imported
sys.path.append("..")

# Import project modules, including model configuration and task-type definitions
from peft import LoraConfig, PeftModel, TaskType, get_peft_model
from pathlib import Path
from datasets import load_dataset, concatenate_datasets
from itertools import chain
from utils.parser_args import parser_arguments
from utils.metrics import compute_metrics_for_pair
from utils.trainer import PeftTrainer, RMPeftTrainer
from trl import AutoModelForCausalLMWithValueHead
from utils.data_collator import PairDataCollatorWithPadding
from utils.utils import PROMPT_TEMPLATE

# Set up the logger
logger = logging.getLogger(__name__)

# Index ignored by the cross-entropy loss at masked label positions
IGNORE_INDEX = -100

# Map each model type to its config, tokenizer, and model classes
MODEL_CLASSES = {
    "llama": (AutoConfig, LlamaTokenizer, LlamaForCausalLM),
    "auto": (AutoConfig, AutoTokenizer, AutoModelForCausalLM),
}


# Print the number of trainable parameters in the model
def print_trainable_params(model: torch.nn.Module) -> None:
    # Adapted from: https://github.com/LLaMA-Efficient-Tuning-main/src/utils/other.py
    # Counts and prints the trainable and total parameter counts
    trainable_params, all_param = 0, 0
    for param in model.parameters():
        num_params = param.numel()
        # With DeepSpeed ZeRO-3 the weights may be empty; use ds_numel instead
        if num_params == 0 and hasattr(param, "ds_numel"):
            num_params = param.ds_numel
        all_param += num_params
        if param.requires_grad:
            trainable_params += num_params
    print(f"Trainable params: {trainable_params} || Total params: {all_param} || Trainable %: {100 * trainable_params / all_param:.4f}")


# Build the model and tokenizer
def create_model(model_args, data_args, training_args):
    # Look up the config, tokenizer, and model classes for this model type
    config_class, tokenizer_class, model_class = MODEL_CLASSES[model_args.model_type]
    # If no tokenizer path is given, load the tokenizer from the pretrained model path
    if model_args.tokenizer_name_or_path is None:
        tokenizer = tokenizer_class.from_pretrained(model_args.model_name_or_path, use_fast=model_args.use_fast_tokenizer)
    else:
        tokenizer = tokenizer_class.from_pretrained(model_args.tokenizer_name_or_path, use_fast=model_args.use_fast_tokenizer)
    # Set the pad token id; default to 0 if the tokenizer does not define one
    tokenizer.pad_token_id = 0 if tokenizer.pad_token_id is None else tokenizer.pad_token_id

    # Model loading keyword arguments
    config_kwargs = {
        "trust_remote_code": True,
        "torch_dtype": model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype),
        "low_cpu_mem_usage": True,
    }
    # If the model should be loaded in 4-bit, set the quantization config
    if model_args.load_in_4bit:
        config_kwargs["load_in_4bit"] = True
        config_kwargs["quantization_config"] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.bfloat16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
        )

    # Load the pretrained model
    model = model_class.from_pretrained(
        pretrained_model_name_or_path=model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        **config_kwargs
    )

    # If a PEFT adapter path was given, load the existing adapter
    if model_args.peft_path is not None:
        logger.info(f"Loading PEFT adapter: {model_args.peft_path}")
        model = PeftModel.from_pretrained(model, model_args.peft_path, is_trainable=True)

    else:
        logger.info("Initializing a new PEFT model")
        # Configure LoRA (Low-Rank Adaptation)
        lora_config = LoraConfig(
            task_type=TaskType.CAUSAL_LM,
            inference_mode=False,                    # False means the adapter is in training mode
            target_modules=training_args.lora_target.split(','),
            r=training_args.lora_rank,               # LoRA rank; controls adapter size and model capacity
            lora_alpha=training_args.lora_alpha,     # LoRA alpha; scales the adapter's effective learning rate
            lora_dropout=training_args.lora_dropout, # dropout rate used inside the LoRA adapter
        )
        # Wrap the base model with the LoRA adapter to get a PEFT model
        # A PEFT model keeps the pretrained weights frozen and updates only the LoRA
        # adapter parameters, which cuts the trainable parameter count, keeps the base
        # model's performance, and makes task-specific fine-tuning cheaper to compute.
        model = get_peft_model(model, peft_config=lora_config)

    # Wrap the causal language model with a value head.
    # AutoModelForCausalLMWithValueHead is a special model type used in reinforcement
    # learning and related tasks where the model must both predict actions and estimate
    # values (such as a value function). It combines a causal language model with an
    # extra "value head": besides generating the next likely token (the standard LM
    # behavior), it outputs a scalar that estimates the expected return of a state or
    # action, which is especially useful when the model must weigh long-term payoff.
    # Combined with PEFT and LoRA adapters, this aims to improve task performance while
    # keeping decisions grounded in expected long-term reward.
    # Weighted combined loss: Loss = λ × LM loss + (1 − λ) × value-head loss
    model = AutoModelForCausalLMWithValueHead.from_pretrained(model)

    # If a PEFT adapter path was given, also restore the value-head weights
    if model_args.peft_path is not None:
        lora_state_dict = torch.load(os.path.join(model_args.peft_path, 'adapter_model.bin'))
        model.v_head.load_state_dict({
            "summary.weight": lora_state_dict["v_head.summary.weight"],
            "summary.bias": lora_state_dict["v_head.summary.bias"],
        })

    # Print model information
    print('********************* Model *******************')
    print_trainable_params(model)

    # Enable gradient checkpointing to save memory
    model.gradient_checkpointing_enable()
    # Set config.use_cache to False, disabling the transformers generation cache
    model.config.use_cache = False

    return model, tokenizer

# Data processing
def process_data(model_args, data_args, training_args, tokenizer):
    # Preprocess and tokenize the raw examples
    def process_tokenize(examples):
        # Initialize the model inputs
        model_inputs = {"input_ids": [], "label_ids": []}
        # Get the column names
        columns = list(examples.keys())
        # logger.info(f"Columns: {columns}")
        # Look up the prompt template named by the data arguments
        template = PROMPT_TEMPLATE[data_args.template]

        # Iterate over every example
        for index in range(len(examples[columns[0]])):
            # Check which columns the data provides
            if 'chosen' not in columns or 'rejected' not in columns:
                # The data must then contain instruction, input, and output columns
                assert 'instruction' in columns and 'input' in columns and 'output' in columns

                # Read the instruction, input, and output fields
                instruction, input, output = examples['instruction'][index], examples['input'][index], examples['output'][index]
                # If input is non-empty, append it to the instruction
                if input is not None and input != "":
                    instruction = instruction + '\n' + input
                # output must hold at least two responses
                assert len(output) > 1
                # Take the prompt, chosen, and rejected texts
                prompt, chosen, rejected = instruction, output[0], output[1]
            else:
                # The data must contain prompt, rejected, and chosen columns
                assert 'prompt' in columns and 'rejected' in columns and 'chosen' in columns
                prompt, chosen, rejected = examples['prompt'][index], examples['chosen'][index], examples['rejected'][index]

            # Format the prompt with the template
            source = template.format_map({'instruction': prompt})
            # Tokenize the source, chosen, and rejected texts
            source_ids = tokenizer.encode(text=source, add_special_tokens=False)
            accepts_ids = tokenizer.encode(text=chosen, add_special_tokens=False)
            rejects_ids = tokenizer.encode(text=rejected, add_special_tokens=False)

            # Truncate any sequence that exceeds its maximum length
            if len(source_ids) > training_args.max_prompt_length - 1:
                source_ids = source_ids[:training_args.max_prompt_length - 1]
            if len(accepts_ids) > training_args.max_response_length - 1:
                accepts_ids = accepts_ids[:training_args.max_response_length - 1]
            if len(rejects_ids) > training_args.max_response_length - 1:
                rejects_ids = rejects_ids[:training_args.max_response_length - 1]

            # Build the accepted and rejected sequences and their labels
            source_accepts_ids = source_ids + [tokenizer.bos_token_id] + accepts_ids + [tokenizer.eos_token_id]
            source_accepts_labels = [IGNORE_INDEX] * len(source_ids) + [tokenizer.bos_token_id] + accepts_ids + [tokenizer.eos_token_id]
            source_rejects_ids = source_ids + [tokenizer.bos_token_id] + rejects_ids + [tokenizer.eos_token_id]
            source_rejects_labels = [IGNORE_INDEX] * len(source_ids) + [tokenizer.bos_token_id] + rejects_ids + [tokenizer.eos_token_id]

            # Compute both sequence lengths and take the maximum
            source_accepts_length, source_rejects_length = len(source_accepts_ids), len(source_rejects_ids)
            max_length = max(source_accepts_length, source_rejects_length)

            # Pad both sequences to the maximum length
            source_accepts_ids = source_accepts_ids + [tokenizer.pad_token_id] * (max_length - source_accepts_length)
            source_accepts_labels = source_accepts_labels + [IGNORE_INDEX] * (max_length - source_accepts_length)
            source_rejects_ids = source_rejects_ids + [tokenizer.pad_token_id] * (max_length - source_rejects_length)
            source_rejects_labels = source_rejects_labels + [IGNORE_INDEX] * (max_length - source_rejects_length)

            # Concatenate the accepted and rejected sequences and labels
            inputs_ids = source_accepts_ids + source_rejects_ids
            labels = source_accepts_labels + source_rejects_labels

            # Append the processed sequences and labels to the model inputs
            model_inputs["input_ids"].append(inputs_ids)
            model_inputs["label_ids"].append(labels)

        return model_inputs

    # Process the datasets
    logger.info("Processing datasets")
    with training_args.main_process_first(desc="Processing datasets"):
        # If a dataset directory was given
        if data_args.dataset_dir is not None:
            all_datasets = []
            path = Path(data_args.dataset_dir)
            files = [file.name for file in path.glob("*.json")]
            for file in files:
                data_path = os.path.join(path, file)
                # Load the dataset from a JSON file
                raw_dataset = load_dataset(
                    "json",
                    data_files=data_path,
                )
                columns = list(raw_dataset.column_names.values())[0]
                # Tokenize the dataset with the function above
                tokenized_data = raw_dataset.map(
                    process_tokenize,
                    batched=True,
                    num_proc=training_args.dataloader_num_workers,
                    remove_columns=columns,
                    load_from_cache_file=True
                )
                # Collect the processed dataset
                all_datasets.append(tokenized_data['train'])
            # Use the single dataset directly, or concatenate multiple datasets
            if len(all_datasets) == 1:
                all_datasets = all_datasets[0]
            else:
                all_datasets = concatenate_datasets(all_datasets)

            # Split the data into train and test sets
            all_datasets = all_datasets.train_test_split(test_size=data_args.split_ratio)
        # If explicit train and validation files were given
        elif data_args.train_file is not None and data_args.validation_file is not None:
            all_datasets = {}
            # Load the training dataset from a JSON file
            raw_train_datasets = load_dataset(
                "json",
                data_files=data_args.train_file,
                cache_dir=data_args.data_cache_dir
            )
            columns = list(raw_train_datasets.column_names.values())[0]
            # Tokenize the training dataset
            all_datasets['train'] = raw_train_datasets.map(
                process_tokenize,
                batched=True,
                num_proc=training_args.dataloader_num_workers,
                remove_columns=columns,
                load_from_cache_file=True
            )['train']
            # Load the validation dataset from a JSON file
            raw_valid_datasets = load_dataset(
                "json",
                data_files=data_args.validation_file,
                cache_dir=data_args.data_cache_dir
            )
            # Tokenize the validation dataset
            all_datasets['test'] = raw_valid_datasets.map(
                process_tokenize,
                batched=True,
                num_proc=training_args.dataloader_num_workers,
                remove_columns=columns,
                load_from_cache_file=True
            )['train']
        else:
            # The dataset paths are not set correctly
            raise ValueError(
                "Dataset file paths are not set correctly. "
                "Provide either --dataset_dir, or both --train_file and --validation_file."
            )

    return all_datasets


def main():
    # Entry point: parse arguments, build the model, process the data, and train

    # Parse command-line arguments
    model_args, data_args, training_args = parser_arguments(logger)
    # Set the random seed for reproducibility
    transformers.set_seed(training_args.seed)

    # Build the model and tokenizer
    model, tokenizer = create_model(model_args, data_args, training_args)
    # Process the data
    all_datasets = process_data(model_args, data_args, training_args, tokenizer)

    # Build the trainer from the model, training arguments, and datasets
    trainer = RMPeftTrainer(
        model=model,
        args=training_args,
        train_dataset=all_datasets['train'] if training_args.do_train else None,
        eval_dataset=all_datasets['test'] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=PairDataCollatorWithPadding(tokenizer=tokenizer),
        compute_metrics=compute_metrics_for_pair,
    )

    # If training is enabled
    if training_args.do_train:
        # Start training
        output = trainer.train()
        # Log the training metrics
        trainer.log_metrics("train", output.metrics)
        # Save the training metrics
        trainer.save_metrics("train", output.metrics)
        # Save the trainer state and the model
        trainer.save_state()
        trainer.save_model()

# Program entry point
if __name__ == "__main__":
    main()


class RMPeftTrainer(PeftTrainer):
    ...
    def compute_loss(self, model, inputs, return_outputs=False):
        # Forward pass on the accepted responses: returns the causal language model
        # (CLM) loss and the per-token value estimates
        _, accepts_clm_loss, accepts_value = model(
            input_ids=inputs["accepts_input_ids"], 
            attention_mask=inputs["accepts_attention_mask"], 
            labels=inputs["accepts_labels"], 
            return_dict=True
        )

        # For the rejected responses only the value estimates are needed
        _, _, rejects_value = model(
            input_ids=inputs["rejects_input_ids"], 
            attention_mask=inputs["rejects_attention_mask"], 
            return_dict=True
        )
        # When training a reward model, accepted and rejected responses are computed
        # differently because they play different roles in the learning objective.
        # Separating "good" behavior from "bad" is central to decision-based training:
        # accepted responses are behavior the model should reinforce, rejected ones
        # behavior it should avoid. The accepted side gets both a value estimate and a
        # CLM loss, since these are positive samples the model should also learn to
        # predict and generate. The rejected side only needs value prediction: what
        # matters there is the model's ability to score it as a poor choice, not to
        # imitate it. Accepted responses are usually the more important signal, as they
        # map directly to the behavior wanted in deployment (appropriate responses,
        # correct decisions); rejected responses mainly supply the contrast that
        # teaches the model what to avoid.
        
        # Get the accepted and rejected labels
        accepts_labels, rejects_labels = inputs["accepts_labels"], inputs["rejects_labels"]
        
        # Build action masks separating valid positions from IGNORE_INDEX positions
        accepts_action_masks = accepts_labels.ne(IGNORE_INDEX).long()
        rejects_action_masks = rejects_labels.ne(IGNORE_INDEX).long()
        
        # Mask the value estimates so invalid label positions contribute nothing
        accepts_value = accepts_value * accepts_action_masks
        rejects_value = rejects_value * rejects_action_masks
        
        # Batch size
        batch_size = accepts_value.shape[0]

        # Compute the effective input lengths, excluding padding
        accepts_seq_lengths = (torch.ne(inputs["accepts_input_ids"], self.tokenizer.pad_token_id).sum(-1) - 1).to(accepts_value.device)
        rejects_seq_lengths = (torch.ne(inputs["rejects_input_ids"], self.tokenizer.pad_token_id).sum(-1) - 1).to(rejects_value.device)
        
        # Take the value estimate at the last valid token of each sequence
        accepts_end_token_value = accepts_value[torch.arange(batch_size, device=accepts_value.device), accepts_seq_lengths]
        rejects_end_token_value = rejects_value[torch.arange(batch_size, device=rejects_value.device), rejects_seq_lengths]
        
        # Compute loss1 from either the last-token reward or the whole-sequence reward
        if self.args.use_last_reward:
            # Log-sigmoid loss on the last-token value estimates
            loss1 = -torch.nn.functional.logsigmoid(accepts_end_token_value - rejects_end_token_value).mean()
        else:
            # Log-sigmoid loss on the value estimates over the whole sequence
            loss1 = -torch.nn.functional.logsigmoid(accepts_value - rejects_value).mean()
        
        # Weight the causal language model loss
        loss2 = self.args.clm_loss_weight * accepts_clm_loss

        # Combine the two loss terms
        loss = loss1 + loss2 
        
        # Prepare the outputs, including both end-token value estimates
        outputs = dict(
            accepts_end_token_value=accepts_end_token_value,    # shape: (batch_size,)
            rejects_end_token_value=rejects_end_token_value,    # shape: (batch_size,)
        )

        # Return the loss alone or the loss plus outputs, depending on return_outputs
        return (loss, outputs) if return_outputs else loss
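
# For intuition, the pairwise term above is the standard preference (Bradley-Terry
# style) reward-model loss. A minimal standalone sketch on made-up value estimates:
import torch
import torch.nn.functional as F

# Toy end-of-sequence values for a batch of 3 preference pairs (invented numbers)
accepts_end_token_value = torch.tensor([1.2, 0.3, -0.5])
rejects_end_token_value = torch.tensor([0.4, 0.9, -1.0])

# -logsigmoid(chosen - rejected) is small when the chosen response scores higher,
# and grows as the ordering flips, pushing the model to rank chosen above rejected.
loss = -F.logsigmoid(accepts_end_token_value - rejects_end_token_value).mean()
print(loss)  # ~0.63 here; a large positive margin on every pair drives it toward 0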
// !-------------------------  model table row count ------------------------------------------------------

  var trElements;

  var tr_count = 0;

  const count_row = () => {
    var tbodyElement = document.querySelector(".tbody");

    trElements = tbodyElement.querySelectorAll("tr");

    var numberOfTr = trElements.length;

    console.log("Number of <tr> elements in tbody: " + numberOfTr);

    // keep the count and push it into component state
    // (setTr_countt is the surrounding component's state setter)
    tr_count = numberOfTr;
    setTr_countt(tr_count);
  };

  // count_row();
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from statsmodels.formula.api import ols

tips=sns.load_dataset('tips')
tips

model=ols('total_bill~day',data=tips).fit()
one_anova_table=sm.stats.anova_lm(model,typ=1)
print("\n One way anova result based on day:")
print(one_anova_table)

p_val=0.042454

if p_val>0.05:
    print('Accept H0')
else:
    print('Reject H0')
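
# Instead of hard-coding the p-value, it can be read straight off the ANOVA table,
# which is a pandas DataFrame -- a small sketch; the same indexing works for the
# two-way and experimental-design tables below:
p_val = one_anova_table.loc['day', 'PR(>F)']
print(p_val)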
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from statsmodels.formula.api import ols

tips=sns.load_dataset('tips')
tips

model=ols('total_bill~day+time',data=tips).fit()
two_anova_table=sm.stats.anova_lm(model,typ=2)
print("\n Two way anova result based on day and time:")
print(two_anova_table)

p_val1=0.510480
p_val2=0.127347

if p_val1>0.05:
    print('Accept H0')
else:
    print('Reject H0')

if p_val2>0.05:
    print('Accept H0')
else:
    print('Reject H0')
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.formula.api import ols

y=(320,428,353,331,358,400,372,455,375,328,383,308,350,417,400,325, 378,275,340, 360, 356,370,395,375,398,358,334,340,320,430)
y

t=('v1','v1','v1','v1','v1','v1','v2','v2','v2','v2','v2','v2','v3','v3','v3','v3','v3','v3','v4','v4','v4','v4','v4','v4','v5','v5','v5','v5','v5','v5',)
t
   
df=pd.DataFrame({'Yield':y,'Treatment':t})
print(df)                 

model=ols('Yield~Treatment',data=df).fit()
crd_table=sm.stats.anova_lm(model,typ=1)
print(crd_table)

p_val= 0.990685

if p_val>0.05:
    print('Accept H0 ')
else:
    print('Reject H0 ')
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.formula.api import ols

y=(24.7,20.6,27.7,16.2,16.2,24.9,27.3,28.8,22.7,15,13,22.5,38.5,39.5,36.8,19.5,15.4,26.3,28.5,31,34.9,14.1,17.7,22.6)
print(y)

b=('A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'C', 'C', 'C', 'C', 'C', 'C', 'D', 'D', 'D', 'D', 'D', 'D')
print(b)

t=('1', '2', '3', '4', '5', '6', '1', '2', '3', '4', '5', '6', '1', '2', '3', '4', '5', '6', '1', '2', '3', '4', '5', '6', )
print(t)

df=pd.DataFrame({'Yield':y,'Treatment':t,'Blocks':b})
print(df)

model=ols('Yield~Treatment+Blocks',data=df).fit()
rbd_table=sm.stats.anova_lm(model,typ=2)
print(rbd_table)

p_val_t=0.000545
p_val_b=0.019082

if p_val_t>0.05:
    print('Accept H0 for treatments')
else:
    print('Reject H0 for treatments')

if p_val_b>0.05:
    print('Accept H0 for blocks')
else:
    print('Reject H0 for blocks')    

<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <link rel="stylesheet" href="css/style.css" />
    <link
      rel="stylesheet"
      href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.2.1/css/all.min.css"
    />
    <script defer src="js/script.js"></script>
    <title>Music Player | OnlineITtuts Tutorials</title>
  </head>
  <body>
    <div class="background">
      <img src="imgs_audio/img-1.jpg" id="bg_img" alt="" />
    </div>
    <div class="container">
      <!--===============Player Image================= -->
      <div class="player_img">
        <img src="imgs_audio/img-1.jpg" id="cover" class="active" alt="" />
      </div>
      <!--============Player Content -->
      <h2 id="music_title">Capital Letters</h2>
      <h3 id="musric_artist">Lorem, ipsum dolor.</h3>
      <!--==============Player Progress & Timer -->
      <div class="player_progress" id="player_progress">
        <div class="progress" id="progress">
          <div class="music_duration">
            <span id="current_time">0:00</span>
            <span id="duration">0:00</span>
          </div>
        </div>
      </div>
      <!--==============Player Controllers -->
      <div class="player_controls">
        <i class="fa-solid fa-backward" title="Previous" id="prev"></i>
        <i class="fa-solid fa-play play-button" title="Play" id="play"></i>
        <i class="fa-solid fa-forward" title="Next" id="next"></i>
      </div>
    </div>
  </body>
</html>
<script src="https://cloud.ccm19.de/app.js?apiKey=8adfabfc1f648282356beb71e69d8d58ec227cbcced11fb7&amp;domain=6622ff47d0829a86d9046e82" referrerpolicy="origin"></script>
https://gist.github.com/aalexandr21/23794f7473a46de2e024a2542afb3155

I added it here: https://prnt.sc/5Oc1TgkXsaGc
And to the listings that need to be hidden I added the CSS class hide_when_use_filter: https://prnt.sc/XaE35kAG5Ddp

/**Snippet**/


jQuery(document).ready(function ($) {
    document.addEventListener('jet-smart-filters/inited', function (initEvent) {
        JetSmartFilters.events.subscribe('activeItems/change', function (activeItems) {
            if (activeItems && activeItems.length >= 1) {
                $(".hide_when_use_filter").slideUp();
            }
            else {
                $(".hide_when_use_filter").slideDown();
            }
        });
    });
});
import os

# Define a list of known malware signatures
malware_signatures = [
    "malware_signature_1",
    "malware_signature_2",
    # Add more signatures as needed
]

def scan_file(file_path):
    """
    Scan a file for known malware signatures.
    """
    with open(file_path, "rb") as file:
        content = file.read()
        for signature in malware_signatures:
            if signature.encode() in content:
                return True
    return False

def scan_directory(directory):
    """
    Recursively scan a directory for files containing malware signatures.
    """
    malware_files = []
    for root, dirs, files in os.walk(directory):
        for file_name in files:
            file_path = os.path.join(root, file_name)
            if scan_file(file_path):
                malware_files.append(file_path)
    return malware_files

if __name__ == "__main__":
    # Directory to scan
    target_directory = "/path/to/directory"

    # Scan the directory for malware
    malware_files = scan_directory(target_directory)

    if malware_files:
        print("Malware detected in the following files:")
        for file_path in malware_files:
            print(file_path)
    else:
        print("No malware detected.")
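
# A chunked variant (sketch): reading whole files into memory does not scale to
# large files. This reads fixed-size blocks with a small overlap so signatures
# that straddle a block boundary are still found.
def scan_file_chunked(file_path, chunk_size=1024 * 1024):
    overlap = max(len(s) for s in malware_signatures) - 1
    encoded = [s.encode() for s in malware_signatures]
    with open(file_path, "rb") as file:
        tail = b""
        while True:
            chunk = file.read(chunk_size)
            if not chunk:
                return False
            window = tail + chunk
            if any(sig in window for sig in encoded):
                return True
            tail = window[-overlap:] if overlap > 0 else b""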
add_action('wp_footer', 'custom_hide_header_script');
function custom_hide_header_script() {
?>
<script type="text/javascript">
  (function() {
    var prevScrollpos = window.pageYOffset;
    window.onscroll = function() {
      var currentScrollPos = window.pageYOffset;
      // Scrolling up: reveal the header; scrolling down: slide it out of view.
      if (prevScrollpos > currentScrollPos) {
        document.getElementById("scrolling-header").style.top = "0";
      } else {
        document.getElementById("scrolling-header").style.top = "-100px";
      }
      prevScrollpos = currentScrollPos;
    }
  })();
</script>
<?php
}
?>
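A hedged variant of the same behavior: throttling with requestAnimationFrame avoids writing styles on every scroll event, and the null check guards pages that lack the element (the id and offsets are carried over from the snippet above):

// Sketch: same show/hide logic, throttled and guarded against a missing element.
(function () {
  var header = document.getElementById("scrolling-header");
  if (!header) return;

  var prevScrollpos = window.pageYOffset;
  var ticking = false;

  window.addEventListener("scroll", function () {
    if (ticking) return;
    ticking = true;
    requestAnimationFrame(function () {
      var currentScrollPos = window.pageYOffset;
      header.style.top = prevScrollpos > currentScrollPos ? "0" : "-100px";
      prevScrollpos = currentScrollPos;
      ticking = false;
    });
  });
})();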
const request = indexedDB.open("library");
let db;

request.onupgradeneeded = function() {
  // The database did not previously exist, so create object stores and indexes.
  const db = request.result;
  const store = db.createObjectStore("books", {keyPath: "isbn"});
  const titleIndex = store.createIndex("by_title", "title", {unique: true});
  const authorIndex = store.createIndex("by_author", "author");

  // Populate with initial data.
  store.put({title: "Quarry Memories", author: "Fred", isbn: 123456});
  store.put({title: "Water Buffaloes", author: "Fred", isbn: 234567});
  store.put({title: "Bedrock Nights", author: "Barney", isbn: 345678});
};

request.onsuccess = function() {
  db = request.result;
};
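Once the open request has succeeded and db is set, reading back is a plain transaction; a minimal sketch using the store and seed data from the snippet above:

function getBook(isbn, done) {
  const tx = db.transaction("books", "readonly");
  const req = tx.objectStore("books").get(isbn);
  req.onsuccess = function() {
    done(req.result); // undefined if no record matches
  };
}

getBook(123456, function(book) {
  console.log(book.title); // "Quarry Memories"
});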
<?php
    if (get_post_field('post_name', get_the_ID()) != 'your-doctors') {
      ?>
      <div class="our-specilist__btn is-style-light-btn">
        <a href="<?php echo site_url('/your-doctors'); ?>" class="btn">
          View All Doctors
        </a>
      </div>
      <?php
    }
    ?>
class Solution {
    public int findNumbers(int[] nums) {
        return checkEven(nums, 0, 0);
    }

    // Recursively walk the array, counting values whose digit count is even.
    // Note: the while loop divides nums[i] down to 0, so the input array is
    // consumed in place.
    public int checkEven(int[] nums, int i, int ans) {
        if (i < nums.length) {
            int n = 0;
            while (nums[i] > 0) { // count digits by repeated division
                nums[i] /= 10;
                n++;
            }
            if (n % 2 == 0) {
                ans++;
            }
            return checkEven(nums, i + 1, ans);
        }
        return ans;
    }
}
import * as esbuild from 'esbuild'

await esbuild.build({
  entryPoints: ['app.jsx'],
  bundle: true,
  minify: true,
  sourcemap: true,
  target: ['chrome58', 'firefox57', 'safari11', 'edge16'],
  outfile: 'out.js',
})
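The same options can also drive a rebuild-on-change loop via the context API (esbuild 0.17+); a minimal sketch mirroring the build() call above:

import * as esbuild from 'esbuild'

const ctx = await esbuild.context({
  entryPoints: ['app.jsx'],
  bundle: true,
  minify: true,
  sourcemap: true,
  target: ['chrome58', 'firefox57', 'safari11', 'edge16'],
  outfile: 'out.js',
})
await ctx.watch() // stays running and rebuilds out.js on file changes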
# show images
docker images

# show volumes
docker volume ls

# check whether a volume is attached to any container
docker ps -a --filter volume=<volume_name>

# remove a volume
docker volume rm <volume_name>

# automatically delete unused volumes
docker volume prune

# unpack mongo.tar into the data directory
tar -xvf mongo.tar -C /path/to/mongo-data

# run a container from an image with an attached named volume
# mongodb_dm - volume name
# mongo - container name
docker run -d --name mongo -v mongodb_dm:/data/db -p 27017:27017 mongo:latest
docker run -d --name mongo -v mongodb_dm:/data/db -p 27017:27017 mongo:4.4
# with an attached host directory instead of a named volume
docker run -d --name mongo -v /path/to/mongo-data:/data/db -p 27017:27017 mongo:4.4
docker run -d --name mongo -v mongo:/data/db -p 27017:27017 mongo:4.4

# run the mongo shell inside the container
docker exec -it mongo mongo
# with an attached volume and authentication enabled
docker run -d --name mongo -v /path/to/mongo-data:/data/db -p 27017:27017 mongo:4.4 --auth
docker exec -it mongo mongo -u "admin" -p "yourpassword" --authenticationDatabase "admin"

docker inspect <container_id>

# enter the container
docker exec -it <container ID> bash

# build and start in detached mode
docker-compose up --build -d

docker stop <container ID>
docker rm <container ID>


# Postgres with an attached volume (host directory)
docker run -d --name postgres -e POSTGRES_USER=<user> -e POSTGRES_PASSWORD=<password> -p 5432:5432 -v /path/to/postgres:/var/lib/postgresql/data postgres:14
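A sketch of producing the mongo.tar archive referenced above by backing up the named volume through a throwaway container (the alpine image and /backup path are arbitrary choices, not part of the original notes):

# back up the named volume into mongo.tar in the current directory
docker run --rm -v mongodb_dm:/data -v "$(pwd)":/backup alpine tar -cvf /backup/mongo.tar -C /data .
# restore it later by reversing the direction
docker run --rm -v mongodb_dm:/data -v "$(pwd)":/backup alpine tar -xvf /backup/mongo.tar -C /data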


Private-Key: (2048 bit)
modulus:
    00:c2:e9:c4:ce:89:e8:be:d4:08:38:28:a5:89:7d:
    ce:4d:8e:b2:cb:2b:1b:07:3e:5d:80:25:af:fd:87:
    43:84:48:60:94:c3:19:a8:19:c4:54:5d:2a:8a:6d:
    74:e9:e6:33:22:2a:ed:3e:9a:4c:cc:82:2c:1b:b7:
    c2:28:3a:63:c1:f7:02:fb:3d:2d:ad:c0:15:d0:58:
    b0:e0:a2:dc:9d:2e:70:23:6b:31:9d:c6:0a:8f:60:
    d7:2f:11:d7:4d:25:a7:2f:0a:17:ed:fc:c2:e9:bf:
    a2:06:c4:6d:e0:a6:26:eb:9b:0e:06:35:20:ad:20:
    05:bc:f7:e5:9d:4a:9b:1c:ff:ef:c7:a0:18:98:04:
    39:d0:7b:97:f3:be:ef:36:f3:c3:51:20:0a:ca:84:
    bb:c7:b3:e7:97:bc:4d:b0:ed:c1:0a:72:11:bd:74:
    29:73:6b:91:6e:bc:2a:6d:42:d7:0a:bc:8b:2e:90:
    e8:64:2e:5f:35:0a:c4:ee:ed:60:1e:b5:17:e5:7e:
    d5:cb:e1:40:d3:3e:47:3e:70:72:4e:f3:23:6e:e5:
    87:ee:ef:96:e5:09:c6:21:da:d4:fa:e4:69:4c:fe:
    f1:67:9a:a2:f7:be:73:75:6e:63:31:de:da:8d:c0:
    a0:8e:b2:37:1b:c0:91:ce:90:10:e6:54:99:d7:8b:
    a2:6d
publicExponent: 65537 (0x10001)
privateExponent:
    0f:b7:d4:54:27:90:ea:5a:a4:f3:0a:6c:17:03:5b:
    30:ab:f2:45:2a:0b:ba:f0:cf:8f:76:fa:93:42:66:
    32:73:b7:be:27:61:99:49:c2:a8:5e:ff:00:3c:c5:
    71:db:4b:82:54:dd:de:ef:88:83:f1:77:67:20:d1:
    7a:77:02:f6:70:5a:00:32:b5:cd:4e:31:34:25:02:
    d6:8a:ab:a9:cd:45:e3:c5:50:31:fd:f7:7a:0a:80:
    2f:de:8b:73:40:37:14:37:07:28:fc:fa:26:e9:7a:
    42:7b:72:b4:ec:11:35:6c:c2:9b:c3:8b:08:0b:0e:
    0b:68:1e:4f:29:e6:0a:57:81:4c:73:a3:48:c5:3b:
    6b:93:e1:61:71:41:78:0e:08:59:a3:26:8b:88:09:
    ca:48:65:1a:5c:13:7b:a4:f2:54:c8:10:81:d2:12:
    78:b6:d3:9d:0c:ec:3a:d9:2c:f5:6f:3a:e9:25:8f:
    3d:43:81:d4:9e:67:73:96:8d:cc:d4:f2:fa:6a:ea:
    e1:c8:15:97:8d:2d:82:df:3d:e3:c9:7d:c1:9b:03:
    85:86:74:ee:3f:88:24:09:0d:93:7a:96:7a:31:3f:
    94:1b:45:9a:2a:64:4c:6e:b4:d9:cf:8d:82:7f:74:
    ff:e2:9a:45:98:4d:b8:2c:ac:91:f0:b6:ea:4b:75:
    6d
prime1:
    00:e7:a9:05:e4:1f:25:67:88:4b:06:63:26:b3:a1:
    7e:b8:e6:95:4d:4e:14:9f:e2:e6:75:14:b5:ec:57:
    c9:1a:89:36:43:63:11:87:79:34:f1:83:c3:06:50:
    f5:4c:92:65:49:f9:2b:aa:b6:70:f0:82:78:f9:31:
    f1:e2:f8:c9:57:6d:2d:e8:6b:58:59:af:bc:ad:c6:
    3c:cf:9d:ca:59:54:8b:be:18:2b:47:5e:30:00:d0:
    1d:34:40:86:23:da:98:b2:43:2d:0c:f4:ee:1b:3c:
    7e:18:99:1b:7d:2a:4f:99:72:b0:e5:49:d8:15:a6:
    8f:c1:04:ed:e3:d6:70:28:0b
prime2:
    00:d7:64:5b:89:a8:12:c9:86:14:86:43:4d:bc:e6:
    9d:da:e9:dd:20:44:c0:d7:56:49:3d:18:73:53:c0:
    1a:87:21:28:f3:06:e5:98:fa:64:87:69:e7:e2:19:
    6c:e3:e8:46:2f:fe:96:94:e0:9c:f9:46:7d:a4:4f:
    2b:c3:82:e3:2f:f5:70:65:e8:68:61:da:2e:48:0a:
    8c:3d:f0:03:c1:79:e2:2f:ba:7a:c2:19:67:94:cd:
    ed:cb:85:b5:07:11:70:17:42:84:13:65:f7:fe:60:
    6a:89:a3:a4:71:f8:f9:68:68:1f:3f:4e:83:85:de:
    8c:75:57:ac:e7:f1:c8:52:67
exponent1:
    00:96:23:1b:28:4f:6f:44:7c:36:73:7b:4b:17:dc:
    30:6e:d1:32:2e:19:ce:90:18:d3:d7:49:86:e6:17:
    b6:c4:35:ed:2f:1b:43:bf:13:34:d6:d9:d8:49:7d:
    aa:9c:e7:bc:31:87:05:5f:ec:41:a1:fe:4d:3c:e3:
    bb:aa:a8:26:85:3c:c3:26:00:7c:10:14:2f:2a:48:
    72:1c:f4:5c:b2:2c:73:2c:ce:29:2e:65:c2:a2:a4:
    3e:69:20:f0:4e:c8:5b:72:1c:cb:53:bc:61:9c:d1:
    45:bf:d7:6a:d3:61:da:98:ce:96:0a:e4:22:e1:55:
    43:91:8b:0a:4a:41:2e:0b:25
exponent2:
    52:a4:e5:22:8a:64:bb:02:41:39:ca:f5:36:f7:dd:
    b5:b3:69:ef:09:b2:5c:6d:75:28:86:0b:2a:25:59:
    88:63:c6:cc:88:e6:eb:ad:97:89:05:af:bd:67:38:
    0a:b2:82:cb:c3:89:8f:92:27:68:d6:0d:3b:50:4d:
    60:75:06:2a:ce:a7:d3:d2:3a:ca:e0:e6:45:2a:16:
    e9:65:00:6b:ea:12:49:71:e9:cc:33:e7:34:87:57:
    28:72:23:25:95:21:47:2b:74:75:e4:f8:24:98:66:
    08:6d:cb:99:17:88:ca:f0:b8:9c:97:c9:8b:41:fa:
    ee:c3:37:4a:bf:d0:b7:1d
coefficient:
    39:f0:4c:6f:86:02:36:e5:19:cb:af:ca:ef:92:64:
    66:0b:81:0d:42:db:94:13:0e:5d:e2:66:d8:8e:a3:
    f7:c8:7e:d1:d4:46:73:3f:1d:b9:bb:24:dc:a0:c0:
    6f:3c:8e:6f:b3:c3:bb:a8:1a:ee:6a:0d:a2:50:d3:
    64:b6:c3:44:39:08:13:f1:fa:fc:2c:5d:b3:f3:7d:
    3a:77:8e:a8:4a:6a:f2:19:4b:3f:b7:5e:c3:45:3e:
    d4:e7:6e:75:c4:44:cc:5c:5d:53:0e:ea:e2:a5:33:
    e4:1c:d2:7d:6a:12:8b:70:a4:f6:61:d3:15:45:62:
    7d:2c:92:63:40:55:45:1e
/**
 *  Custom WooCommerce/WordPress translations, applied only on the front end.
 */

add_action('after_setup_theme', 'register_frontend_translation_filters');

function register_frontend_translation_filters() {
    if (!is_admin()) {
        add_filter('gettext', 'translate_reply');
        add_filter('ngettext', 'translate_reply');
    }
}

function translate_reply($translated) {
    $translated = str_ireplace('Enter your e-mail', 'Введите ваш email', $translated);
    $translated = str_ireplace('Просмотр корзины', 'товар в корзине', $translated);
    $translated = str_ireplace('Filters', 'поиск и фильтр товаров', $translated);
    $translated = str_ireplace('Show', 'показать', $translated);
    $translated = str_ireplace('Cancel', 'отмена', $translated);
    return $translated;
}



/**
 *  Alternative version: custom front-end translations that skip the
 *  WooCommerce checkout page, where the replacements caused an issue.
 *  Use this or the snippet above, not both (the function names collide).
 */

add_action('after_setup_theme', 'register_frontend_translation_filters');

function register_frontend_translation_filters() {
    if (!is_admin()) {
        add_filter('gettext', 'translate_reply');
        add_filter('ngettext', 'translate_reply');
    }
}

function translate_reply($translated) {
    // Exclude translations on the WooCommerce checkout page
    if (function_exists('is_checkout') && is_checkout()) {
        return $translated;
    }

    // Translate other strings
    $translated = str_ireplace('Enter your e-mail', 'Введите ваш email', $translated);
    $translated = str_ireplace('Просмотр корзины', 'товар в корзине', $translated);
    $translated = str_ireplace('Filters', 'фильтр товаров', $translated);
    $translated = str_ireplace('Show', 'показать', $translated);
    $translated = str_ireplace('Cancel', 'отмена', $translated);
    $translated = str_ireplace('Размещение заказа', 'Оформить заказ', $translated);
    
    return $translated;
}
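The gettext filter also receives the untranslated source string and the text domain; matching on the source exactly avoids accidental substring hits (e.g. 'Show' inside 'Showing results'). A minimal sketch of that variant (the string map is illustrative):

add_filter('gettext', function ($translated, $text, $domain) {
    // Exact-match replacements keyed on the untranslated source string.
    $map = [
        'Enter your e-mail' => 'Введите ваш email',
        'Cancel'            => 'отмена',
    ];
    return $map[$text] ?? $translated;
}, 10, 3);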
#include <stdio.h>

int main(void)
{
    int element[100];
    int number_element;

    printf("Input total number of elements required:\n");
    scanf("%d", &number_element);

    // Read the initial elements
    for (int i = 0; i < number_element; i++)
    {
        printf("Input element [%d]:\n", i);
        scanf("%d", &element[i]);
    }

    printf("\nBefore insertion:\n");

    for (int i = 0; i < number_element; i++)
    {
        printf("Element [%d] is %d\n", i, element[i]);
    }

    int new_value;
    int position;

    printf("\nInput a new value to insert:\n");
    scanf("%d", &new_value);
    printf("Input where to insert the value %d:\n", new_value);
    scanf("%d", &position);

    // Reject positions outside the current array bounds
    if (position < 0 || position > number_element)
    {
        printf("Invalid position!\n");
        return 1;
    }

    // Shift elements right to make space for the new value
    for (int i = number_element; i > position; i--)
    {
        element[i] = element[i - 1];
    }

    // Insert the new value at the specified position
    element[position] = new_value;
    number_element++;

    printf("After insertion:\n");

    for (int i = 0; i < number_element; i++)
    {
        printf("Element [%d] is %d\n", i, element[i]);
    }
    return 0;
}
import i18n from "@/lang/index"

// Resolve a translated string outside a component via the global instance
const tips = i18n.global.t('return')