Snippets Collections
// Write a program to reverse an array using pointers.
#include <stdio.h>

void reverseArray(int *arr, int size) {
    int *start = arr;
    int *end = arr + size - 1;
    int temp;

    // Swap elements using pointers
    while (start < end) {
        temp = *start;
        *start = *end;
        *end = temp;

        start++;
        end--;
    }
}

int main() {
    int arr[] = {1, 2, 3, 4, 5};
    int size = sizeof(arr) / sizeof(arr[0]);

    printf("Original Array: ");
    for (int i = 0; i < size; i++) {
        printf("%d ", arr[i]);
    }
    printf("\n");

    reverseArray(arr, size);

    printf("Reversed Array: ");
    for (int i = 0; i < size; i++) {
        printf("%d ", arr[i]);
    }
    printf("\n");

    return 0;
}
/* Flexbox practice snippets - reconstructed as complete #pond rules
   (the "#pond { display: flex;" opener was truncated in all but the last) */
#pond {
  display: flex;
  justify-content: center;
  align-items: center;
}

#pond {
  display: flex;
  justify-content: space-around;
  align-items: flex-end;
}

#pond {
  display: flex;
  flex-direction: row-reverse;
  justify-content: flex-end;
}

#pond {
  display: flex;
  flex-direction: column;
  justify-content: flex-end;
}

#pond {
  display: flex;
  flex-direction: column-reverse;
  justify-content: space-between;
}

#pond {
  display: flex;
  flex-direction: row-reverse;
  justify-content: center;
  align-items: flex-end;
}

#pond {
  display: flex;
  order: 3;
  align-self: flex-end;
}
#include <stdio.h>
#include <stdlib.h>

int main() {
    int rows = 3, cols = 3;

    // Allocate memory for a 2D array using a pointer to an array
    int (*arr)[cols] = (int (*)[cols])malloc(rows * cols * sizeof(int));

    if (arr == NULL) {
        printf("Memory allocation failed\n");
        return 1;
    }

    // Initialize the array
    int value = 1;
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            arr[i][j] = value++;
        }
    }

    // Access elements
    printf("Accessing dynamically allocated 2D array using pointer to an array:\n");
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            printf("%d ", arr[i][j]);
        }
        printf("\n");
    }

    // Free memory
    free(arr);

    return 0;
}
Please start the Screaming Frog SEO Spider, then in the top navigation click on 'Licence', followed by 'Enter Licence...' and insert the following details:
 
Username: fhsites15
Licence Key: 584BEBD129-1763856000-9E287F165A
 
Click OK.  You will then need to close and reopen the Screaming Frog SEO Spider before the crawl limits are removed and the configuration options are accessible.
 
Please note your licence key will expire on: 23 November 2025 GMT.

<FilesMatch "xmlrpc\.php$">
    <IfModule mod_authz_core.c>
        Require all denied
    </IfModule>
    <IfModule !mod_authz_core.c>
        Deny from all
    </IfModule>
</FilesMatch>

# BEGIN LSCACHE
## LITESPEED WP CACHE PLUGIN - Do not edit the contents of this block! ##
<IfModule LiteSpeed>
RewriteEngine on
CacheLookup on
RewriteRule .* - [E=Cache-Control:no-autoflush]
RewriteRule litespeed/debug/.*\.log$ - [F,L]
RewriteRule \.litespeed_conf\.dat - [F,L]
RewriteRule ^xmlrpc\.php$ - [F,L]
</IfModule>
# END LSCACHE

functions.php

// Disable XML-RPC functionality
add_filter('xmlrpc_enabled', '__return_false');

// Disable X-Pingback HTTP Header
add_filter('wp_headers', function($headers) {
    unset($headers['X-Pingback']);
    return $headers;
});

// Disable XML-RPC methods from being accessible
add_filter('xmlrpc_methods', function($methods) {
    return [];
});

// Prevent direct access to xmlrpc.php
add_action('init', function() {
    if (isset($_SERVER['REQUEST_URI']) && strpos($_SERVER['REQUEST_URI'], 'xmlrpc.php') !== false) {
        wp_die('Access denied', 'Error', ['response' => 403]);
    }
});
// Disable WP REST API by users - hide user names
add_filter( 'rest_endpoints', function( $endpoints ){
    if ( isset( $endpoints['/wp/v2/users'] ) ) {
        unset( $endpoints['/wp/v2/users'] );
    }
    if ( isset( $endpoints['/wp/v2/users/(?P<id>[\d]+)'] ) ) {
        unset( $endpoints['/wp/v2/users/(?P<id>[\d]+)'] );
    }
    return $endpoints;
});
<script type="text/javascript">
  jQuery(document).ready(function($){
    var fhScript = document.createElement('script');
    fhScript.src = "https://fareharbor.com/embeds/api/v1/?autolightframe=yes";
    $('body').append(fhScript);
  });
</script>
This does not fall under the responsibilities of the FH integration team, as it is unrelated to our integration, and it is up to the client to keep the content on their website updated.
I've made the updates; please inform the client that this is an exception.
CODA LICENSE NUMBER


PEGA-LATA-M5PV-FFFD-CLAT-A
{if isset($product_manufacturer->id)}
  <div class="prod-manufacturer">
    {if isset($manufacturer_image_url)}
      <a href="{$product_brand_url}">
        <img src="{$manufacturer_image_url}" class="img img-fluid manufacturer-logo" alt="{$product_manufacturer->name}" loading="lazy">
      </a>
    {else}
      <span>
        <a href="{$product_brand_url}">{$product_manufacturer->name}</a>
      </span>
    {/if}
  </div>
{/if}
import speech_recognition as sr
from googletrans import Translator
from gtts import gTTS
import os
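
These four imports are the setup for a speech-to-translated-speech pipeline: microphone capture, Google translation, and text-to-speech. A minimal sketch of how they fit together, assuming googletrans 3.x and an English-to-Hindi translation; the target language, prompt text, output filename, and player command are illustrative assumptions:

```python
import os

import speech_recognition as sr
from googletrans import Translator
from gtts import gTTS

recognizer = sr.Recognizer()
with sr.Microphone() as source:      # capture one utterance from the default microphone
    print("Speak now...")
    audio = recognizer.listen(source)

text = recognizer.recognize_google(audio)                    # speech -> text
result = Translator().translate(text, src="en", dest="hi")   # "hi" (Hindi) is an assumed target
print(f"{text} -> {result.text}")

gTTS(result.text, lang="hi").save("translated.mp3")          # text -> speech; filename is illustrative
os.system("mpg123 translated.mp3")                           # assumes a command-line mp3 player
```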
The Kumbh Mela is one of the largest religious gatherings in the world, held in India. It is a major Hindu pilgrimage and festival where tens of millions of devotees gather to bathe in sacred rivers, believing it will cleanse them of sins and lead to salvation.

The festival takes place every 12 years, rotating among four locations:

Prayagraj

Haridwar

Nashik

Ujjain

There are three forms of the festival:

Purna Kumbh Mela: held every 12 years at one of the four locations.

Ardh Kumbh Mela: held every 6 years at Haridwar and Prayagraj.

Maha Kumbh Mela: held every 144 years (12 Purna Kumbhs) at Prayagraj.

Around 40 crore (400 million) people are expected to visit over the 45-day period, and the government is spending about ₹7,000 crore while expecting to earn around ₹25,000 crore from hosting the Maha Kumbh Mela.

The Kumbh Mela attracts not only pilgrims but also tourists and photographers from across the globe, making it a significant event culturally, spiritually, and economically.

Mythologically, the festival is traced to the churning of the ocean (Samudra Manthan) by the gods and demons, during which drops of the nectar of immortality fell at the four locations.
from importlib.metadata import version, PackageNotFoundError

try:
    package = "zarr"
    package_version = version(package)
    major_version = int(package_version.split(".")[0])  # Extract the major version
    if major_version == 3:
        print(f"The package major version is {major_version}.")
        import zarr
        import fsspec
        # strip leading 's3://' from url
        url1 = url1[5:]
        url2 = url2[5:]
        fs = fsspec.filesystem("s3", asynchronous=True)
        store1 = zarr.storage.FsspecStore(fs, path=url1)
        store2 = zarr.storage.FsspecStore(fs, path=url2)
        file1 = zarr.open(store=store1)
        file2 = zarr.open(store=store2)
    else:
        print(f"The package major version is {major_version}.")
        import s3fs
        fs = s3fs.S3FileSystem(anon=True)
        file1 = s3fs.S3Map(url1, s3=fs)
        file2 = s3fs.S3Map(url2, s3=fs)
        
except PackageNotFoundError:
    print(f"{package} is not installed")
# multiverse is a repository component, not a package - enable it rather than installing it
sudo add-apt-repository multiverse

sudo apt-get update

sudo apt-get install ubuntu-restricted-extras
/* HUD CROSSHAIR */
.hud-crosshair{
    visibility: visible;
    position: fixed;
    width: 25px;
    height: 25px;
    left: 50%;
    top: 50%;
    margin-left: -13px;
    margin-top: -13px;
    z-index: 12;
}

.hud-crosshair div{
    background: #00ffb3;
    position: absolute;

}

.hud-crosshair-1, .hud-crosshair-2{
    width: 2px;
    height:10px;
    left: 12px;
}

.hud-crosshair-3, .hud-crosshair-4{
    width: 10px;
    height:2px;
    top: 12px;
}
# DEA between BC1 BC6 ####
# DEA of each community between BC1 and BC6 to see differences in gene expression
# I look particularly at immune communities 5-7 (defined at resolution 0.25)

# ensure assay is integrated 
DefaultAssay(so_donorint) <- "integrated"

# combine cluster number and condition info into new variable 'celltype.condition'
# then set it as identity class of the cells 
# consider - how cell clusters behave under different conditions
so_donorint$celltype.condition <- paste(so_donorint$seurat_clusters, so_donorint$Conditions, sep="_")
Idents(so_donorint) <- "celltype.condition"

# run DEA for 2 sets of cells BC1, BC6 - compare expression profiles 
uro_BC1vsBC6 <- FindMarkers(so_donorint, ident.1 = "3_BC1", ident.2 = "3_BC6", test.use = "bimod")

# check output 
head(uro_BC1vsBC6)

# prepare data needed for volcano plot, as previous 
uro_BC1vsBC6["p_val_adj"] <- lapply(uro_BC1vsBC6["p_val_adj"], pmax, 1e-300)
uro_BC1vsBC6$DEA <- "NO"
uro_BC1vsBC6$DEA[uro_BC1vsBC6$avg_log2FC > 1 & uro_BC1vsBC6$p_val_adj < 0.05] <- "UP"
uro_BC1vsBC6$DEA[uro_BC1vsBC6$avg_log2FC < -1 & uro_BC1vsBC6$p_val_adj < 0.05] <- "DOWN"
uro_BC1vsBC6$genes <- rownames(uro_BC1vsBC6)

# Volcano plot to visualize DEA results to compare between BC1 and BC6
ggplot(uro_BC1vsBC6, aes(x=avg_log2FC, y=-log10(p_val_adj))) + 
  geom_point(aes(colour = DEA), show.legend = FALSE) + 
  scale_colour_manual(values = c("blue", "gray", "red")) +
  geom_hline(yintercept = -log10(0.05), linetype = "dotted") + 
  geom_vline(xintercept = c(-1,1), linetype = "dotted") + 
  theme_bw() + 
  theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), plot.title = element_text(hjust = 0.5)) +
  geom_text_repel(data=subset(uro_BC1vsBC6, (abs(avg_log2FC) > 7.3 & p_val_adj < 0.05)| (avg_log2FC < -3.5 & p_val_adj < 0.05)), aes(x=avg_log2FC, y=-log10(p_val_adj), label=genes), max.overlaps = 1000, size=4.5) +
  ggtitle("DEA of community 3 between BC1 and BC6 (BC1 in red)") 

 ggsave("plots5/BC1vsBC6_3.png")
# ident.1 = BC1 community 0 shown red // ident.2 = BC6 community 0 shown blue

# Extract upregulated genes (log2 fold change > 7, adjusted p-value < 0.05)
upreg_0 <- uro_BC1vsBC6[uro_BC1vsBC6$avg_log2FC > 7 & uro_BC1vsBC6$p_val_adj < 0.05, "genes"]

# Extract downregulated genes (log2 fold change < -3, adjusted p-value < 0.05)
downreg_0 <- uro_BC1vsBC6[uro_BC1vsBC6$avg_log2FC < -3 & uro_BC1vsBC6$p_val_adj < 0.05, "genes"]

print(upreg_0) 
print(downreg_0)

# for ident.1 = 3_BC1 shown red, genes expressed are ranked in descending order:
# majority for cell signalling and transcription regulation 
# structural proteins and cytoskeleton
# metabolism and detoxification
# immune response and inflammation
# cellular transport and membrane proteins
# MANY non-coding RNAs, uncharacterized genes, miscellaneous functions
# may be linked to age/ health factors again

# for ident.2 = 3_BC6 shown blue, genes expressed are ranked in descending order:
# most significant gene PLD4 (immune response)
# extracellular matrix and structural proteins (8/16 genes)
# cell signalling and regulation (4/16 genes)
# membrane transport and transporters (2/16 genes)
# cell cycle and division (1/16)
# synaptic function and signalling (1/16 genes)
# unknown or uncharacterized (2/16 genes)

# create pi values
# add new column to data frame 'uro_BC1vsBC6' where each gene is assigned a pi score (calculated below)
# pi score allows me to rank genes - based on measure of log2fold change + stats significance, can then use to identify top genes for further analysis 
uro_BC1vsBC6 <- mutate(uro_BC1vsBC6, pi = (avg_log2FC * (-1 * log10(p_val_adj))))

# check data set - look at the top hit genes !!
head(uro_BC1vsBC6)

# select top 20 genes based on pi score in descending order - i.e. the highest pi scores (upregulated in BC1, ident.1)
# these genes are the most significant and differentially expressed when comparing BC1 and BC6 for this cluster
uro_BC1vsBC6 %>% arrange(desc(pi)) %>% slice(1:20) 

# select top 20 genes based on pi score in ascending order - i.e. the most negative pi scores (downregulated in BC1 / upregulated in BC6)
# these genes combine negative fold changes with statistically significant p-values
uro_BC1vsBC6 %>% arrange(pi) %>% slice(1:20) 

# prep for GSEA 
# extract 'genes', 'pi' from data frame and convert 'prerank' to numeric vector 
prerank <- uro_BC1vsBC6[c("genes", "pi")]
prerank <- setNames(prerank$pi, prerank$genes)
str(prerank)

# run GSEA and store results in fgseaRes
genesets = gmtPathways("data5/c2.cp.v2024.1.Hs.symbols.gmt")
fgseaRes <- fgsea(pathways = genesets, stats = prerank, minSize=15, maxSize=300, eps=0)

# filter results from GSEA stored in fgseaRes
# checking top hits for either side of volcano plot 
fgseaRes %>% arrange(padj) %>% filter(NES<0) %>% slice(1:20)
fgseaRes %>% arrange(padj) %>% filter(NES>0) %>% slice(1:20)

# add bar plot to show the differences following GSEA

# Barplot of top GSEA results
fgseaRes %>%
  arrange(desc(NES)) %>%
  slice(1:20) %>%
  ggplot(aes(x = reorder(pathway, NES), y = NES, fill = NES)) +
  geom_bar(stat = "identity") +
  coord_flip() +
  labs(title = "Top 20 Pathways by NES, BC1vsBC6_3 ",
       x = "Pathway",
       y = "Normalized Enrichment Score (NES)") +
  theme_minimal()

ggsave("plots5/BC1vsBC6_3_GSEA.png", bg = "white")
# 4 Find clusters of cell types ####

# Data quality is sorted following QC (section 2) and integration (section 3) - data is single analysis object!

# I can now look at the biology - try to identify communities of similar cells. Genes can work in pathways, so they are not all equally informative
# use PCA to reduce dimension and identify patterns of shared variance then derive (potential) communities 
# also work out how many clusters are (potentially!) informative 

# run PCA on integrated data set 
# recap PCA reduces dimensions in data, only capture max variance in data so easy to visualize
# in context of scRNAseq, I'm using PCA to get main sources of variation in gene expression
so_donorint <- RunPCA(so_donorint)

# top 5 PCs shown, biggest contributor in each direction are : 
# gene info obtained via uniprot
# PC1 positive = HLA-DRB6 - antigen presenting to CD4+ T cells, immune response
# PC1 negative = ADIRF - regulates adipogenesis and has a role in fat metabolism 
# PC2 positive = TXNIP - negative regulator of thioredoxin, tumour suppressor gene 
# PC2 negative = MT-CO1 - component of cytochrome c oxidase for respiration

# pick the most informative PC
# elbow plot to show me percentage variance explained by each PC
ElbowPlot(so_donorint)

# As number of PC increases, the variance explained by each subsequent PC decrease more gradually
# The "elbow" is the point where the curve flattens out - stop considering additional PC as they contribute little to explaining data's variance.

# after running PCA to assess how much variance in each PC
# calc % variance explained by each PC and cumulative % variance across all PCs 
# helps me determine number of PCs to keep for downstream analysis
pct <- so_donorint[["pca"]]@stdev / sum(so_donorint[["pca"]]@stdev) * 100
cumu <- cumsum(pct)

# identify point/PC where cumulative variance > 90%, % variance explained < 5% (does not contribute much to variance)
co1 <- which(cumu > 90 & pct < 5)[1]
co1 # 43

# identify point/PC where difference in % variance explained (contribution to variance) between 2 consecutive PCs is > 0.1%
# I can find the point of significant drop in contribution of variance explained by each PC
co2 <- sort(which((pct[1:(length(pct) - 1)] - pct[2:length(pct)]) > 0.1), decreasing = T)[1]
co2 # 12

# select minimum value between the 2 measures (co1, co2)
# provides criteria to decide when to stop including additional PCs
# co1 - point where a PC has cumulative variance > 90%, variance explained < 5%
# co2 - point where there's significant drop in variance explained between 2 consecutive PCs
pcs <- min(co1, co2)
pcs # 12

# Here is a comparison of the point at 90% variance explained and the point where the PCs stop being informative
# create data frame containing 3 variables
# pct - % variance contributed by each PC
# cumu - cumulative % variance contributed by PCs 
# rank - ranking of each PC showing its order in analysis
cumu_df <- data.frame(pct = pct, cumu = cumu, rank = 1:length(pct))

# Elbow plot to visualize relation between cumulative variance explained, % variance contribution by each PC
ggplot(cumu_df, aes(cumu, pct, label = rank, color = rank > pcs)) + 
  geom_text() +
  theme_bw()

# clean up - remove objects unnecessary for downstream analysis 
rm(pct, cumu, cumu_df, co1, co2)

# PCA-based scatter plot to visualize 4 selected features/genes - check if PCA successful
# cells expressing these markers are shaded from dark blue (most expression) to pale pink (least/none)
FeaturePlot(so_donorint, reduction = "pca", dims = c(1, 2), 
            features = c("HLA-DRB6", "ADIRF", "TXNIP", "MT-CO1"), cols = c("#fcd5ce", "#4886af"), 
            pt.size = 2.5, alpha = 0.7, order = TRUE)

ggsave("plots5/6_featureplot.png")

# look at first 2 PCs
# gene 'MT-CO1' shown to have greatest expression across PC1 PC2, closely followed by gene 'ADIRF'
# MT-CO1 involved in respiration and could be linked to providing energy required for proliferation
# since BC1 BC6 = non-malignant, proliferation of the communities highly expressing MT-CO1 should be a contributor to non-malignancy
# but need to perform more analysis before making any conclusions
# gene 'HLA-DRB6' have least expression across PC1 PC2 

# Neighbor analysis - to find relationships between cells (calc neighborhoods) based on first PC
DefaultAssay(so_donorint) <- "integrated"
so_donorint <- FindNeighbors(so_donorint, dims = 1:pcs)

# problems with neighbor analysis - finds potential communities but does not give reference of how many communities should be present
# community detection starts with all cells in 1 community - followed by successive subdivisions
# using cluster tree = can visualize how communities derived + subdivided 

# so, perform clustering analysis across multiple resolution values
# then visualize resulting clusters by cluster tree plot 
so_donorint_orgs <- FindClusters(so_donorint, resolution = seq(0, 0.3, 0.05))
clustree(so_donorint_orgs, show_axis = TRUE) + theme(legend.position = "none") + ylab("Resolution")

ggsave("plots5/7_clustertree_lineage.png")

# cluster tree plot shows successive community divisions as resolution increases
# issue is - the most numerous cell type often yields the most subgroups, purely due to its high abundance and not biological difference
# I will therefore start my analysis at low resolution to monitor closely the main community lineages

# OK before that clear up first
rm(so_donorint_orgs)

# set initial resolution to 0.05 for clustering cells - I should see 3 communities 
res <- 0.05

# now assign cells to a cluster and run UMAP - reduce dimensions so I can visualize
# UMAP allows me to visualize clusters based on first PCA 
# 2D representation of cells to visualize similarities/differences based on high dimensional features (i.e gene expression) - based on first PCA
so_donorint <- FindClusters(so_donorint, resolution = res)
so_donorint <- RunUMAP(so_donorint, dims=1:pcs, return.model = TRUE, verbose = FALSE)

# UMAP plot - Visualization of my clustered data 
DimPlot(so_donorint, reduction = "umap", label = TRUE, label.size = 6)

# based on cluster tree plot I should see 3 communities 
# however populations 1 and 2 are each shown as 2 separate groups, suggesting further subdivision
# I will test with higher resolution later in this analysis to see subdivisions for different cell types (see section 6)

# save plot first
ggsave("plots5/8_UMAP_res0.05.png")

# split UMAP plot - based on 'conditions' + add labels 
# shows how clusters are distributed across conditions BC1 BC6
DimPlot(so_donorint, reduction = "umap", split.by = "Conditions", label = TRUE, label.size = 6)

ggsave("plots5/9_splitUMAP_res0.05.png")

# samples are matched - same grade, same non-malignant NMIBC tumour type - so patterns should be similar
# differences are observed and could be due to person-to-person differences. Samples are from individuals of different ages, and possibly very different lifestyles and health status
# genetic differences, mutations can also contribute to these differences
# community 0 from BC1 lacks a great number of cells in comparison to BC6
# community 2 is suggested to further subdivide and I will look at a higher resolution later (see section 6)
# community 1 is also suggested to further subdivide, and BC6 is shown to have less cells here compared to BC1
# please see section 6 - Vary resolution

# % distribution of each cell cluster across different conditions 
round(prop.table(table(Idents(so_donorint), so_donorint$Conditions))*100,3)
# community 0 = 22.6% (BC1) 52.3% (BC6) differs most significantly



############################################################

# 5 Annotate clusters ####

# set default assay to 'RNA' and normalize gene expression data in 'RNA'
DefaultAssay(so_donorint) <- "RNA"
so_donorint_dp_norm <- NormalizeData(so_donorint, verbose = TRUE)

# now define list of genes for biological markers - likely communities in urothelium
# likely urothelial markers + others 
features <- c(
              "TERT",                               # telomerase
              "UPK1A",                              # marker for urothelial differentiation
              
              "UPK2", "EPCAM", "KRT18", "KRT13",    # urothelial markers
              "COL1A1", "CALD1",                    # muscle markers
              "PECAM1",                             # endothelial
              "DCN", "PDPN", "TAGLN",               # fibroblasts
              "CD2", "CD3D", "CD3E",                # T cells
              "C1QC", "CD14", "CSF1R",             # macrophages/myeloid
            "MRC1")

# DotPlot to visualize expression of list of genes across different clusters of cells
# look at intensity/ proportion of cells expressing each marker in each community
# list of genes were defined previously
DotPlot(so_donorint_dp_norm, features = features, dot.scale=8) + 
  theme(axis.text.x = element_text(size = 11)) + 
  RotatedAxis()

ggsave("plots5/10_Dotplot.png", bg = "white")
# urothelial markers are shown to have high % expression - expected 

# FeaturePlot to visualize expression of selected genes across UMAP-reduced data set
# check how specific the markers are 
FeaturePlot(so_donorint_dp_norm, reduction = "umap", 
            pt.size = 2, alpha = 0.7, order = TRUE, 
            features = c("UPK2", "EPCAM","PECAM1", "CD2", 
                         "C1QC", "TERT", "CD3D", 
                         "CD3E", "MRC1", "CD19", "CD20", "CD22"))

ggsave("plots5/11_featureplot.png")

# community 2 (although it later subdivides, as suggested by the cluster tree) appears to be immune cells
# C1QC highly expressed = macrophages // CD3D highly expressed = T cells
# communities 1,0 contains urothelial markers but subdivision was shown in cluster tree
# community 1, 0 important as significant difference observed between BC1, BC6
# find differences with differential expression analysis then visualize with volcano
# visualize genes expressed in each community, and attempt to categorize them to a theme i.e immune/ cell cycle related

# now prepare DEA
# switch back to integrated assay for comparisons (not for plotting)
DefaultAssay(so_donorint) <- "integrated"

# run DEA to identify differentially expressed genes between community 0, 1
uro.markers <- FindMarkers(so_donorint, ident.1 = 0, ident.2 = 1)

# check data frame 
head(uro.markers)

# avoid errors caused by small p-values downstream by setting p-value limits
# all adjusted p-value > or = to (1e-300)
uro.markers["p_val_adj"] <- lapply(uro.markers["p_val_adj"], pmax, 1e-300)

# preparation for volcano plot
# categorize genes (in uro.markers) based on their DEA results
# create new column 'DEA' to classify each gene as up, down, no based on log fold-change and adjusted p-value
uro.markers$DEA <- "NO"
uro.markers$DEA[uro.markers$avg_log2FC > 1 & uro.markers$p_val_adj < 0.05] <- "UP"
uro.markers$DEA[uro.markers$avg_log2FC < -1 & uro.markers$p_val_adj < 0.05] <- "DOWN"
uro.markers$genes <- rownames(uro.markers)

# Volcano plot to visualize DEA results for comparison between ident.1 = 0, ident.2 = 1 at res 0.05
ggplot(uro.markers, aes(x=avg_log2FC, y=-log10(p_val_adj))) + 
  geom_point(aes(colour = DEA), show.legend = FALSE) + 
  scale_colour_manual(values = c("blue", "gray", "red")) +
  geom_hline(yintercept = -log10(0.05), linetype = "dotted") + 
  geom_vline(xintercept = c(-1,1), linetype = "dotted") + 
  theme_bw() + 
  theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) +
  geom_text_repel(data=subset(uro.markers, (abs(avg_log2FC) > 4 & p_val_adj < 0.05)), aes(x=avg_log2FC, y=-log10(p_val_adj), label=genes), max.overlaps = 1000, size=4.5)

ggsave("plots5/12_volcano_res0.05.png")

# Extract upregulated genes (log fold change > 4, p-value < 0.05 )
upreg_res_0.05 <- uro.markers[uro.markers$avg_log2FC > 4 & uro.markers$p_val_adj < 0.05, "genes"]

# Extract downregulated genes (log fold change > 4, p-value < 0.05 )
downreg_res_0.05<- uro.markers[uro.markers$avg_log2FC < -3 & uro.markers$p_val_adj < 0.05, "genes"]

print(upreg_res_0.05) 
print(downreg_res_0.05)

# upreg genes red (expressed in ident.1 = 0) 
# IGHG4 (immune), HS3ST2 (biosynthesis), BGN (inflammation), PALDI (cell migration + cancer), SCN10A (Na channel subunit, neuronal)
# cannot really define community 0 just yet and is suggested to subdivide based on cluster tree plot
# Will look again at higher resolution (see section 7)

# downreg genes blue (expressed in ident.2 = 1)
# majority linked to cell cycle and division in community 1 
# Not suggested to further subdivide. Go ahead with cell cycle scoring 


######################################################

# I am working on scRNAseq data derived from human bladder cancer 'Urothelial carcinoma'
# Urothelial carcinoma has 2 subtypes - muscle invasive (MIBC), non-muscle invasive (NMIBC)
# NMIBC has recurrence rate 70%-80% if untreated and approx 50% progress to MIBC (Aya T Shalata et al. 2022), where MIBC has a 50-70% mortality rate! (Jong Chul Park et al. 2014)
# I will look at urothelial heterogeneity within non-malignant NMIBC papilloma as intratumoural heterogeneity is a major driver to drug resistance (NA Saunders et al. 2012)

# Data comes from 2 individuals. (M 67yrs, M 58yrs)
# sample BC1 = low grade Ta non-malignant NMIBC cells from (M, 67yrs)
# sample BC6 = low grade Ta non-malignant NMIBC cells from (M, 58yrs)
# differences between the two cell sets could be, but not limited to person differences (age, genetic mutations, lifestyle)

# Firstly, I will access the Genomics shared libraries 
.libPaths("libs/R_4.3.3")

# load relevant libraries
library(tidyverse)
citation("tidyverse")
library(Seurat)
citation("Seurat")
library(clustree)
citation("clustree")
library(ggplot2)
citation("ggplot2")
library(ggrepel)
citation("ggrepel")
library(fgsea)
citation("fgsea")

# 1 Load, preview and explore data ####
counts <- Read10X("data5/02_assessment_options/assessment02_BC1-BC6")

# check dimensions of data (i.e number of genes - rows, cells - columns)
dim(counts)
# number of rows = 38606, columns = 16155

# quick preview of data. Structure, type, first few values
str(counts)

# setting IDs and conditions (here the two samples, BC1 and BC6) for counts
ids <- c("BC1", "BC6")
con <- c("BC1", "BC6")

# create a table 'cell_counts' based on column names from 'counts'
# create metadata df, and annotate my data with labels
cell_counts <- table(substring(colnames(counts),18))

# construct two vectors 'sample', 'conditions' from 'ids', 'con' based on 'cell_counts'
# set up sample and condition labels for the backgrounds
samples <- c()
for (s in 1:length(ids)) { 
  samples <- c(samples, (rep(paste(ids)[s], cell_counts[s])))
} 
conditions <- c()
for (c in 1:length(con)) { 
  conditions <- c(conditions, (rep(paste(con)[c], cell_counts[c])))
} 

# now create a metadata df
metadata <- data.frame(Sample=samples, Conditions=conditions, row.names=colnames(counts))

# then check metadata df cell numbers match expected 
metadata |> group_by(Sample) |> summarise(Cells=n())
metadata |> group_by(Conditions) |> summarise(Cells=n())

# create seurat object 'so' from data - R package used for scRNAseq analysis that stores, organise scRNAseq data, metadata, results 
# rows = genes, columns = cells
# 'meta.data' specifies a metadata df which has additional info of each cell (ie cell types)
so <- CreateSeuratObject(counts=counts, project="Genomics_W5assessment", meta.data=metadata)

# assign cell identities based on 'conditions' column from metadata
# for further analysis to consider cells belonging to different conditions
# i.e later DEA, clustering etc can be done based on 'conditions'
Idents(so) <- so@meta.data$Conditions

# check seurat version
packageVersion("Seurat")

# remove unused variables to tidy up memory
rm(c, s, cell_counts, con, conditions, ids, samples, counts, metadata)

# ################################### #

# 2 Data QC check ####

# removing low quality data from data set 
# high Mt% indicates dying cells, often correlated with low number of features
# a high number of features suggests 2 cells are present in a droplet instead of a single cell (a doublet)

# calc %MT gene expression  
# add a new metadata column 'percent.mt' of %MT for each cell
so <- PercentageFeatureSet(so, pattern="^MT-", col.name="percent.mt")

# Visualize distribution of MT gene percentages 
VlnPlot(so, features = "percent.mt")

# violin plot to visualize and compare distribution of 2 selected features - number of features, MT%
# across different cell groups (i.e sample or condition)
# assists QC by easing detection of any outliers in my data (low quality/stressed/dying cells)
VlnPlot(so, features=c("nFeature_RNA","percent.mt"), 
        ncol=2, pt.size=0, group.by="Sample", raster=FALSE)

# distribution pattern is different for BC1 BC6 in nFeature_RNA
# BC1 data is too evenly distributed, no median possible to be observed
# In contrast BC6 has nice violin shaped distribution making it relatively easier to decide cut off points to remove low quality data
# so I decide data cut off threshold based on BC6 

# save plot
ggsave("plots5/1_violinplot_preQC.png")

# Summarize key stats based on distribution above, related to %MT and number of features 
# Create a summary table 
summary_metadata <- so@meta.data |> 
  group_by(Sample) |> 
  summarize(
    mt_med = quantile(percent.mt, 0.5),
    mt_q95 = quantile(percent.mt, 0.95),
    mt_q75_plus_IQR = quantile(percent.mt, 0.75) + (1.5 * IQR(percent.mt)),
    feat_med = median(nFeature_RNA),
    feat_q95 = quantile(nFeature_RNA, 0.95),
    feat_q75_plus_IQR = quantile(nFeature_RNA, 0.75) + (1.5 * IQR(nFeature_RNA)),
    count = n()
  )

# print the table
summary_metadata

# scatter plot number of features VS %MT for each cell 
# assists my QC by identifying cells with high MT%, low features (indicative of dying cells/ empty droplets)
ggplot(so@meta.data, aes(nFeature_RNA, percent.mt, colour=Sample)) +
  geom_point(alpha=0.2) + 
  facet_grid(.~Sample) + 
  theme_bw()

ggsave("plots5/2_nfeatures_vs_%MT_preQC.png")

# now remove low feature/high MT% cells using UQ + (1.5*IQR) rule
# find the upper threshold limit for MT% in both samples, then take the lowest value
mt_filt <- min(summary_metadata$mt_q75_plus_IQR)

# find upper threshold limit for feature number in both, then take the lowest value
feat_max_filt <- min(summary_metadata$feat_q75_plus_IQR)

# print proposed thresholds
mt_filt # 6.370424
feat_max_filt # 5279.75
# These are proposed thresholds, but I need to take a closer look at the data first to decide cut-off points

# Taking a closer look at low quality data first - I will use 2 graphs, one for each feature
# this is in preparation to filter data after visualization 

# 1 - zoomed-in version scatter plot focusing on cells < 10% MT content (clear up feature 1)
# I can exclude cells that are potentially stressed/ dying more clearly since zoomed in view
ggplot(subset(so@meta.data, subset = percent.mt < 10), aes(nFeature_RNA, percent.mt, colour=Sample)) + 
  geom_point(alpha=0.2) + 
  facet_grid(.~Sample) + 
  theme_bw() + 
  xlim(0,8000) # set limits to x axis to zoom in when viewing graph to decide cut off point

ggsave("plots5/3_%MT_zoomedlowqual_QC.png")

# 2 - density plot for number of features detected per cell (clear up feature 2) 
# across different samples in the seurat object 'so' I created earlier
# from plot I can identify potential problems like under-sampling or a variability in number of genes detected
ggplot(so@meta.data, aes(x=nFeature_RNA, colour=Sample)) + 
  geom_density() + 
  facet_grid(.~Sample) + 
  theme_bw() + 
  xlim(0,3500) # set limits for x axis to zoom in when viewing graph to decide cut off point, the 'dip' in graph 
# graph 'dip' is approx at 1800 on x axis

ggsave("plots5/4_nfeature_zoomedlowqual_QC.png")

# Now I can begin data filtering process! - filter based on 3 conditions
# 1 - cells < 1800 features (genes) detected
# 2 - cells with more than number of features defined by 'feat_max_filt' 
# 3 - % MT gene expression. Threshold 6.370424 prev defined by 'mt_filt'. Again, filtering out cells with high MT content as indicative of low quality cells (i.e dying)
so <- subset(so, subset = nFeature_RNA >= 1800 & nFeature_RNA <= feat_max_filt & percent.mt <= mt_filt)

# after filtering I have new data set - post QC
# so generate new summary data for filtered data set 
so@meta.data |> group_by(Sample) |> summarize(mt_med = quantile(percent.mt, 0.5),
                                              mt_q95 = quantile(percent.mt, 0.95),
                                              mt_q75_plus_IQR = quantile(percent.mt, 0.75) + (1.5 * IQR(percent.mt)),
                                              feat_med = median(nFeature_RNA),
                                              feat_q95 = quantile(nFeature_RNA, 0.95),
                                              feat_q75_plus_IQR = quantile(nFeature_RNA, 0.75) + (1.5 * IQR(nFeature_RNA)),
                                              count = n())

# now visualize new summary data post QC - violin plot like previous
# violin plot to visualize and compare distribution of 2 selected features - number of features, MT%
# across different cell groups (i.e sample or condition)
VlnPlot(so, features=c("nFeature_RNA", "percent.mt"), 
        ncol=2, pt.size=0, group.by="Sample", raster=FALSE)

# Recap - I have chosen thresholds based on BC6 as it had better initial distribution to decide data cut off points
# filtered data only shows cells of good quality facilitating my downstream analysis

ggsave("plots5/5_violinplot_postQC.png")

# some points to consider about the QC process is that : 
# same filters are applied to both data sets BC1 BC6, which would surely have differences (i.e starting distributions, sequencing depth across data sets)
# a threshold that works well for one data set may i.e discard good quality cells in another
# but we can only try our best when deciding the cut-off point - i.e. by zooming into the graph. It is better than including bad-quality data!

# create save point
save.image("savepointA_QC-complete.RData")

# ############################################## #

# 3 Data Integration ####

# merge data sets after filtering each data set individually. Need to ensure filtering doesn't bias data
# potential problem - imbalanced data set (i.e one was aggressively filtered = has far fewer cells than other, could cause bias downstream)
# use stats to account for these differences (i.e normalization, batch correction, merging strategies)
# here I use normalization 

# Merging data is crucial for comparison 
# different cell types from one person can be more similar than same cell type from 2 people 
# this means same cell type from different donors would be separated during analysis/ clustering - not useful
# integration needed to allow comparison

# split objects by conditions defined in section 1 
con.list <- SplitObject(so, split.by = "Conditions")

# normalize and identify variable features for each data set independently
# also ensure the modified object is returned so con.list is updated
# top 3000 most variable genes selected for downstream analysis
con.list <- lapply(X = con.list, FUN = function(x) {
  x <- NormalizeData(x, normalization.method = "LogNormalize")
  x <- FindVariableFeatures(x, selection.method = "vst", nfeatures = 3000)
  return(x)  
})

# Scale the data and perform PCA for each condition
con.list <- lapply(X = con.list, FUN = function(x) {
  x <- ScaleData(x)
  x <- RunPCA(x)
  return(x)
})

# after normalization and variable feature selection - standard thing to do is integrate the 2 conditions based on shared variable features
# then can proceed with other analyses such as scaling, PCA, clustering, or differential expression
# I prev selected top 3000 most variable genes to look at (nfeature)
# when shared variable features close to 3000 = likely lose biological difference
# but some shared variable features (to a certain extent) are needed to merge the 2 data sets - roughly 600-2400, the closer to halfway the better

# check overlap between most variable features in 2 conditions (BC1,BC6)
length(intersect(VariableFeatures(con.list$BC1), VariableFeatures(con.list$BC6)))
# 1385

# select features repeatedly variable across data sets for integration
intfeats <- SelectIntegrationFeatures(object.list = con.list, nfeatures = 3000)

# integration - finally! Now combine data sets using anchors 
int.anchors <- FindIntegrationAnchors(object.list = con.list, anchor.features = intfeats)
so_donorint <- IntegrateData(anchorset = int.anchors)

# scaling data to ensure genes with higher variance don't dominate analysis following integration, each feature contributes equally
# set default assay to 'integrated' so downstream analysis uses integrated data (corrected values)
# instead of data before integration/ raw counts etc
DefaultAssay(so_donorint) <- "integrated"
so_donorint <- ScaleData(so_donorint, features = rownames(so_donorint))

# clear up after integration 
# split condition objects, anchors used to integrate data sets, features selected for integration, original 'so' object before processing
rm(con.list, int.anchors, intfeats, so)

# create save point 
save.image("savepointB_QC-integration-complete.RData")
# Extra analysis (1,2,3,4) ####

# since no new packages could be installed, I implement a custom approach

# Custom Ligand-Receptor Interaction Networks
# use custom lists of ligands and receptors to build your interaction network manually

# Define ligand-receptor pairs (from a database or literature)
ligand_receptor_pairs <- data.frame(
  Ligand = c("CD80", "CD86", "CD70", "CCL2", "CXCL9"),
  Receptor = c("CD28", "CD28", "CD27", "CCR2", "CXCR3")  # CXCL9 signals through CXCR3
)

# Create a data frame to represent ligand-receptor interactions for each cell
ligand_receptor_matrix <- data.frame(cell = colnames(so), stringsAsFactors = FALSE)

# Loop over each ligand-receptor pair and check if they are present in the gene list
for (i in 1:nrow(ligand_receptor_pairs)) {
  ligand <- ligand_receptor_pairs$Ligand[i]
  receptor <- ligand_receptor_pairs$Receptor[i]
  
  # Check if the ligand and receptor genes exist in the Seurat object (so)
  ligand_receptor_matrix[[paste0("Ligand_", ligand)]] <- ifelse(ligand %in% rownames(so), 1, 0)
  ligand_receptor_matrix[[paste0("Receptor_", receptor)]] <- ifelse(receptor %in% rownames(so), 1, 0)
}

# Assign this new matrix to the Seurat object metadata
so@meta.data <- cbind(so@meta.data, ligand_receptor_matrix[, -1])  # Remove 'cell' column as it's redundant

# Verify if the new columns have been added
head(so@meta.data)

# Calculate the expression of ligands and receptors for each cell type (or cluster) in your Seurat object.
ligand_expression <- FetchData(so_donorint, vars = ligand_receptor_pairs$Ligand)
receptor_expression <- FetchData(so_donorint, vars = ligand_receptor_pairs$Receptor)

# Calculate potential interactions based on co-expression or proximity of ligand-receptor pairs.
# Example: correlation of ligand and receptor expression
interaction_scores <- cor(ligand_expression, receptor_expression)

# Visualize the interactions as a network.
library(igraph)

network_graph <- graph_from_data_frame(ligand_receptor_pairs)

# Open a graphics device (PNG in this case)
png("plots5/ligandreceptorinteractions.png", 
    width = 1000, height = 800, res = 300, bg = "white")

plot(network_graph,
     vertex.label = V(network_graph)$name, 
     vertex.label.cex = 0.3,         # Adjust label size
     vertex.label.color = "black",   # Label color
     vertex.label.dist = 4)          # Distance from node

# Close the device to save the plot
dev.off()
To sort your WooCommerce products by the highest discount using JetSmartFilters' **Sort Filter**, you can use the following steps and PHP snippet:

---

### **1. Add a Custom Meta Key for Discount Calculation**
You need to calculate and store the discount as a custom meta key for each product. This meta key will be used in the JetSmartFilters Sort Filter.

Add the following PHP snippet to your theme's `functions.php` file or a custom plugin:

```php
add_action('save_post_product', 'update_product_discount_meta');
function update_product_discount_meta($post_id) {
    // Ensure it's a product post type
    if (get_post_type($post_id) !== 'product') {
        return;
    }

    // Get the product object
    $product = wc_get_product($post_id);
    if (!$product) {
        return;
    }

    // Get the regular and sale prices
    $regular_price = (float) $product->get_regular_price();
    $sale_price = (float) $product->get_sale_price();

    // Calculate the discount percentage
    if ($regular_price > 0 && $sale_price > 0) {
        $discount = round((($regular_price - $sale_price) / $regular_price) * 100);
    } else {
        $discount = 0; // No discount
    }

    // Update the custom field with the discount value
    update_post_meta($post_id, '_product_discount', $discount);
}
```

---

### **2. Recalculate Discounts for Existing Products**
To apply the discount calculation to existing products, you can run a script or use a plugin like **WP Crontrol** to loop through all products and update their discounts.

```php
add_action('init', 'recalculate_discounts_for_all_products');
function recalculate_discounts_for_all_products() {
    $args = array(
        'post_type' => 'product',
        'posts_per_page' => -1,
        'fields' => 'ids',
    );
    $products = get_posts($args);

    foreach ($products as $product_id) {
        update_product_discount_meta($product_id);
    }
}
```
This will process all products and save their discounts. After running it once, you can remove or comment out this action.

---

### **3. Configure the JetSmartFilters Sort Filter**
In the **Sort Filter** settings (as shown in your screenshot):
1. **Order By**: Select **Meta Key Numeric** (since the discount is a number).
2. **Key**: Enter `_product_discount` (the meta key used in the PHP snippet).
3. **Order**: Set to **DESC** (to sort from highest to lowest discount).

---

### **4. Test the Sorting**
- Add the JetSmartFilters **Sort Filter** to the page where your product listing grid is displayed.
- Ensure the sorting works as expected, with the products having the highest discount appearing first.
Class       Media query range       Example class
col-*       Default (XS: <576px)    col-12
col-sm-*    ≥576px                  col-sm-6
col-md-*    ≥768px                  col-md-4
col-lg-*    ≥992px                  col-lg-3
col-xl-*    ≥1200px                 col-xl-2
col-xxl-*   ≥1400px (Bootstrap 5)   col-xxl-1
* GOAL: Within a macro, you want to create a new variable name.
* It could then be used as a data set name, or a variable name to be assigned, etc.;
%macro newname(dset=,varname=);
* You have to make the new name before you use it in
* another data statement;
data _NULL_;
  L1=CATS("&varname","_Plus1");
  CALL SYMPUT('namenew',L1);
run;
* Now you can use the variable name created by SYMPUT;
data revised;
  set &dset;
  &namenew=&varname+1;
run;
%mend;

data try;
  input ABCD @@;
datalines;
1 2 3 4
;
run;

%newname(dset=try,varname=ABCD);
proc print data=revised;run;
https://pubads.g.doubleclick.net/gampad/ads?iu=/21775744923/external/single_ad_samples&sz=640x480&cust_params=sample_ct%3Dredirecterror&ciu_szs=300x250%2C728x90&gdfp_req=1&output=vast&unviewed_position_start=1&env=vp&impl=s&nofb=1&correlator=
https://pubads.g.doubleclick.net/gampad/ads?iu=/21775744923/external/single_ad_samples&sz=640x480&cust_params=sample_ct%3Dredirecterror&ciu_szs=300x250%2C728x90&gdfp_req=1&output=vast&unviewed_position_start=1&env=vp&impl=s&correlator=
{
	"blocks": [
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":cute-sun: Boost Days - What's On This Week :cute-sun:"
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "\n\n Happy Monday Melbourne!\n\n"
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":coffee: Barista Services on Level 2 :coffee:",
				"emoji": true
			}
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": " Wednesday, 29th January :calendar-date-22:",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": " \n\n :lunch: *Light Lunch*: Provided by Kartel Catering from *12:30pm* on *Levels 1 & 2!* \n\n"
			}
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": "Thursday, 23rd January :calendar-date-23:",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": ":breakfast: *Breakfast*: Provided by *Kartel Catering* from *8:30am - 10:30am on Levels 1 & 2*. \n\n "
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "Stay tuned for more fun throughout the year. Happy 2025! :party-wx:"
			}
		}
	]
}
{
	"blocks": [
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": " :mirror_ball::star: Boost Days - What's on this week! :star::mirror_ball:"
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "See below for what's in store for our Boost Days this week:"
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":calendar-date-21: Tuesday, 28th January",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "\n:coffee: *Xero Café*: Café-style beverages and sweet treats \n\n:meow-coffee-intensifies-and-shakes: *Barista Special*: _Iced Chai Latte_ \n\n:pancakes: *Breakfast*: Provided by *Catroux* from *8:30am - 10:30am* \n\n :nail_care::skin-tone-2: *Tipsy Manicures:* Emma offers quick Shape & Shine express manicures, Gel It Manicures & Pedicures and Hand N’ Arm & Foot N’ Leg Massages, there's a treatment for everyone's pampering needs. treat yourself <https://docs.google.com/spreadsheets/d/1pTGAD8oFXmPF890Uzj4d-Crfch8muuzEzDnNyBA9ReY/edit?gid=23583621#gid=23583621|*here.*> "
			}
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":calendar-date-23: Thursday, 30th January",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": ":coffee: *Xero Café*: Café-style beverages and sweet treats \n\n :meow-coffee-intensifies-and-shakes: *Barista Special*: _Iced Chai Latte_ \n\n :sandwich: *Light Lunch*: Provided by *Catroux* from *12:30pm - 1:30pm* \n\n  \n\n"
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "Stay tuned to this channel for more details, check out the <https://calendar.google.com/calendar/u/0?cid=eGVyby5jb21fMXM4M3NiZzc1dnY0aThpY2FiZDZvZ2xncW9AZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ|*Auckland Social Calendar*>, and get ready to Boost your workdays!\n\nLove,\nWX :wx:"
			}
		}
	]
}
gdb -tui -ex=r --args out/Debug/chrome --disable-seccomp-sandbox \
    http://google.com