Snippets Collections
<?php if ( is_active_sidebar( 'footer-menu-services-widget-area' ) ) : ?>
    <div class="grid-25 tablet-grid-50 mobile-grid-100">
        <ul class="sidebar footer-n-menu">
            <?php dynamic_sidebar( 'footer-menu-services-widget-area' ); ?>
        </ul>
    </div>
<?php endif; ?>

<?php if ( is_active_sidebar( 'footer-menu-about-widget-area' ) ) : ?>
    <div class="grid-15 tablet-grid-50 mobile-grid-100">
        <ul class="sidebar footer-n-menu">
            <?php dynamic_sidebar( 'footer-menu-about-widget-area' ); ?>
        </ul>
    </div>
<?php endif; ?>
# Imports assumed by this snippet; `app` (the Flask instance) and the MYSQL_* settings are defined elsewhere.
from collections import Counter
from datetime import datetime
import logging

import mysql.connector
from flask import jsonify

@app.route('/access_logs_data')
def access_logs_data():
    conn = None
    cursor = None
    try:
        conn = mysql.connector.connect(
            host=MYSQL_HOST,
            user=MYSQL_USER,
            password=MYSQL_PASSWORD,
            database=MYSQL_DATABASE
        )
        cursor = conn.cursor(dictionary=True)
        
        # Create access_logs table if it doesn't exist
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS access_logs (
                id INT AUTO_INCREMENT PRIMARY KEY,
                license_plate VARCHAR(255) NOT NULL,
                feed_type VARCHAR(50) NOT NULL,
                action VARCHAR(50) NOT NULL,
                timestamp DATETIME NOT NULL
            )
        ''')
        
        # Fetch all logs
        cursor.execute("SELECT * FROM access_logs ORDER BY timestamp DESC")
        logs = cursor.fetchall()
        
        # Process logs for all_time_stats
        entrances = [log for log in logs if log['feed_type'].lower() == 'entrance']
        exits = [log for log in logs if log['feed_type'].lower() == 'exit']
        granted = [log for log in logs if log['action'].lower() == 'auto']
        denied = [log for log in logs if log['action'].lower() != 'auto']
        
        # Get unique plates
        registered_plates = set(log['license_plate'] for log in granted)
        unregistered_plates = set(log['license_plate'] for log in denied)
        
        # Find peak hour
        hour_counts = Counter()
        for log in logs:
            timestamp = log['timestamp']
            if hasattr(timestamp, 'hour'):
                hour = timestamp.hour
            else:
                # Handle string timestamps if needed
                try:
                    hour = datetime.fromisoformat(str(timestamp)).hour
                except (ValueError, TypeError):
                    hour = 0
            hour_counts[hour] += 1
        
        peak_hour = max(hour_counts.items(), key=lambda x: x[1])[0] if hour_counts else 0
        
        # Calculate average daily traffic
        if logs:
            # Get unique dates from logs
            dates = set()
            for log in logs:
                timestamp = log['timestamp']
                if hasattr(timestamp, 'date'):
                    dates.add(timestamp.date())
                else:
                    try:
                        dates.add(datetime.fromisoformat(str(timestamp)).date())
                    except (ValueError, TypeError):
                        pass
            
            avg_traffic = round(len(logs) / max(1, len(dates)))
        else:
            avg_traffic = 0
        
        # Create all_time_stats dictionary
        all_time_stats = {
            'total_entrances': len(entrances),
            'total_exits': len(exits),
            'granted_access': len(granted),
            'denied_access': len(denied),
            'registered_vehicles': len(registered_plates),
            'unregistered_vehicles': len(unregistered_plates),
            'peak_hour': f"{peak_hour:02d}:00",
            'avg_traffic': avg_traffic
        }
        
        # Process data for charts (daily, weekly, monthly)
        now = datetime.now()
        
        # Create reportData structure
        report_data = {
            'day': process_period_data(logs, now, 'day'),
            'week': process_period_data(logs, now, 'week'),
            'month': process_period_data(logs, now, 'month')
        }
        
        return jsonify({
            'all_time_stats': all_time_stats,
            'report_data': report_data
        })
    
    except mysql.connector.Error as err:
        logging.error(f"MySQL Error fetching reports data: {err}")
        return jsonify({'error': 'Error fetching reports data'}), 500
    finally:
        if cursor:
            cursor.close()
        if conn and conn.is_connected():
            conn.close()
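The route above also depends on a process_period_data() helper that is not part of this snippet. A minimal sketch of what it might look like, assuming it buckets entrance/exit counts for the chart period (the bucketing scheme and return shape are assumptions, not the original implementation):

# Hypothetical sketch of the helper used by /access_logs_data; the real implementation may differ.
from collections import Counter
from datetime import datetime, timedelta

def process_period_data(logs, now, period):
    """Bucket entrance/exit counts for a chart period: 'day', 'week' or 'month'."""
    if period == 'day':
        start, bucket_of = now - timedelta(days=1), lambda ts: ts.hour              # last 24 h, by hour
    elif period == 'week':
        start, bucket_of = now - timedelta(weeks=1), lambda ts: ts.strftime('%a')   # last 7 days, by weekday
    else:
        start, bucket_of = now - timedelta(days=30), lambda ts: ts.day              # last 30 days, by day of month

    entrances, exits = Counter(), Counter()
    for log in logs:
        ts = log['timestamp']
        if not hasattr(ts, 'hour'):
            try:
                ts = datetime.fromisoformat(str(ts))
            except (ValueError, TypeError):
                continue
        if ts >= start:
            bucket = bucket_of(ts)
            if log['feed_type'].lower() == 'entrance':
                entrances[bucket] += 1
            else:
                exits[bucket] += 1

    labels = sorted(set(entrances) | set(exits))
    return {
        'labels': [str(label) for label in labels],
        'entrances': [entrances[label] for label in labels],
        'exits': [exits[label] for label in labels],
    }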
def save_vehicle_owner(license_plate, owner_name, owner_contact, owner_address):
    conn = None
    cursor = None
    try:
        conn = mysql.connector.connect(
            host=MYSQL_HOST,
            user=MYSQL_USER,
            password=MYSQL_PASSWORD,
            database=MYSQL_DATABASE
        )
        cursor = conn.cursor()
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS avbs (
                license_plate VARCHAR(255) PRIMARY KEY,
                owner_name VARCHAR(255) NOT NULL,
                owner_contact VARCHAR(255),
                owner_address TEXT,
                registration_timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        sql = "INSERT INTO avbs (license_plate, owner_name, owner_contact, owner_address) VALUES (%s, %s, %s, %s) ON DUPLICATE KEY UPDATE owner_name=%s, owner_contact=%s, owner_address=%s, registration_timestamp=CURRENT_TIMESTAMP"
        val = (license_plate, owner_name, owner_contact, owner_address, owner_name, owner_contact, owner_address)
        cursor.execute(sql, val)
        conn.commit()
        logging.info(f"Saved/Updated owner details for license plate: {license_plate}")
        return True
    except mysql.connector.Error as err:
        logging.error(f"MySQL Error saving owner details: {err}")
        return False
    finally:
        if cursor:
            cursor.close()
        if conn and conn.is_connected():
            conn.close()

CAMERA_CONFIG = {
    'entrance': 0,  # First USB camera index for entrance
    'exit': 1,      # Second USB camera index for exit
    'single_camera_mode': False  # Set to False to use two separate cameras
}
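A minimal sketch of how this camera configuration might be consumed when opening the video feeds; the function name and OpenCV usage are assumptions about the rest of the app:

import cv2

def open_camera_feeds(config=CAMERA_CONFIG):
    # In single-camera mode both feeds share one capture; otherwise each feed gets its own device index.
    entrance_cap = cv2.VideoCapture(config['entrance'])
    exit_cap = entrance_cap if config['single_camera_mode'] else cv2.VideoCapture(config['exit'])
    return entrance_cap, exit_cap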

# MySQL configuration
MYSQL_HOST = 'localhost'
MYSQL_USER = 'root'
MYSQL_PASSWORD = ''  
MYSQL_DATABASE = 'avbs' 

# Arduino configuration
ARDUINO_PORT = 'COM5'  # Change this to match your Arduino's COM port
ARDUINO_BAUD_RATE = 9600
arduino_connected = False
arduino_serial = None
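A connection routine elsewhere in the app presumably uses these settings; a minimal sketch with pyserial (the function name and error handling here are assumptions):

import logging
import serial  # pyserial

def connect_arduino():
    """Try to open the configured serial port, falling back gracefully if the board is absent."""
    global arduino_serial, arduino_connected
    try:
        arduino_serial = serial.Serial(ARDUINO_PORT, ARDUINO_BAUD_RATE, timeout=1)
        arduino_connected = True
        logging.info(f"Arduino connected on {ARDUINO_PORT}")
    except serial.SerialException as err:
        arduino_connected = False
        logging.error(f"Could not open {ARDUINO_PORT}: {err}")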

YOLO_CONF_THRESHOLD = 0.25  # Confidence threshold for YOLO detection
PADDLE_OCR_CONF_THRESHOLD = 0.65  # Confidence threshold for OCR
SAVE_INTERVAL_SECONDS = 60  # Interval for saving JSON data
JSON_OUTPUT_DIR = "output_json"  # Directory for JSON output
{
	"blocks": [
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":xeros-connect: Boost Days - What's on this week! :xeros-connect:"
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "Mōrena Ahuriri :wave: Happy Monday, let's get ready to dive into another week with our Xeros Connect Boost Day programme! See below for what's in store :eyes:"
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":calendar-date-11: Wednesday, 11th June :camel:",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "\n:coffee: *Café Partnership*: Enjoy coffee and café-style beverages from our cafe partner, *Adoro*, located in our office building *8:00AM - 11:30AM*.\n:muffin: *Breakfast*: Provided by *Design Cuisine* from *9:30AM-10:30AM* in the Kitchen."
			}
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":calendar-date-12: Thursday, 12th June :duck:",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "\n:coffee: *Café Partnership*: Enjoy coffee and café-style beverages from our cafe partner, *Adoro*, located in our office building *8:00AM - 11:30AM*.\n:sandwich: *Lunch*: Provided by *Roam* from *12:30PM-1:30PM* in the Kitchen."
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "*What else?* :party: \nWhat would you like from our future socials? \nMore food, drinks, or entertainment? \nWe'd love to hear feedback and ideas from you! \nDM your local WX coordinator or leave any suggestions in the thread :comment: \n*Keep up with us* :eyes: \nStay tuned to this channel for more details, check out the <https://calendar.google.com/calendar/u/0?cid=eGVyby5jb21fbXRhc2ZucThjaTl1b3BpY284dXN0OWlhdDRAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ|*Hawkes Bay Social Calendar*>, and get ready to Boost your workdays!\n\nWX Team :party-wx:"
			}
		}
	]
}
# Load necessary libraries
library(tidyverse)
library(tidytext)
library(lubridate)

# Sample text data with dates
feedback <- data.frame(
  text = c("I love this product!", "Terrible service.", "Okay experience.",
           "Wonderful!", "Worst support ever."),
  date = as.Date(c("2024-01-10", "2024-01-12", "2024-01-15", "2024-01-18", "2024-01-20"))
)

# Tokenize, clean, and assign sentiment
data("stop_words")
sentiment_data <- feedback %>%
  unnest_tokens(word, text) %>%
  anti_join(stop_words, by = "word") %>%
  inner_join(get_sentiments("bing"), by = "word") %>%
  count(date, sentiment) %>%
  pivot_wider(names_from = sentiment, values_from = n, values_fill = 0) %>%
  mutate(score = positive - negative,
         sentiment_label = case_when(
           score > 0 ~ "Positive",
           score < 0 ~ "Negative",
           TRUE ~ "Neutral"
         ))

# Trend visualization (bar plot over time)
ggplot(sentiment_data, aes(x = date, y = score, fill = sentiment_label)) +
  geom_col() +
  scale_fill_manual(values = c("Positive" = "green", "Negative" = "red", "Neutral" = "gray")) +
  labs(title = "Sentiment Trend Over Time", x = "Date", y = "Sentiment Score") +
  theme_minimal()

# Distribution visualization (pie chart)
ggplot(sentiment_data, aes(x = "", fill = sentiment_label)) +
  geom_bar(width = 1) +
  coord_polar("y") +
  theme_void() +
  labs(title = "Overall Sentiment Distribution")
# Apriori Algorithm in R

# Install and load required package
install.packages("arules")
library(arules)

# Load built-in transaction data
data("Groceries")

# Apply Apriori algorithm to find frequent itemsets
frequent_items <- apriori(Groceries, parameter = list(supp = 0.01, target = "frequent itemsets"))

# Generate association rules
rules <- apriori(Groceries, parameter = list(supp = 0.01, confidence = 0.5))

# Sort rules by lift
sorted_rules <- sort(rules, by = "lift", decreasing = TRUE)

# View top results
inspect(head(frequent_items, 10))
inspect(head(sorted_rules, 10))
# Logistic Regression
# Install the 'caret' package (only run once; comment out if already installed)
install.packages("caret")

# Load the 'caret' package for machine learning utilities
library(caret)

# Load the built-in iris dataset
data(iris)

# Convert the problem into binary classification:
# Setosa (1) vs Non-Setosa (0)
iris$Label <- ifelse(iris$Species == "setosa", 1, 0)

# Remove the original Species column as it's no longer needed
iris <- iris[, -5]

# Set seed for reproducibility
set.seed(123)

# Split the data: 80% for training and 20% for testing
idx <- createDataPartition(iris$Label, p = 0.8, list = FALSE)
train <- iris[idx, ]   # Training set
test <- iris[-idx, ]   # Test set

# Train a logistic regression model using the training data
model <- glm(Label ~ ., data = train, family = "binomial")

# Predict probabilities on the test set and convert to class labels (1 or 0)
pred <- ifelse(predict(model, test, type = "response") > 0.5, 1, 0)

# Generate a confusion matrix to evaluate model performance
conf <- confusionMatrix(factor(pred), factor(test$Label))

# Display evaluation metrics
cat("Precision:", round(conf$byClass["Precision"], 2), "\n")
cat("Recall:", round(conf$byClass["Recall"], 2), "\n")
cat("F1-score:", round(conf$byClass["F1"], 2), "\n")
# K-means clustering
install.packages(c("ggplot2", "factoextra", "cluster"))

library(ggplot2)
library(factoextra)
library(cluster)

data("iris")
irisdata <- scale(iris[, -5])

set.seed(123)

fviz_nbclust(irisdata, kmeans, method = "wss")

model <- kmeans(irisdata, centers = 3, nstart = 25)

iris$Cluster <- as.factor(model$cluster)

print(model$centers)

table(model$cluster)

fviz_cluster(model, data = irisdata)

sil <- silhouette(model$cluster, dist(irisdata))

fviz_silhouette(sil)
# Load libraries
library(tm)
library(SnowballC)
library(caret)
library(e1071)

# Load and prepare data
sms_data <- read.csv("https://raw.githubusercontent.com/jbrownlee/Datasets/master/sms_spam.csv", stringsAsFactors = FALSE)
colnames(sms_data) <- c("Label", "Message")
sms_data$Label <- factor(sms_data$Label, levels = c("ham", "spam"))

# Clean and preprocess text
corpus <- VCorpus(VectorSource(sms_data$Message))
corpus <- tm_map(corpus, content_transformer(tolower))
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, removeWords, stopwords("english"))
corpus <- tm_map(corpus, stemDocument)
corpus <- tm_map(corpus, stripWhitespace)

# Create Document-Term Matrix
dtm <- DocumentTermMatrix(corpus)
dtm_df <- as.data.frame(as.matrix(dtm))
dtm_df$Label <- sms_data$Label

# Split into training and testing sets
set.seed(123)
split_index <- createDataPartition(dtm_df$Label, p = 0.8, list = FALSE)
train_data <- dtm_df[split_index, ]
test_data <- dtm_df[-split_index, ]

# Separate features and labels
x_train <- train_data[, -ncol(train_data)]
y_train <- train_data$Label
x_test <- test_data[, -ncol(test_data)]
y_test <- test_data$Label

# Train Naive Bayes model and predict
nb_model <- naiveBayes(x_train, y_train)
predictions <- predict(nb_model, x_test)

# Evaluate performance
conf_mat <- confusionMatrix(predictions, y_test)
print(conf_mat)
cat("Accuracy:", round(conf_mat$overall["Accuracy"] * 100, 2), "%\n")
# Load packages
library(class)
library(ggplot2)
library(caret)

# Normalize and prepare data
data(iris)
norm <- function(x) (x - min(x)) / (max(x) - min(x))
iris_norm <- as.data.frame(lapply(iris[1:4], norm))
iris_norm$Species <- iris$Species

# Train-test split
set.seed(123)
idx <- createDataPartition(iris_norm$Species, p = 0.8, list = FALSE)
train_X <- iris_norm[idx, 1:4]; test_X <- iris_norm[-idx, 1:4]
train_Y <- iris_norm[idx, 5]; test_Y <- iris_norm[-idx, 5]

# Evaluate KNN for various k
eval_knn <- function(k) mean(knn(train_X, test_X, train_Y, k) == test_Y) * 100
k_vals <- seq(1, 20, 2)
acc <- sapply(k_vals, eval_knn)
results <- data.frame(K = k_vals, Accuracy = acc)
print(results)

# Plot accuracy vs. K
ggplot(results, aes(K, Accuracy)) +
  geom_line(color = "blue") + geom_point(color = "red") +
  labs(title = "KNN Accuracy vs. K", x = "K", y = "Accuracy (%)") +
  theme_minimal()

# Final model with optimal K
final_pred <- knn(train_X, test_X, train_Y, k = 5)
print(confusionMatrix(final_pred, test_Y))
# Load required packages
library(rpart)
library(rpart.plot)
library(ggplot2)
library(caret)

# Prepare data
data(iris)
set.seed(123)
index <- createDataPartition(iris$Species, p = 0.8, list = FALSE)
train <- iris[index, ]; test <- iris[-index, ]

# Train decision tree
model <- rpart(Species ~ ., data = train, method = "class")
rpart.plot(model, main = "Decision Tree", extra = 104)

# Predict and evaluate
pred <- predict(model, test, type = "class")
print(confusionMatrix(pred, test$Species))

# Visualize decision boundaries (Sepal features only)
# The full model uses all four predictors, so fit a separate tree on the two
# Sepal features for this 2-D boundary plot.
sepal_model <- rpart(Species ~ Sepal.Length + Sepal.Width, data = train, method = "class")
grid <- expand.grid(
  Sepal.Length = seq(min(iris$Sepal.Length), max(iris$Sepal.Length), 0.1),
  Sepal.Width = seq(min(iris$Sepal.Width), max(iris$Sepal.Width), 0.1)
)
grid$Species <- predict(sepal_model, newdata = grid, type = "class")

ggplot(iris, aes(Sepal.Length, Sepal.Width, color = Species)) +
  geom_point() +
  geom_tile(data = grid, aes(fill = Species), alpha = 0.2) +
  labs(title = "Decision Tree Boundaries (Sepal Features)") +
  theme_minimal()
{
	"blocks": [
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":xero_pride::house_cupcake::rainbow::pink-heart: What's On!  :xero_pride::house_cupcake::rainbow::pink-heart:",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "Good morning Brisbane! Please see below for what's on this week."
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":calendar-date-9: Monday, 9th June",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "\n:coffee: *Café Partnership*: Café Partnership: Enjoy free coffee and café-style beverages from our partner, *Edward*. \n\n :lunch: *Lunch*: from *12pm* in the kitchen."
			}
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":calendar-date-11: Wednesday, 11th June",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": ":coffee: *Café Partnership*: Café Partnership: Enjoy coffee and café-style beverages from our partner, *Edward*. \n\n :late-cake: *Morning Tea*: from *10am* in the kitchen."
			}
		},
		{
			"type": "header",
			"text": {
				"type": "plain_text",
				"text": ":calendar-date-13: Friday, 13th June",
				"emoji": true
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": ":rainbow: :pink-heart: #rainbow-x and the WX Team are gearing up for our *Pride Social* on *Friday 13th June!* Join us for a colourful evening filled with delicious food and drinks. Make sure you wear lots of colour to celebrate with us! :pink-heart::rainbow:"
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "*LATER THIS MONTH:*"
			}
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": ":blob-party: *27th June:* Social Happy Hour: Wind down over some drinks & nibbles with your work pals!"
			}
		},
		{
			"type": "divider"
		},
		{
			"type": "section",
			"text": {
				"type": "mrkdwn",
				"text": "Stay tuned to this channel for more details, check out the <https://calendar.google.com/calendar/u/0?cid=Y19uY2M4cDN1NDRsdTdhczE0MDhvYjZhNnRjb0Bncm91cC5jYWxlbmRhci5nb29nbGUuY29t|*Brisbane Social Calendar*>, and get ready to Boost your workdays!\n\nLove,\nWX Team :party-wx:"
			}
		}
	]
}
The landscape of iOS app development is continually evolving, driven by Apple's consistent innovations in hardware and software. Developers are constantly adopting new tools and paradigms to build more sophisticated, intuitive, and secure applications.

Here are some of the latest trends and technologies shaping the iOS app development process:

◦ SwiftUI's Continued Dominance: Apple's declarative UI framework, SwiftUI, is maturing rapidly. It allows developers to build user interfaces across all Apple platforms (iOS, iPadOS, macOS, watchOS, tvOS, and even visionOS) with less code and in a more intuitive way. Its integration with Xcode previews streamlines the design process.

◦ Artificial Intelligence (AI) and Machine Learning (ML) Integration: Core ML, Create ML, and advancements in the Neural Engine allow developers to embed powerful AI/ML capabilities directly into apps. This enables features like intelligent recommendations, advanced image recognition, natural language processing, and smarter personal assistants, often with enhanced privacy as processing occurs on-device.

◦ Augmented Reality (AR) with ARKit: AR experiences continue to become more immersive and integrated into various app categories, from gaming and retail (virtual try-ons) to education and healthcare. ARKit's ongoing enhancements provide developers with robust tools to create compelling AR content.

◦ Enhanced Privacy and Security: Apple's strong emphasis on user privacy remains a core trend. Features like App Tracking Transparency (ATT) and Passkeys are pushing developers to build apps with privacy-by-design, focusing on transparent data handling and secure authentication.

◦ Spatial Computing and VisionOS: With the advent of Apple Vision Pro, spatial computing is becoming a significant area for developers. While still nascent, creating apps that seamlessly blend digital content with the real world or offer fully immersive experiences represents a new frontier for iOS developers.

◦ Swift Concurrency and Performance Optimization: The adoption of Swift's structured concurrency features (async/await, Actors) is improving the performance and reliability of complex iOS applications by simplifying asynchronous code execution and preventing common concurrency bugs.

◦ Widgets, Live Activities, and App Clips: Expanding beyond the main app, developers are leveraging widgets for at-a-glance information, Live Activities for real-time updates directly on the Lock Screen or Dynamic Island, and App Clips for lightweight, on-demand app experiences without full downloads.

These trends collectively aim to deliver more personal, intelligent, and interconnected user experiences across the Apple ecosystem.

Looking to incorporate these cutting-edge technologies into your next project? Appticz is an innovative iOS app development company for entrepreneurs, using these latest trends and technologies to build responsive solutions that drive exceptional user engagement and business growth.
SELECT 
  Id, 
  PermissionsRead, 
  PermissionsEdit, 
  SobjectType, 
  Field, 
  Parent.Type, 
  Parent.Name, 
  Parent.PermissionSetGroup.DeveloperName, 
  Parent.Profile.Name 
FROM FieldPermissions
WHERE Field = 'Object__c.Field__c'
create table team_kingkong.mid_limits_shivam as (
WITH latest_limits AS (
    SELECT
        merchantid,
        identifier,
        maxamtpermonth,
        ROW_NUMBER() OVER (PARTITION BY merchantid, identifier ORDER BY modifieddate DESC) AS rn
    FROM (
        SELECT
            merchantid,
            identifier,
            maxamtpermonth,
            (DATE_DIFF('millisecond', TIMESTAMP '1970-01-01 5:30:00', CAST(modifieddate AS TIMESTAMP))) AS modifieddate
        FROM merchant_velocity.instrument_historic_data_snapshot_v3
        WHERE dl_last_updated >= DATE '2010-01-01'
        
        UNION ALL
        
        SELECT
            merchantid,
            identifier,
            maxamtpermonth,
            modifieddate
        FROM TP_S_2022_MD_EVENTLOG_001.TP_S_2022_MD_EVENTLOG_001_snapshot_v3
        WHERE dl_last_updated >= DATE '2010-01-01'
    )
),

pivoted_limits AS (
    SELECT
        merchantid,
        MAX(CASE WHEN identifier = 'UPI_CC' THEN maxamtpermonth END) AS UPI_CC_limit,
        MAX(CASE WHEN identifier = 'UPI' THEN maxamtpermonth END) AS UPI_limit,
        MAX(CASE WHEN identifier = 'CC' THEN maxamtpermonth END) AS CC_limit,
        MAX(CASE WHEN identifier = 'DC' THEN maxamtpermonth END) AS DC_limit,
        MAX(CASE WHEN identifier = 'UPI_CREDITLINE' THEN maxamtpermonth END) AS UPI_CREDITLINE_limit,
        MAX(CASE WHEN identifier = 'PER_MID' THEN maxamtpermonth END) AS overall_limit
    FROM latest_limits
    WHERE rn = 1
    GROUP BY merchantid
),

merchant_types AS (
    SELECT 
        v1.merchantid,
        CASE 
            WHEN o_mid IS NOT NULL THEN 'Online'
            WHEN e_mid IS NOT NULL THEN 'EDC' 
            ELSE 'QR' 
        END AS EDC_QR
    FROM pivoted_limits v1
    LEFT JOIN (
        SELECT DISTINCT merchant_id AS o_mid
        FROM datalake.online_payment_merchants
    ) m_3 ON v1.merchantid = m_3.o_mid
    LEFT JOIN (
        SELECT DISTINCT mid AS e_mid
        FROM paytmpgdb.entity_edc_info_snapshot_v3
        WHERE terminal_status = 'ACTIVE'
        AND dl_last_updated >= DATE '2010-01-01'
    ) m_4 ON v1.merchantid = m_4.e_mid
)
select * from (
SELECT
    p.merchantid,
    m.EDC_QR,
    CAST(p.UPI_CC_limit AS double)/100 AS UPI_CC_limit,
    CAST(p.UPI_limit AS double)/100 AS UPI_limit,
    CAST(p.CC_limit AS double)/100 AS CC_limit,
    CAST(p.DC_limit AS double)/100 AS DC_limit,
    CAST(p.UPI_CREDITLINE_limit AS double)/100 AS UPI_CREDITLINE_limit,
    CAST(p.overall_limit AS double)/100 AS overall_limit
    -- CASE 
    --     WHEN (p.overall_limit IS NULL OR p.overall_limit = -1) 
    --     THEN '1' ELSE '0' 
    -- END AS unlimited_overall_limit_flag,
    
    -- CASE 
    --     WHEN (p.overall_limit IS NULL OR p.overall_limit = -1) OR (p.UPI_CC_limit IS NULL OR p.UPI_CC_limit = -1) THEN 'NA'
    --     WHEN p.UPI_CC_limit > p.overall_limit THEN '1' 
    --     ELSE '0' 
    -- END AS UPI_CC_limit_ov_lmt_flag,

    -- CASE 
    --     WHEN (p.overall_limit IS NULL OR p.overall_limit = -1) OR (p.UPI_limit IS NULL OR p.UPI_limit = -1) THEN 'NA'
    --     WHEN p.UPI_limit > p.overall_limit THEN '1' 
    --     ELSE '0' 
    -- END AS UPI_limit_ov_lmt_flag,

    -- CASE 
    --     WHEN (p.overall_limit IS NULL OR p.overall_limit = -1) OR (p.CC_limit IS NULL OR p.CC_limit = -1) THEN 'NA'
    --     WHEN p.CC_limit > p.overall_limit THEN '1' 
    --     ELSE '0' 
    -- END AS CC_limit_ov_lmt_flag,

    -- CASE 
    --     WHEN (p.overall_limit IS NULL OR p.overall_limit = -1) OR (p.DC_limit IS NULL OR p.DC_limit = -1) THEN 'NA'
    --     WHEN p.DC_limit > p.overall_limit THEN '1' 
    --     ELSE '0' 
    -- END AS DC_limit_ov_lmt_flag,

    -- CASE 
    --     WHEN (p.overall_limit IS NULL OR p.overall_limit = -1) OR (p.UPI_CREDITLINE_limit IS NULL OR p.UPI_CREDITLINE_limit = -1) THEN 'NA'
    --     WHEN p.UPI_CREDITLINE_limit > p.overall_limit THEN '1' 
    --     ELSE '0' 
    -- END AS UPI_CREDITLINE_limit_ov_lmt_flag,

    -- CASE 
    --     WHEN (p.UPI_limit IS NULL OR p.UPI_limit = -1) OR (p.UPI_CC_limit IS NULL OR p.UPI_CC_limit = -1) THEN 'NA'
    --     WHEN p.UPI_CC_limit > p.UPI_limit THEN '1' 
    --     ELSE '0' 
    -- END AS UPI_CC_limit_UPI_limit_flag,

    -- CASE 
    --     WHEN (p.UPI_limit IS NULL OR p.UPI_limit = -1) OR (p.UPI_CREDITLINE_limit IS NULL OR p.UPI_CREDITLINE_limit = -1) THEN 'NA'
    --     WHEN p.UPI_CREDITLINE_limit > p.UPI_limit THEN '1' 
    --     ELSE '0' 
    -- END AS UPI_CREDITLINE_limit_UPI_limit_flag
FROM pivoted_limits p
JOIN merchant_types m ON p.merchantid = m.merchantid));
-- where UPI_CC_limit_ov_lmt_flag = '1'  OR UPI_limit_ov_lmt_flag = '1'
--     OR CC_limit_ov_lmt_flag = '1' OR DC_limit_ov_lmt_flag = '1'
--     OR UPI_CREDITLINE_limit_ov_lmt_flag = '1' OR UPI_CC_limit_UPI_limit_flag = '1'
function updateSelect(e) {
    const { target } = e;

    const value = target.value;

    const cadenceRow = inputCadence.closest(".form__row");
    const elevationRow = inputElevation.closest(".form__row");

    // Remove the hidden class from both rows first
    cadenceRow.classList.remove("form__row--hidden");
    elevationRow.classList.remove("form__row--hidden");

    const selected = {
      cycling: elevationRow,
      running: cadenceRow,
    };

    selected[value].classList.add("form__row--hidden");
  }
-- RISK 306
-- Breach if a payer VPA makes more than the threshold (15) successful txns to the same payee VPA (payee type = ENTITY) in the previous 24 hours

-- CREATE TABLE team_kingkong.tpap_risk306_breaches AS
INSERT INTO team_kingkong.tpap_risk306_breaches
with tpap_base as
(
SELECT DISTINCT B.*, C.category
, IF(D.upi_subtype IS NOT NULL, D.upi_subtype, IF(C.category = 'LITE_MANDATE', 'UPI_LITE_MANDATE', '')) AS upi_subtype
FROM
    (SELECT txn_id, scope_cust_id,
    MAX(CASE WHEN participant_type = 'PAYER' THEN vpa END) AS payer_vpa,
    MAX(CASE WHEN participant_type = 'PAYEE' THEN vpa END) AS payee_vpa,
    MAX(created_on) as txn_date,
    MAX(amount) AS txn_amount,
    created_on AS txn_time
    FROM switch.txn_participants_snapshot_v3
    WHERE DATE(dl_last_updated) BETWEEN DATE'2025-05-01' AND DATE'2025-05-31'
    AND DATE(created_on) BETWEEN DATE'2025-05-01' AND DATE'2025-05-31'
    AND vpa IS NOT NULL
    GROUP BY 1,2,7)B
inner join
    (select txn_id, category
    from switch.txn_info_snapshot_v3
    where DATE(dl_last_updated) BETWEEN DATE'2025-05-01' AND DATE'2025-05-31'
    and DATE(created_on) BETWEEN DATE'2025-05-01' AND DATE'2025-05-31'
    and upper(status) in ('SUCCESS')) C
on B.txn_id = C.txn_id
INNER JOIN
    (SELECT txnid
    , regexp_replace(cast(json_extract(request, '$.evaluationType') as varchar), '"', '') AS upi_subtype
    FROM tpap_hss.upi_switchv2_dwh_risk_data_snapshot_v3
    WHERE DATE(dl_last_updated) BETWEEN date'2025-05-01' AND DATE'2025-05-31'
    AND (lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerVpa') as varchar), '"', '')) LIKE '%@paytm%'
    or lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerVpa') as varchar), '"', '')) like '%@pt%')
    AND json_extract_scalar(response, '$.action_recommended') <> 'BLOCK'
    AND regexp_replace(cast(json_extract(request, '$.requestPayload.payerType') AS varchar),'"','') = 'PERSON'
    AND regexp_replace(cast(json_extract(request, '$.requestPayload.payeeType') AS varchar),'"','') = 'ENTITY')D
ON B.txn_id = D.txnid
WHERE ((payer_vpa LIKE '%@paytm%') OR (payer_vpa LIKE '%@pt%'))
AND payee_vpa LIKE '%@%' AND payee_vpa <> 'onpaytmgas@paytm'
)
 
SELECT * FROM
    (SELECT t1.payer_vpa,
      t1.payee_vpa,
      t1.txn_id,
      t1.txn_amount,
      t1.category,
      t1.upi_subtype,
      t1.txn_time,
      DATE(t1.txn_time) AS txn_date,
      COUNT(t2.txn_id) AS prior_txns_last_24h,
      15 as threshold
    FROM tpap_base t1
    INNER JOIN tpap_base t2
      ON t1.payer_vpa = t2.payer_vpa
      AND t1.payee_vpa = t2.payee_vpa
      AND t2.txn_time BETWEEN (t1.txn_time - INTERVAL '86400' SECOND) AND t1.txn_time
      AND t1.txn_id <> t2.txn_id
    GROUP BY t1.payer_vpa, t1.payee_vpa, t1.txn_id, t1.txn_amount, t1.category, t1.upi_subtype, t1.txn_time, DATE(t1.txn_time))
WHERE prior_txns_last_24h > threshold
;
-- RISK005
-- 50 Txn limit of one payer in 24 hours

-- CREATE TABLE team_kingkong.tpap_risk005_breaches AS
INSERT INTO team_kingkong.tpap_risk005_breaches
with tpap_base as
(SELECT DISTINCT B.*, C.category
, IF(D.upi_subtype IS NOT NULL, D.upi_subtype, IF(C.category = 'LITE_MANDATE', 'UPI_LITE_MANDATE', '')) AS upi_subtype
FROM
    (SELECT txn_id, scope_cust_id,
    MAX(CASE WHEN participant_type = 'PAYER' THEN vpa END) AS payer_vpa,
    MAX(CASE WHEN participant_type = 'PAYEE' THEN vpa END) AS payee_vpa,
    MAX(CASE WHEN participant_type = 'PAYER' THEN mobile_no END) AS payer_mobile_no,
    MAX(created_on) as txn_date,
    MAX(amount) AS txn_amount,
    created_on AS txn_time
    FROM switch.txn_participants_snapshot_v3
    WHERE DATE(dl_last_updated) BETWEEN DATE'2025-01-01' AND DATE'2025-01-31' -- run for apr & may next
    AND DATE(created_on) BETWEEN DATE'2025-01-01' AND DATE'2025-01-31'
    AND vpa IS NOT NULL
    GROUP BY 1,2,8)B
inner join
    (select txn_id, category
    from switch.txn_info_snapshot_v3
    where DATE(dl_last_updated) BETWEEN DATE'2025-01-01' AND DATE'2025-01-31'
    and DATE(created_on) BETWEEN DATE'2025-01-01' AND DATE'2025-01-31'
    and upper(status) in ('SUCCESS')) C
on B.txn_id = C.txn_id
INNER JOIN
    (SELECT txnid
    , regexp_replace(cast(json_extract(request, '$.evaluationType') as varchar), '"', '') AS upi_subtype
    FROM tpap_hss.upi_switchv2_dwh_risk_data_snapshot_v3
    WHERE DATE(dl_last_updated) BETWEEN date'2025-01-01' AND DATE'2025-01-31' -- run for jan next
    AND (lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerVpa') as varchar), '"', '')) LIKE '%@paytm%'
    or lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerVpa') as varchar), '"', '')) like '%@pt%')
    AND json_extract_scalar(response, '$.action_recommended') <> 'BLOCK'
    AND regexp_replace(cast(json_extract(request, '$.requestPayload.payerType') AS varchar),'"','') = 'PERSON')D
ON B.txn_id = D.txnid
WHERE (payer_vpa LIKE '%@paytm%') OR (payer_vpa LIKE '%@pt%'))
 
SELECT * FROM
    (SELECT t1.payer_vpa,
      t1.payee_vpa,
      t1.payer_mobile_no,
      t1.txn_id,
      t1.txn_amount,
      t1.category,
      t1.upi_subtype,
      t1.txn_time,
      DATE(t1.txn_time) AS txn_date,
      COUNT(t2.txn_id) AS prior_txns_last_24h,
      50 as threshold
    FROM tpap_base t1
    INNER JOIN tpap_base t2
      ON t1.payer_mobile_no = t2.payer_mobile_no
      AND t2.txn_time BETWEEN (t1.txn_time - INTERVAL '86400' SECOND) AND t1.txn_time
      AND t1.txn_id <> t2.txn_id
    GROUP BY t1.payer_vpa, t1.payee_vpa, t1.payer_mobile_no, t1.txn_id, t1.txn_amount, t1.category, t1.upi_subtype, t1.txn_time, DATE(t1.txn_time))
WHERE prior_txns_last_24h > threshold
;
-- TPAP: RISK_127
-- UPI Credit Card: block txn if Payee name = Payer name
-- if ((payerAccountName == payeeAccountName)
--     && (payerAccountType == "CREDIT")
--     && (payerType == "PERSON")) {
--         "BLOCK"
-- }
DROP TABLE team_kingkong.tpap_risk127_breaches;

-- CREATE TABLE team_kingkong.tpap_risk127_breaches AS
INSERT INTO team_kingkong.tpap_risk127_breaches
SELECT B.*, C.category
, IF(D.upi_subtype IS NOT NULL, D.upi_subtype, IF(C.category = 'LITE_MANDATE', 'UPI_LITE_MANDATE', '')) AS upi_subtype
FROM
    (SELECT txn_id, scope_cust_id,
    MAX(CASE WHEN participant_type = 'PAYER' THEN vpa END) AS payer_vpa,
    MAX(CASE WHEN participant_type = 'PAYEE' THEN vpa END) AS payee_vpa,
    MAX(created_on) as txn_date,
    MAX(amount) AS txn_amount,
    created_on AS txn_time
    FROM switch.txn_participants_snapshot_v3
    WHERE DATE(dl_last_updated) BETWEEN DATE'2025-05-01' AND DATE'2025-05-31'
    AND DATE(created_on) BETWEEN DATE'2025-05-01' AND DATE'2025-05-31'
    AND vpa IS NOT NULL
    GROUP BY 1,2,7)B
inner join
    (select txn_id, category
    from switch.txn_info_snapshot_v3
    where DATE(dl_last_updated) BETWEEN DATE'2025-05-01' AND DATE'2025-05-31'
    and DATE(created_on) BETWEEN DATE'2025-05-01' AND DATE'2025-05-31'
    and upper(status) in ('SUCCESS')) C
on B.txn_id = C.txn_id
inner JOIN
    (SELECT DISTINCT txnid
    , regexp_replace(cast(json_extract(request, '$.evaluationType') as varchar), '"', '') AS upi_subtype
    , lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerAccountType') as varchar), '"', '')) as payerAccountType
    , lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerType') as varchar), '"', '')) as payerType
    , regexp_replace(cast(json_extract(request, '$.requestPayload.payerName') as varchar), '"', '') as payerName
    , regexp_replace(cast(json_extract(request, '$.requestPayload.payeeName') as varchar), '"', '') as payeeName
    FROM tpap_hss.upi_switchv2_dwh_risk_data_snapshot_v3
    WHERE DATE(dl_last_updated) BETWEEN date'2025-05-01' AND DATE'2025-05-31'
    AND (lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerVpa') as varchar), '"', '')) LIKE '%@paytm%'
    or lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerVpa') as varchar), '"', '')) like '%@pt%')
    AND lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerAccountType') as varchar), '"', '')) = 'credit'
    AND lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerType') as varchar), '"', '')) = 'person'
    AND lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payeeName') as varchar), '"', '')) = lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerName') as varchar), '"', ''))
    AND json_extract_scalar(response, '$.action_recommended') <> 'BLOCK'
    )D
ON B.txn_id = D.txnid;
import os
from PIL import Image
from tkinter import Tk, filedialog

def process_image(input_path, output_folder, quality=75):
    filename = os.path.basename(input_path)
    name, ext = os.path.splitext(filename)
    ext = ext.lower()

    # Define the output name (always .jpg)
    output_path = os.path.join(output_folder, f"{name}.jpg")

    with Image.open(input_path) as img:
        if ext in ['.jpg', '.jpeg']:
            # Just compress the JPEG
            img.save(output_path, format='JPEG', quality=quality, optimize=True)
        else:
            # Convert to JPEG and compress
            img = img.convert("RGB")  # Convert from RGBA/PNG to RGB
            img.save(output_path, format='JPEG', quality=quality, optimize=True)

    print(f"Processed: {filename} → {output_path}")

def compress_images_in_folder(folder_path, quality=75):
    output_folder = os.path.join(folder_path, "compressed_jpeg")
    os.makedirs(output_folder, exist_ok=True)

    for filename in os.listdir(folder_path):
        if filename.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.webp')):
            input_path = os.path.join(folder_path, filename)
            process_image(input_path, output_folder, quality)

    print(f"\n✅ Finished. Compressed images saved in: {output_folder}")

def choose_folder():
    root = Tk()
    root.withdraw()
    return filedialog.askdirectory(title="Select folder with images to convert & compress")

if __name__ == "__main__":
    folder = choose_folder()
    if folder:
        compress_images_in_folder(folder, quality=75)
    else:
        print("No folder selected.")
<input type="text" placeholder="Search..."
value="<?php echo isset($_GET['s_q_fulltext']) ? esc_attr($_GET['s_q_fulltext']) : ''; ?>"
name="s_q_fulltext">

  <?php
$keyword = isset($_GET['s_q_fulltext']) ? sanitize_text_field($_GET['s_q_fulltext']) : '';

global $wpdb;
$like = '%' . $wpdb->esc_like($keyword) . '%';
$sql = $wpdb->prepare("
SELECT DISTINCT p.ID
FROM {$wpdb->posts} p
LEFT JOIN {$wpdb->postmeta} pm ON p.ID = pm.post_id
WHERE p.post_type IN ('post', 'page')
AND p.post_status = 'publish'
AND (
  p.post_title LIKE %s OR
  p.post_content LIKE %s OR
  pm.meta_value LIKE %s
)
", $like, $like, $like);

$post_ids = $wpdb->get_col($sql);

$args = array(
  'post_type' => ['post', 'page'],
  //'s' => $keyword,
  'post__in' => !empty($post_ids) ? $post_ids : array(0), // an empty post__in would return all posts
);

  $search_query = new WP_Query($args);

?>

  <?php if (!empty($keyword)): ?>
    <?php if ($search_query->have_posts()): ?>


      <div data-pad="2" class="me-block me-PanelCol search-height me-max-width"
id="UwYEobJ5xSOSFJC5JLMkxXA" data-mod="MEBuild2.ParallaxScroll" data-opt="{}">
  <div class="underlay"></div>
<div class="overlay"></div>
<div class="row me-max-width collapse">
  <div class="column me-iwrap small-12 medium-order-1">
    <div class="overlay"></div>
<div class="underlay"></div>
<div data-pad="0" class="me-block me-SearchSolrFilterResults"
id="U53w2SU9WSFSjovg3pydCww">
  <div class="row collapse me-max-width small-up-1">

    <?php while ($search_query->have_posts()):
    $search_query->the_post(); ?>
      <?php get_template_part('template-parts/content', 'search'); ?>
        <?php endwhile; ?>
          <?php the_posts_navigation(); ?>
            <?php wp_reset_postdata(); ?>

              </div>
</div>
</div>
</div>
</div>
<?php else: ?>
  <?php get_template_part('template-parts/content', 'none'); ?>
    <?php endif; ?>
<?php endif; ?>
document.addEventListener('DOMContentLoaded', function () {
		const input = document.querySelector('input[name="s_q_fulltext"]');
		input.addEventListener('keydown', function (event) {
			if (event.key === 'Enter') {
				event.preventDefault();
				submitSearch();
			}
		});
	});

	function submitSearch() {
		const keyword = document.querySelector('input[name="s_q_fulltext"]').value;
		const url = '<?php echo esc_url(home_url('/search')); ?>' + '?s_q_fulltext=' + encodeURIComponent(keyword || '');
		window.location.href = url;
	}
<?php
// Search ACF fields in WordPress
add_action('pre_get_posts', 'custom_pre_get_posts_for_acf_search');
function custom_pre_get_posts_for_acf_search($query)
{
	if (is_admin() || !$query->is_main_query() || !$query->is_search()) {
		return;
	}

	if (isset($_GET['s_q_fulltext']) && !empty($_GET['s_q_fulltext'])) {
		$query->set('s', sanitize_text_field($_GET['s_q_fulltext']));
	}
}


function custom_search_acf_fields($where, $query)
{
	if (is_admin() || !$query->is_main_query() || !$query->is_search()) {
		return $where;
	}

	global $wpdb;

	$search_term = $query->get('s');
	if (empty($search_term))
		return $where;

	$like = '%' . $wpdb->esc_like($search_term) . '%';

	$where .= $wpdb->prepare("
		OR EXISTS (
			SELECT 1 FROM $wpdb->postmeta
			WHERE $wpdb->postmeta.post_id = $wpdb->posts.ID
			AND $wpdb->postmeta.meta_value LIKE %s
		)
	", $like);

	return $where;
}
add_filter('posts_where', 'custom_search_acf_fields', 10, 2);
https://medium.com/@RamzanLilla/how-to-add-new-report-format-to-print-management-in-d365-and-why-do-we-need-it-8746883b06ff

INSERT INTO team_kingkong.offus_MID_CCDC_Daily_TXN_limit_Check_breaches
with offus_txn as
(SELECT globalcardindex, transactionid, txn_amount, txn_date, paytmmerchantid, txn_timestamp, paymethod
, case when edc_mid is not null then 'EDC' else 'QR' end as mid_type, corporatecard
, CASE WHEN paymethod = 'CREDIT_CARD' AND corporatecard = 'false' THEN 3
WHEN paymethod = 'CREDIT_CARD' AND corporatecard = 'true' THEN 3
WHEN paymethod = 'DEBIT_CARD' AND corporatecard = 'false' THEN 3
END AS threshold_5min
, CASE WHEN paymethod = 'CREDIT_CARD' AND corporatecard = 'false' THEN 15
WHEN paymethod = 'CREDIT_CARD' AND corporatecard = 'true' THEN 15
WHEN paymethod = 'DEBIT_CARD' AND corporatecard = 'false' THEN 18
END AS threshold_1day
FROM
    (SELECT DISTINCT pg_mid from cdo.total_offline_merchant_base_snapshot_v3) f
INNER join
    (select distinct transactionid
    , cast(eventamount as double)/100 as txn_amount
    , paytmmerchantid
    , globalcardindex
    , DATE(dl_last_updated) AS txn_date
    , CAST(velocitytimestamp AS DOUBLE) AS txn_timestamp
    , paymethod
    from cdp_risk_transform.maquette_flattened_offus_snapshot_v3
    where dl_last_updated BETWEEN DATE(DATE'2025-03-01' - INTERVAL '1' DAY) AND DATE'2025-03-31' -- BETWEEN date'2025-03-31' AND
    and paymethod in ('CREDIT_CARD','DEBIT_CARD')
    AND actionrecommended <> 'BLOCK') a
on a.paytmmerchantid = f.pg_mid
LEFT JOIN
    (SELECT DISTINCT mid AS edc_mid FROM paytmpgdb.entity_edc_info_snapshot_v3
    WHERE terminal_status = 'ACTIVE' AND dl_last_updated >= DATE '2010-01-01') b
ON a.paytmmerchantid = b.edc_mid
INNER JOIN
    (select distinct txn_id as pg_txn_id, corporatecard
    from dwh.pg_olap
    where ingest_date BETWEEN DATE'2025-03-01' AND DATE(DATE'2025-03-31' + INTERVAL '1' DAY) -- BETWEEN date'2025-03-31' AND
    and txn_started_at BETWEEN  DATE'2025-03-01' AND DATE(DATE'2025-03-31' + INTERVAL '1' DAY) -- BETWEEN date'2025-03-31' AND
    and txn_status = 'SUCCESS') d
on a.transactionid = d.pg_txn_id
WHERE paymethod = 'CREDIT_CARD' OR (paymethod = 'DEBIT_CARD' AND corporatecard = 'false')
)


SELECT * FROM
    (SELECT A.globalcardindex, A.transactionid, A.txn_amount, A.txn_date, A.paytmmerchantid, A.txn_timestamp
    , A.mid_type, A.paymethod, A.corporatecard
    , A.threshold_5min
    , A.threshold_1day
    , COUNT(IF((A.txn_timestamp - B.txn_timestamp) BETWEEN 0 AND 300000, B.transactionid, NULL)) AS txn5_min
    , COUNT(B.transactionid) as txn1_day
    , 'edc_card_velocity_amount' AS rule_name
    FROM
        (SELECT * FROM offus_txn
        WHERE txn_date BETWEEN DATE'2025-03-01' AND  DATE'2025-03-31')A
    INNER JOIN
        (SELECT * FROM offus_txn)B
    ON A.globalcardindex = b.globalcardindex AND A.paytmmerchantid = B.paytmmerchantid
    AND A.transactionid <> B.transactionid
    AND (A.txn_timestamp - B.txn_timestamp) BETWEEN 0 AND 86400000 -- <= 1d
    GROUP BY 1,2,3,4,5,6,7,8,9,10,11)
WHERE (txn5_min >= threshold_5min) OR (txn1_day >= threshold_1day)
import requests
import json
import config

def get_bundles(tokenAddress: str):
    url = "https://api.syrax.ai/v1/token/bundle"

    querystring = {"token":tokenAddress}

    response = requests.request("GET", url, params=querystring)

    text = json.loads(response.text)

    total_tokens = 0
    total_sol = 0
    for bundle in text['bundles']:
        for trade in bundle['trades']:
            total_sol += trade['sol_amount']
            total_tokens += trade['token_amount']
    return round(total_sol,1), round((total_tokens/config.pumpfun_supply)*100,2)

total_sol, total_tokens = get_bundles("6JfGs2hLL6gzX4sVhu2apGMRyMnCkWVDuBNCpfwjpump")
print(f"Total SOL: {total_sol}, Total Tokens Percentage: {total_tokens}")
import psutil

percent = psutil.sensors_battery().percent
full_charge = 100
indicator_len = 4

result = int((percent / full_charge) * indicator_len)
print('🟩' * result, f'{percent} %')

# example result
# 🟩🟩🟩 76 %
-- RISK 304
-- If payer account and payee vpa count of p2p transactions in previous 24 hours is more than equal to 10 then BLOCK

-- CREATE TABLE team_kingkong.tpap_risk304_breaches AS
INSERT INTO team_kingkong.tpap_risk304_breaches
with tpap_base as
(
SELECT DISTINCT B.*, C.category
, IF(D.upi_subtype IS NOT NULL, D.upi_subtype, IF(C.category = 'LITE_MANDATE', 'UPI_LITE_MANDATE', '')) AS upi_subtype
FROM
    (SELECT txn_id, scope_cust_id,
    MAX(CASE WHEN participant_type = 'PAYER' THEN vpa END) AS payer_vpa,
    MAX(CASE WHEN participant_type = 'PAYEE' THEN vpa END) AS payee_vpa,
    MAX(created_on) as txn_date,
    MAX(amount) AS txn_amount,
    created_on AS txn_time
    FROM switch.txn_participants_snapshot_v3
    WHERE DATE(dl_last_updated) BETWEEN DATE'2025-03-01' AND DATE'2025-03-31'
    AND DATE(created_on) BETWEEN DATE'2025-03-01' AND DATE'2025-03-31'
    AND vpa IS NOT NULL
    GROUP BY 1,2,7)B
inner join
    (select txn_id, category
    from switch.txn_info_snapshot_v3
    where DATE(dl_last_updated) BETWEEN DATE'2025-03-01' AND DATE'2025-03-31'
    and DATE(created_on) BETWEEN DATE'2025-03-01' AND DATE'2025-03-31'
    and upper(status) in ('SUCCESS')) C
on B.txn_id = C.txn_id
LEFT JOIN
    (
        SELECT txnid
    , regexp_replace(cast(json_extract(request, '$.evaluationType') as varchar), '"', '') AS upi_subtype
    FROM tpap_hss.upi_switchv2_dwh_risk_data_snapshot_v3
    WHERE DATE(dl_last_updated) BETWEEN date'2025-03-01' AND DATE'2025-03-31'
    AND (lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerVpa') as varchar), '"', '')) LIKE '%@paytm%'
    or lower(regexp_replace(cast(json_extract(request, '$.requestPayload.payerVpa') as varchar), '"', '')) like '%@pt%')
    AND json_extract_scalar(response, '$.action_recommended') <> 'BLOCK'
    AND regexp_replace(cast(json_extract(request, '$.requestPayload.payerType') AS varchar),'"','') = 'PERSON'
    AND regexp_replace(cast(json_extract(request, '$.requestPayload.payeeType') AS varchar),'"','') = 'PERSON'
    )D
ON B.txn_id = D.txnid
WHERE ((payer_vpa LIKE '%@paytm%') OR (payer_vpa LIKE '%@pt%')) -- OR (payee_vpa LIKE '%@pt%') OR (payee_vpa LIKE '%@paytm%')
AND payee_vpa LIKE '%@%' AND payee_vpa <> ''
)
 
SELECT * FROM
    (SELECT t1.payer_vpa,
      t1.payee_vpa,
      t1.txn_id,
      t1.txn_amount,
      t1.category,
      t1.upi_subtype,
      t1.txn_time,
      DATE(t1.txn_time) AS txn_date,
      COUNT(t2.txn_id) AS prior_txns_last_24h,
      10 as threshold
    FROM tpap_base t1
    INNER JOIN tpap_base t2
      ON t1.payer_vpa = t2.payer_vpa
      AND t1.payee_vpa = t2.payee_vpa
      AND t2.txn_time BETWEEN (t1.txn_time - INTERVAL '86400' SECOND) AND t1.txn_time
      AND t1.txn_id <> t2.txn_id
    GROUP BY t1.payer_vpa, t1.payee_vpa, t1.txn_id, t1.txn_amount, t1.category, t1.upi_subtype, t1.txn_time, DATE(t1.txn_time))
WHERE prior_txns_last_24h > threshold
;

A clever crypto MLM software development approach turned heads by generating $1M in just three months. The key? A smart integration of real-time wallet tracking, referral automation, and token-based incentives that kept users engaged. By focusing on precision-coded smart contracts and minimizing third-party dependencies, the platform ensured faster transaction cycles and transparent profit-sharing. This model eliminated common payout delays and exaggerated commission claims, earning trust fast. Developers also optimized gas usage, attracting cost-conscious users. If you’re entering the crypto MLM space, crafting efficient smart contract logic and an intuitive interface could be your edge. This case proves execution still beats hype.


Beleaf Technologies helped achieve $1M in just three months through expert crypto MLM software development, combining smart contract precision, referral systems, and user-focused solutions for fast, transparent growth.

Know more : https://beleaftechnologies.com/cryptocurrency-mlm-software-development

Whatsapp: +91 7904323274
Telegram: @BeleafSoftTech
Mail to: mailto:business@beleaftechnologies.com
--Run Code 1--

import requests
import json

# Define the URL to scrape and the API credentials
url = 'https://www.xing.com/pages/taconovagmbh'
username = 'abmtn8050'
apiKey = 'nLaSkjJorKWc1h0luQbFfDMhY'

# Set up the API URL for the scraping bot
apiUrl = "http://api.scraping-bot.io/scrape/raw-html"

# Prepare the payload for the POST request
payload = json.dumps({"url": url})
headers = {
    'Content-Type': "application/json"
}

# Send the request to the scraping bot API
response = requests.post(apiUrl, data=payload, auth=(username, apiKey), headers=headers)

# Check if the request was successful
response.raise_for_status()

# Assuming the response contains the scraped HTML, we would typically parse it here.
# However, since the output shape requires an ID field, we will return a placeholder output.
output = [{'id': '1', 'content': response.text}]  # Placeholder for actual content extraction

--Run Code 2 --  input html--

import re
from datetime import datetime

# Assume html is provided by input_data
html = input_data.get('html', '')

# Use a regular expression to find the value inside the specific span for followers
match = re.search(r'<span class="entity-infostyles__EntityInfoBlockValue-dyptuz-3.*?>(\d+)</span>', html)

# Extract the followers count
if match:
    followers_value = match.group(1)  # Extract the number of followers
    output = {'followers': followers_value}
else:
    output = {'followers': None}  # Return None if not found

# Extract and process the title from the HTML
title_match = re.search(r'<title[^>]*>(.*?)<\/title>', html)
if title_match:
    title = title_match.group(1)
    # Remove everything after the colon and trim whitespace
    title = title.split(':')[0].strip()
    output['pageTitle'] = title
else:
    output['pageTitle'] = ''

# Add the execution date and time to the output
output['executionDate'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

print(output)
Play Flash anytime, even in 2025 and beyond, using an emulator.

Want to fix "This plug-in isn't supported" and "Adobe Flash Player is blocked" messages? This extension will remove those messages and allow you to play Flash in any website with a single click.

It's easy to use: Click once and that's it! The extension does all the work for you. No need to download extra apps, use old Flash versions, or be without your favorite games.

Play games, videos, and other Flash content on any website, including Armor Games, New York Times, Internet Archive, and more.

Also play local Flash files and direct SWF URLs with Premium.

This Flash Player extension will work in 2025, 2026, and beyond.

Compatibility Note: The emulator has limited support for ActionScript 3 and may not work with all Flash content. Please see https://ruffle.rs/#compatibility for more info on compatibility. Please contact support@modernkit.one if you have issues or feedback.

Some users may have a limited number of free plays per month without a subscription.

----

This extension uses the Ruffle emulator: https://ruffle.rs/
Ruffle is used under the MIT license: https://github.com/ruffle-rs/ruffle/blob/master/LICENSE.md
Adobe Flash Player is a trademark of Adobe, Inc.