Snippets Collections
# Defining a method for collecting the urls we want to scrape
def scrape_perfume_links(url):
    """Collect perfume detail-page links from a Parfumo listing page.

    Parameters
    ----------
    url : str
        Listing-page URL to scrape.

    Returns
    -------
    list[str]
        The href of the first anchor inside each ``div.name`` element.
        Entries with no anchor or no href are skipped.
    """
    headers = {
        "User-Agent": "Insert user agent",  # TODO: supply a real UA string
        "Referer": "https://www.parfumo.com/",
        "Accept-Language": "en-US,en;q=0.9"
    }
    # Use the session as a context manager so the connection pool is
    # closed deterministically (the original leaked the session).
    with requests.Session() as session:
        page = session.get(url, headers=headers)
        page.raise_for_status()
    soup3 = BeautifulSoup(page.content, 'html.parser')

    perfume_links = []
    for perfume in soup3.find_all('div', class_='name'):
        anchor = perfume.find('a')
        # Bug fix: the original did perfume.find('a')['href'] unguarded and
        # raised TypeError whenever a 'name' div had no anchor / no href.
        if anchor is not None and anchor.get('href'):
            perfume_links.append(anchor['href'])

    return perfume_links
# Defining a method for scraping the web page
def scrape_perfume_data(url):
    """Scrape name, brand, ratings and the note pyramid from one perfume page.

    Parameters
    ----------
    url : str
        Perfume detail-page URL.

    Returns
    -------
    dict
        Keys: PerfumeName, Brand, ReleaseYear, OverallRating, RatingCount,
        Perfumer, TopNotes, HeartNotes, BaseNotes, ScrapingDate.  Any field
        not found on the page is None.
    """
    headers = {
        "User-Agent": "Insert user agent"  # TODO: supply a real UA string
    }
    page = requests.get(url, headers=headers)
    page.raise_for_status()
    soup = BeautifulSoup(page.content, 'html.parser')

    # Every field defaults to None so a partially populated page still
    # yields a complete record (replaces the fragile `in locals()` hack).
    perfume_name = brand_name = release_year = None
    overall_rating = rating_count = perfumer = None

    try:
        perfume_name = soup.find('h1', class_='p_name_h1', itemprop='name').get_text().strip().split('\n')[0].strip()
    except AttributeError:
        pass

    try:
        brand_span = soup.find('span', itemprop='brand')
        brand_name = brand_span.find('span', itemprop='name').get_text().strip()
    except AttributeError:
        pass

    try:
        year = soup.find('a', href=lambda href: href and 'Release_Years' in href)
        release_year = year.get_text().strip()
    except AttributeError:
        pass

    try:
        overall_rating = soup.find('span', itemprop='ratingValue').get_text().strip()
    except AttributeError:
        pass

    try:
        rating_count = soup.find('span', itemprop='ratingCount').get_text().strip()
    except AttributeError:
        pass

    try:
        perfumer = soup.find('div', {'class': 'w-100 mt-0-5 mb-3'}).get_text().strip()
    except AttributeError:
        pass

    def _notes(css_class):
        # One pyramid level (top/heart/base); None when the level is absent.
        block = soup.find('div', class_=css_class)
        if block is None:
            return None
        return [span.get_text(strip=True)
                for span in block.find_all('span', class_='clickable_note_img')]

    top_note_list = _notes('pyramid_block nb_t w-100 mt-2')
    heart_note_list = _notes('pyramid_block nb_m w-100 mt-2')
    base_note_list = _notes('pyramid_block nb_b w-100 mt-2')

    return {
        'PerfumeName': perfume_name,
        'Brand': brand_name,
        'ReleaseYear': release_year,
        'OverallRating': overall_rating,
        'RatingCount': rating_count,
        'Perfumer': perfumer,
        'TopNotes': top_note_list,
        'HeartNotes': heart_note_list,
        'BaseNotes': base_note_list,
        # Bug fix: scraping_date was computed but never returned.
        'ScrapingDate': datetime.date.today(),
    }
Custom Extraction >> Regex

["']datePublished["']: *["'](.*?)["']
["']dateModified["']: *["'](.*?)["']
class Solution {
public:
    // Sort the characters of s so that higher-frequency characters come
    // first.  Counts with an ordered map, buckets the counts into a
    // multimap (ascending by frequency, insertion order kept for ties),
    // emits each character `count` times, then reverses so the most
    // frequent run lands at the front.
    string frequencySort(string s) {
        map<char, int> freq;
        for (char c : s)
            ++freq[c];

        multimap<int, char> byCount;
        for (const auto& kv : freq)
            byCount.insert({kv.second, kv.first});

        string result;
        for (const auto& entry : byCount)
            result.append(entry.first, entry.second);

        reverse(result.begin(), result.end());
        return result;
    }
};
[
    {
        "$match": {
            "userStatus": "ACTIVE",
            "partnerShortCode": {
                "$in": [
                    "CHC",
                    "DRB",
                    "DRL",
                    "DUMMY",
                    "GLOBANT",
                    "IHL",
                    "JIVA",
                    "MINDTREE",
                    "MM",
                    "SHUDDHI",
                    "SMITFIT",
                    "SUDLIFE",
                    "test201",
                    "wipro"
                ]
            }
        }
    },
    {
        "$lookup": {
            "from": "generatedparticipantdata",
            "localField": "email",
            "foreignField": "email",
            "as": "generatedParticipantDataLookupLkp"
        }
    },
    {
        "$unwind": {
            "path": "$generatedParticipantDataLookupLkp",
            "preserveNullAndEmptyArrays": true
        }
    },
    {
        "$match": {}
    },
    {
        "$lookup": {
            "from": "subscription",
            "localField": "_id",
            "foreignField": "userId",
            "as": "subscriptionLkp"
        }
    },
    {
        "$project": {
            "subscriptionLkp": {
                "$arrayElemAt": [
                    {
                        "$filter": {
                            "input": "$subscriptionLkp",
                            "as": "subscription",
                            "cond": {
                                "$eq": [
                                    "$$subscription.active",
                                    true
                                ]
                            }
                        }
                    },
                    0
                ]
            },
            "partnerShortCode": 1,
            "firstName": 1,
            "_id": 1,
            "middleName": 1,
            "lastName": 1,
            "email": 1,
            "mobile": 1,
            "source": 1,
            "lastAppLaunchDate": "$userLkp.lastAppLaunchDate",
            "daysSpentOnApp": 1,
            "bmi": "$generatedParticipantDataLookupLkp.bmi",
            "profilePictureURL": 1,
            "startDate":{
                            "$arrayElemAt": [
                                "$subscriptionLkp.startDate",
                                0
                            ]
                        },
            "journeyDays": {
                "$dateDiff": {
                    "startDate": {
                        "$toDate": {
                            "$arrayElemAt": [
                                "$subscriptionLkp.startDate",
                                0
                            ]
                        }
                    },
                    "endDate": {
                        "$date": "2024-05-21T04:04:07.418Z"
                    },
                    "unit": "day"
                }
            }
        }
    },
    {
        "$unwind": {
            "path": "$subscriptionLkp",
            "preserveNullAndEmptyArrays": true
        }
    },
    {
        "$lookup": {
            "from": "subscriptionPlan",
            "localField": "subscriptionLkp.planId",
            "foreignField": "_id",
            "as": "subscriptionPlanLkp"
        }
    },
    {
        "$unwind": {
            "path": "$subscriptionPlanLkp",
            "preserveNullAndEmptyArrays": true
        }
    },
    {
        "$match": {}
    },
    {
        "$match": {
            "journeyDays": {
                "$gte": 200,
                "$lte": 300
            }
        }
    },
    {
        "$lookup": {
            "from": "users",
            "localField": "email",
            "foreignField": "email",
            "as": "userLkp"
        }
    },
    {
        "$unwind": {
            "path": "$userLkp",
            "preserveNullAndEmptyArrays": true
        }
    },
    {
        "$sort": {
            "_id": -1
        }
    },
    {
        "$project": {
            "name": "$subscriptionPlanLkp.name",
            "partnerShortCode": 1,
            "firstName": 1,
            "middleName": 1,
            "lastName": 1,
            "email": 1,
            "mobile": 1,
            "source": 1,
            "lastAppLaunchDate": "$userLkp.lastAppLaunchDate",
            "daysSpentOnApp": 1,
            "bmi": 1,
            "profilePictureURL": 1,
            "servicePartnerShortCode": "$userLkp.servicePartnerShortCode",
            "userVisibility": "$userLkp.userVisibility",
            "journeyDays": 1,
            "startDate":1
        }
    }
]
class Solution {
public:
    // Decide whether s maps onto t via a consistent, invertible
    // per-character substitution.
    bool isIsomorphic(string s, string t) {
        if (s.length() != t.length())
            return false;

        // forward : first t-char recorded for each s-char
        // backward: first s-char recorded for each t-char
        // (map::insert keeps the first mapping and ignores later ones)
        map<char, char> forward, backward;
        for (size_t i = 0; i < s.length(); ++i) {
            forward.insert({s[i], t[i]});
            backward.insert({t[i], s[i]});
        }

        // Validate every position against the smaller of the two maps;
        // ties pick the backward map, matching the >= comparison.
        map<char, char> chosen;
        string reference;
        if (forward.size() >= backward.size()) {
            chosen = backward;
            reference = s;
        } else {
            chosen = forward;
            reference = t;
        }

        for (size_t j = 0; j < s.length(); ++j) {
            if (reference[j] != chosen[t[j]])
                return false;
        }
        return true;
    }
};
# Showcasing the data for cluster 0

cluster_0_df = df_trimmed[df_trimmed['ClustersK'] == 0]

# Build a histogram grid: five plots per row, one per feature column.
variable_names = [col for col in cluster_0_df.columns if col != 'ClustersK']
colors = ['#2e2237']
n_variables = len(variable_names)
n_rows = (n_variables - 1) // 5 + 1  # ceil(n_variables / 5)
fig, axes = plt.subplots(n_rows, 5, figsize=(15, 3 * n_rows), squeeze=False)
flat_axes = axes.flatten()

# Fill the grid row by row (row-major order matches axes[row, col]).
for idx, variable in enumerate(variable_names):
    ax = flat_axes[idx]
    cluster_0_df[variable].plot.hist(ax=ax, bins=20, color=colors)
    ax.set_title(f'Distribution of {variable}')
    ax.set_xlabel(variable)
    ax.set_ylabel('Frequency')

# Remove the unused axes left over in the final row.
for idx in range(n_variables, n_rows * 5):
    fig.delaxes(flat_axes[idx])
plt.tight_layout()
plt.show()
# Defining the number of clusters (K)
num_clusters = 4

# Initialising and fitting the KMeans model; cluster labels are written
# back onto the trimmed frame.
kmeans = KMeans(n_clusters=num_clusters, n_init=10)
cluster_labels = kmeans.fit_predict(dim_ds)
df_trimmed['ClustersK'] = cluster_labels
# Initiating PCA to reduce dimensions aka features to 3 and transforming the data
pca = PCA(n_components=3)
dim_ds = pd.DataFrame(
    pca.fit_transform(df_scaled),
    columns=['column1', 'column2', 'column3'],
)
dim_ds.describe().T
# Applying z-score normalisation
scaler = StandardScaler()

# Fitting the scaler to the data and transforming it
df_scaled = pd.DataFrame(
    scaler.fit_transform(df_trimmed),
    columns=df_trimmed.columns,
)
# Defining a spend column based on individual categories
df['TotalSpend'] = (
    df['Wines'] + df['Fruits'] + df['Meat']
    + df['Fish'] + df['Sweets'] + df['Gold']
)
# Fleshing out the data around parenthood
df['N_minors_home'] = df['Kidhome'] + df['Teenhome']
df['Parent'] = np.where(df['N_minors_home'] > 0, 1, 0)
# Cleaning the Education column
education_map = {
    'Basic': 'Sec school',
    '2n Cycle': 'Masters',
    'Graduation': 'Bachelors',
    'Master': 'Masters',
    'PhD': 'Doctorate',
}
df['Education'] = df['Education'].replace(education_map)
# Cleaning the Marital status column
marital_map = {'Alone': 'Single', 'Widow': 'Widowed', 'Together': 'Dating'}
df['Marital_Status'] = df['Marital_Status'].replace(marital_map)
df = df[~df['Marital_Status'].isin(['YOLO', 'Absurd'])]
# Exploring the unique values in the Marital status column
df['Marital_Status'].unique()
# Removing outliers
df = df[df['Income'] <= 200000]
df = df.dropna(subset=['Income'])
# Exploring the outliers for age
customers_over_100 = df[df['Age'] > 100]
num_customers_over_100 = len(customers_over_100)

print('N of customers over the age of 100:', num_customers_over_100)
# Creating an age column based on the year of birth
current_year = dt.datetime.now().year
df['Age'] = current_year - df['Year_Birth']
-- Determining what factors were more prevalent across all collisions
-- Each CASE sums the collisions with that flag set; * 100.0 / COUNT(*)
-- converts the sum to a percentage of all collisions.
SELECT 
    COUNT(*) AS TOTAL_COLLISIONS,
    (SUM(CASE WHEN INATTENTIONIND = 1 THEN 1 ELSE 0 END) * 100.0 / COUNT(*)) AS PERC_INATTENTIONIND,
    (SUM(CASE WHEN UNDERINFL = 1 THEN 1 ELSE 0 END) * 100.0 / COUNT(*)) AS PERC_UNDERINFL,
    (SUM(CASE WHEN SPEEDING = 1 THEN 1 ELSE 0 END) * 100.0 / COUNT(*)) AS PERC_SPEEDING,
    (SUM(CASE WHEN HITPARKEDCAR = 1 THEN 1 ELSE 0 END) * 100.0 / COUNT(*)) AS PERC_HITPARKEDCAR,
    (SUM(CASE WHEN POORDRIVINGCOND = 1 THEN 1 ELSE 0 END) * 100.0 / COUNT(*)) AS PERC_POORDRIVINGCOND,
    (SUM(CASE WHEN NIGHTTIME = 1 THEN 1 ELSE 0 END) * 100.0 / COUNT(*)) AS PERC_NIGHTTIME,
    (SUM(CASE WHEN intersection_related = 1 THEN 1 ELSE 0 END) * 100.0 / COUNT(*)) AS PERC_INTERSECT_RELATED
FROM PortfolioProject.dbo.SeattleCollision;
-- Examining the number of collisions that ended in injuries vs fatalities
-- The 1.0 literal forces decimal (not integer) division for the percentages.
SELECT 
    COUNT(*) AS TOTAL_COLLISIONS,
    SUM(CASE WHEN FATALITIES > 0 THEN 1 ELSE 0 END) AS TOTAL_FATAL_COLLISIONS,
    (SUM(CASE WHEN FATALITIES > 0 THEN 1.0 ELSE 0 END) / COUNT(*)) * 100.0 AS PERCENTAGE_FATAL_COLLISIONS,
    SUM(CASE WHEN INJURIES > 0 THEN 1 ELSE 0 END) AS TOTAL_INJURIES,
    (SUM(CASE WHEN INJURIES > 0 THEN 1.0 ELSE 0 END) / COUNT(*)) * 100.0 AS PERCENTAGE_INJURIES
FROM 
    PortfolioProject.dbo.SeattleCollision;
-- Calculating the number of collisions per year and n of people affected
SELECT
    YEAR(DATE_UPD) AS COLLISION_YEAR,
    COUNT(*) AS COLLISIONS_PER_YEAR,
    SUM(PERSONCOUNT) AS TOTAL_AFFECTED
FROM PortfolioProject.dbo.SeattleCollision
GROUP BY YEAR(DATE_UPD)
ORDER BY COLLISIONS_PER_YEAR DESC;
-- Adding more descriptive labels to the Severity column
ALTER TABLE PortfolioProject.dbo.SeattleCollision
ADD SEVERITY nvarchar(255);
UPDATE PortfolioProject.dbo.SeattleCollision
SET SEVERITY = CASE 
		WHEN SEVERITYCODE = 3 THEN '3—fatality'
		WHEN SEVERITYCODE = 2 THEN '2—injury'
		WHEN SEVERITYCODE = 1 THEN '1—property damage'
		ELSE '0—unknown'
	END;
-- Creating a filter for Nighttime 
-- Anything that is not explicitly 'Daylight' is treated as night.
ALTER TABLE PortfolioProject.dbo.SeattleCollision
ADD NIGHTTIME bit;
UPDATE PortfolioProject.dbo.SeattleCollision
SET NIGHTTIME = CASE 
		WHEN TIMEOFDAY = 'Daylight' THEN 0
		ELSE 1
	END;
-- Creating a function for determining Daylight vs Nighttime
-- Looks up the sunrise/sunset stored for the collision's month in
-- SeattleSunriseSunset and classifies the collision time against them.
CREATE FUNCTION FINDTIMEOFDAY(
    @CollisionDate DATE,
    @CollisionTime TIME)
RETURNS VARCHAR(8)
AS
BEGIN
    DECLARE @MONTHN INT;
    DECLARE @SUNRISE TIME;
    DECLARE @SUNSET TIME;
    SELECT @MONTHN = MONTH(@CollisionDate);
    SELECT @SUNRISE = SUNRISE, @SUNSET = SUNSET
    FROM PortfolioProject.dbo.SeattleSunriseSunset
    WHERE MONTHN = @MONTHN;
    RETURN CASE 
        WHEN @CollisionTime >= @SUNRISE AND @CollisionTime < @SUNSET THEN 'Daylight'
        ELSE 'Night'
    END;
END;
-- Creating a filter for good vs poor driving conditions
-- Conservative split: only clear weather AND a dry road counts as good.
ALTER TABLE PortfolioProject.dbo.SeattleCollision
ADD POORDRIVINGCOND bit;
UPDATE PortfolioProject.dbo.SeattleCollision
SET POORDRIVINGCOND = CASE 
		WHEN WEATHER = 'Clear' AND ROADCOND = 'Dry' THEN 0
		ELSE 1
	END;
-- Converting the values from the 'time' column into readable time formats
-- NOTE(review): assumes TIME stores fractional hours (e.g. 13.5 = 13:30),
-- hence TIME * 60 minutes past midnight — confirm against the raw data.
ALTER TABLE PortfolioProject.dbo.SeattleCollision
ADD N_TIME TIME;

UPDATE PortfolioProject.dbo.SeattleCollision
SET N_TIME = CAST(DATEADD(MINUTE, TIME * 60, '00:00') AS TIME);

-- Style 108 renders the value as hh:mm:ss.
ALTER TABLE PortfolioProject.dbo.SeattleCollision
ADD TIME_UPD NVARCHAR(255);

UPDATE PortfolioProject.dbo.SeattleCollision
SET TIME_UPD = CONVERT(NVARCHAR(255), N_TIME, 108);
-- Identifying the total number of collisions
SELECT count(*)
FROM PortfolioProject.dbo.SeattleCollision
-- Ordering the columns, filtering out low rating counts and missing isbn rows and non-sci-fi books
SELECT id, isbn, rating_100, rating_count, numberofreviews, title, author, publishing_year, genre_science_fiction
FROM PortfolioProject.dbo.Goodreads
WHERE genre_science_fiction ='Yes' AND rating_count > 30 AND isbn IS NOT NULL
ORDER BY rating DESC
-- Populating the genre flags from the pipe-delimited genres string.
-- NOTE: snippets in this file are pasted newest-first; the ALTER TABLE
-- below must run before this UPDATE so the flag columns exist.
UPDATE PortfolioProject.dbo.Goodreads
SET genre_fantasy = CASE WHEN genres LIKE '%fantasy%' THEN 'Yes' ELSE 'No' END,
    genre_romance = CASE WHEN genres LIKE '%romance%' THEN 'Yes' ELSE 'No' END,
    genre_young_adult = CASE WHEN genres LIKE '%young adult%' THEN 'Yes' ELSE 'No' END,
    genre_paranormal = CASE WHEN genres LIKE '%paranormal%' THEN 'Yes' ELSE 'No' END,
    genre_classics = CASE WHEN genres LIKE '%classics%' THEN 'Yes' ELSE 'No' END,
    genre_science_fiction = CASE WHEN genres LIKE '%science fiction%' THEN 'Yes' ELSE 'No' END,
    genre_mystery = CASE WHEN genres LIKE '%mystery%' THEN 'Yes' ELSE 'No' END,
    genre_childrens = CASE WHEN genres LIKE '%childrens%' THEN 'Yes' ELSE 'No' END,
    genre_adventure = CASE WHEN genres LIKE '%adventure%' THEN 'Yes' ELSE 'No' END;
-- Creating filter columns based on the genre hits
ALTER TABLE PortfolioProject.dbo.Goodreads
ADD genre_fantasy NVARCHAR(255),
    genre_romance NVARCHAR(255),
    genre_young_adult NVARCHAR(255),
    genre_paranormal NVARCHAR(255),
    genre_classics NVARCHAR(255),
    genre_science_fiction NVARCHAR(255),
    genre_mystery NVARCHAR(255),
    genre_childrens NVARCHAR(255),
    genre_adventure NVARCHAR(255);
-- Exploring the most popular tags in the genres column
-- STRING_SPLIT explodes the '|'-delimited genres into one row per tag.
SELECT TOP 100
	value [word],
	COUNT(*) [#times]
FROM  PortfolioProject.dbo.Goodreads
CROSS APPLY STRING_SPLIT(Goodreads.genres, '|') 
GROUP BY value
ORDER BY COUNT(*) DESC
-- Cleaning up the Genres column
-- Strips the '/genres/' URL prefix, de-hyphenates, and pads the delimiter.
UPDATE PortfolioProject.dbo.Goodreads
SET genres = REPLACE(REPLACE(REPLACE(genres, '/genres/', ''), '-', ' '), '|', ' | ');
-- Splitting out the authors column based on the author_url column
-- PARSENAME grabs the last dot-separated segment of the URL (the author slug).
ALTER TABLE PortfolioProject.dbo.Goodreads
ADD author NVARCHAR(255);

UPDATE PortfolioProject.dbo.Goodreads
SET author = REPLACE(PARSENAME(REPLACE(author_url, ',', '.'), 1), '_', ' ');
-- Removing duplicate rows
-- Keeps only the row with the smallest id for this isbn.
DELETE FROM PortfolioProject.dbo.Goodreads
WHERE isbn = '0439023483'
AND id <> (SELECT MIN(id)
           FROM PortfolioProject.dbo.Goodreads
           WHERE isbn = '0439023483');
-- Identifying duplicate values
SELECT isbn, COUNT(*)
FROM PortfolioProject.dbo.Goodreads
GROUP BY isbn
HAVING COUNT(*) > 1;

SELECT *
FROM PortfolioProject.dbo.Goodreads
WHERE isbn = '0439023483';
-- Standardising the ratings column 
-- Rescales the 0-5 rating onto a 0-100 scale.
ALTER TABLE PortfolioProject.dbo.Goodreads
ADD rating_100 float;
UPDATE PortfolioProject.dbo.Goodreads
SET rating_100 = rating * 20
 // Sorts the characters of the string ascending and prints the result
 // ("ayush" -> "ahsuy").  Fragment of a main(), pasted without context.
 string s= "ayush";
    sort(s.begin(),s.end());
    cout<<s;
-- Updating the missing values to read as NULL
-- The import left the literal string 'None' where data was missing;
-- convert those placeholders to real NULLs so filters/aggregates behave.
UPDATE PortfolioProject.dbo.Goodreads SET isbn = NULL WHERE isbn ='None';
UPDATE PortfolioProject.dbo.Goodreads SET media_type = NULL WHERE media_type ='None';
UPDATE PortfolioProject.dbo.Goodreads SET author_url = NULL WHERE author_url ='None';
UPDATE PortfolioProject.dbo.Goodreads SET title = NULL WHERE title='None';
UPDATE PortfolioProject.dbo.Goodreads SET genres = NULL WHERE genres='None';

-- Clearing the rows where most of the data is missing
-- NOTE(review): assumes a NULL media_type marks the mostly-empty rows —
-- confirm before deleting.
DELETE FROM PortfolioProject.dbo.Goodreads WHERE media_type is null
// Returns whether goal occurs as a substring of str (string::find yields
// string::npos when absent).  The same fragment was saved twice verbatim.
if(str.find(goal) != string::npos)
           {return true;}
           else{return false;}
if(str.find(goal) != string::npos)
           {return true;}
           else{return false;}
-- Examining the table for null values
-- COUNT(col) skips NULLs, so COUNT(*) - COUNT(col) is the NULL count.
SELECT COUNT(*)-COUNT(rating) AS rating
	 , COUNT(*)-COUNT(numberofreviews) AS n_of_reviews
	 , COUNT(*)-COUNT(isbn) AS isbn
	 , COUNT(*)-COUNT(publishing_year) AS publishing_year
	 , COUNT(*)-COUNT(genres) AS genres
	 , COUNT(*)-COUNT(rating_count) AS rating_count
	 , COUNT(*)-COUNT(title) AS title
FROM PortfolioProject.dbo.Goodreads;
-- Renaming the columns 
-- The import used the first data row as the header, so every column is
-- currently named after a value from the 'The Hunger Games' record.
EXEC sp_rename 'Goodreads.4#4', 'rating';
EXEC sp_rename 'Goodreads.136455', 'numberofreviews';
EXEC sp_rename 'Goodreads.0439023483', 'isbn';
EXEC sp_rename 'Goodreads.good_reads:book', 'media_type';
EXEC sp_rename 'Goodreads.https://www#goodreads#com/author/show/153394#Suzanne_Collins', 'author_url';
EXEC sp_rename 'Goodreads.2008', 'publishing_year';
EXEC sp_rename 'Goodreads./genres/young-adult|/genres/science-fiction|/genres/dystopia|/ge', 'genres';
EXEC sp_rename 'Goodreads.dir01/2767052-the-hunger-games#html', 'directory';
EXEC sp_rename 'Goodreads.2958974', 'rating_count';
EXEC sp_rename 'Goodreads.The Hunger Games (The Hunger Games, #1)', 'title';
-- Duplicating the first row of data before renaming the columns
-- Re-inserts the record that was consumed as the header during import.
INSERT INTO PortfolioProject.dbo.Goodreads
VALUES (
	4.4
	, 136455
	, '0439023483'
	, 'good_reads:book'
	, 'https://www.goodreads.com/author/show/153394.Suzanne_Collins'
	, 2008
	, '/genres/young-adult|/genres/science-fiction|/genres/dystopia|/genres/fantasy|/genres/science-fiction|/genres/romance|/genres/adventure|/genres/book-club|/genres/young-adult|/genres/teen|/genres/apocalyptic|/genres/post-apocalyptic|/genres/action'
	, 'dir01/2767052-the-hunger-games.html'
	, 2958974
	, 'The Hunger Games (The Hunger Games, #1)'
	)
-- Adding an ID column to improve table navigation
ALTER TABLE PortfolioProject.dbo.Goodreads
ADD id INT IDENTITY(1,1);
Waivers, at least at this time, are not integrated directly into the site. They are only triggered once you have completed a booking that has waivers assigned to it. They populate in the confirmation notes, similar to other waivers such as Smartwaiver and Wherewolf.
If the lead is pushing to have waivers integrated directly into the sites, ask the requester to submit this to the product feature request Custom Activity in Close.
-- Adding an ID column to improve table navigation
-- NOTE(review): duplicate of the identical ALTER saved earlier in this
-- collection; running both against the same table fails because the id
-- column already exists.
ALTER TABLE PortfolioProject.dbo.Goodreads
ADD id INT IDENTITY(1,1);
Anh gửi thông tin về Chatbot đối tác SMAX đang tích hợp trên fanpage
Viettel Money, cụ thể:

1. Thông tin tích hợp



2. Luồng tích hợp



3. Kịch bản test

- Truy cập đường dẫn:
https://miro.com/app/board/uXjVKLcwGxU=/?fbclid=IwZXh0bgNhZW0CMTAAAR0KSvvl8T
KF5FlWmIZhGbTqrmvBd_Ff0fUzzcOK7JIiP-GqqvaV5x7qv08_aem_ARrV9Ba0Zsj8FWU5bPDRQK
w8_8hKtH0x_NTigEusc8UGGg4kPDGLI6tmY9wfQeD6-0Te36JRXDm4AGuxiT3hEJTS

- Hướng dẫn test:

1. Người dùng bình luận tại bài viết theo đường dẫn
https://www.facebook.com/ViettelMoney/posts/pfbid0PjCVf1DL1A74j24ECkeMrekYJF
M3bXGqirFkqDV54esJy4Vvtbm4HsCNXw7NuXZYl

2. Chatbot gửi tin nhắn từ bình luận, người dùng tương tác theo luồng
tương ứng trên messenger

(ví dụ 1 luồng khai thác số điện thoại tại ảnh đính kèm)



Trân trọng!

------------------------

Dương Đức Anh

P. QLCL & CSKH – TCT VDS

SĐT: 0964052947
// Closure demo: the returned function still sees outerVariable
// even after outerFunction has finished executing.
function outerFunction() {
    let outerVariable = "I am outside!";

    return function innerFunction() {
        console.log(outerVariable);
    };
}

const closureExample = outerFunction();
closureExample(); // Output: "I am outside!"
// Shows the modal 2 s from now, then hides it again 2 s after it appears.
setTimeout(() => {
    modalContainer.style.display = "block";
    
    
    setTimeout(() => {
        modalContainer.style.display = "none";
    }, 2000); // 2000 milliseconds = 2 seconds

}, 2000); // 2000 milliseconds = 2 seconds
// Shared counter state, rendered into the counterValue element.
let count = 0;

// Increment the counter and re-render.
function increaseCount() {
    count += 1;
    counterValue.innerText = count;
}

// Decrement the counter (never below zero) and re-render.
function decreaseCount() {
    if (count > 0) {
        count -= 1;
    }
    counterValue.innerText = count;
}

// Reset the counter to zero and re-render.
function resetCounter() {
    count = 0;
    counterValue.innerText = count;
}
star

Tue May 21 2024 15:30:17 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 15:24:08 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 13:12:44 GMT+0000 (Coordinated Universal Time)

@aguelmann

star

Tue May 21 2024 12:59:54 GMT+0000 (Coordinated Universal Time)

@ayushg103 #c++

star

Tue May 21 2024 09:24:18 GMT+0000 (Coordinated Universal Time)

@CodeWithSachin #aggregation #mongodb #todate #datediff

star

Tue May 21 2024 09:04:23 GMT+0000 (Coordinated Universal Time)

@ayushg103 #c++

star

Tue May 21 2024 08:57:06 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:56:03 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:53:00 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:51:32 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:50:35 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:49:39 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:48:22 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:47:17 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:45:36 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:43:36 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:41:18 GMT+0000 (Coordinated Universal Time)

@Uncoverit #python

star

Tue May 21 2024 08:32:28 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:30:43 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:29:39 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:28:17 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:25:26 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:22:50 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:21:49 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:20:19 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:18:24 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:04:51 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:03:45 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:02:22 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 08:00:58 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 07:59:28 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 07:58:00 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 07:57:02 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 07:52:54 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 07:51:08 GMT+0000 (Coordinated Universal Time)

@ayushg103 #c++

star

Tue May 21 2024 07:50:44 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 07:50:26 GMT+0000 (Coordinated Universal Time)

@ayushg103 #c++

star

Tue May 21 2024 07:50:26 GMT+0000 (Coordinated Universal Time)

@ayushg103 #c++

star

Tue May 21 2024 07:48:24 GMT+0000 (Coordinated Universal Time) https://github.com/Susanna-Uncover/SQL-projects/blob/Projects/1%20SQL%20Goodreads%20Data%20Cleaning%20Project.sql

@Uncoverit #sql

star

Tue May 21 2024 07:42:28 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 07:40:27 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 07:35:47 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 07:35:22 GMT+0000 (Coordinated Universal Time)

@Shira

star

Tue May 21 2024 07:19:32 GMT+0000 (Coordinated Universal Time)

@Uncoverit #sql

star

Tue May 21 2024 07:10:15 GMT+0000 (Coordinated Universal Time)

@manhmd #java

star

Tue May 21 2024 06:30:13 GMT+0000 (Coordinated Universal Time)

@ishwarpatel22

star

Tue May 21 2024 06:29:24 GMT+0000 (Coordinated Universal Time)

@ishwarpatel22

star

Tue May 21 2024 05:31:56 GMT+0000 (Coordinated Universal Time)

@ishwarpatel22

Save snippets that work with our extensions

Available in the Chrome Web Store Get Firefox Add-on Get VS Code extension