Snippets Collections
[
    {
        "$match": {
            "partnerShortCode": {
                "$in": []
            }
        }
    },
    {
        "$group": {
            "_id": "$partnerShortCode",
            "clinics": {
                "$push": {
                    "clinicName": "$name",
                    "clinicId": "$_id"
                }
            }
        }
    },
    {
        "$project": {
            "_id": 0,
            "partnerShortCode": "$_id",
            "clinics": 1
        }
    }
]
[
    {
        "$match": {
            "$and": [
                { "age": { "$type": "string" } },
                { "$expr": { "$gt": [{ "$strLenCP": "$age" }, 5] } }
            ]
        }
    },
    {
        "$project": {
            "email": 1
        }
    }
]
[
    {
        $group: {
            _id: "$participantId",
            documents: { $push: "$$ROOT" },
            count: { $sum: 1 }
        }
    },
    {
        $match: {
            count: { $gt: 1 }
        }
    },
    {
        $unwind: "$documents"
    },
    {
        $match: {
            "documents.programCode": { $ne: "" }
        }
    },
    {
        $group: {
            _id: {
                participantId: "$_id",
                programCode: "$documents.programCode",
                programStartDate: "$documents.programStartDate"
            },
            baselineId: { $first: "$_id" },
            documentIds: { $push: "$documents._id" },
            documents: { $push: "$documents" },
            count: { $sum: 1 }
        }
    },
    {
        $match: {
            count: { $gt: 1 }
        }
    },
    {
        $project: {
            _id: 1,
            participantId: "$_id.participantId",
            programCode: "$_id.programCode",
            programStartDate: "$_id.programStartDate",
            baselineId: 1,
            documentIds: 1
        }
    },
    {
        $lookup: {
            "from": "participant",
            "localField": "participantId",
            "foreignField": "_id",
            "as": "temp"
        }
    },
    {
        $match: {
            "temp.userStatus": { $ne: "TEST" },
            $and: [
                { "temp.email": { $nin: [/deleted/] } },
                { "temp": { $ne: null } }
            ]
        }
    },
    {
        $unwind: "$documentIds"
    },
    {
        $project: {
            _id: 0,
            "participantId": { $arrayElemAt: ["$temp._id", 0] },
            "email": { $arrayElemAt: ["$temp.email", 0] },
            "documentIds": 1
        }
    }
]
[
    {
        $group: {
            _id: "$participantId",
            documents: { $push: "$$ROOT" },
            count: { $sum: 1 }
        }
    },
    {
        $match: {
            count: { $gt: 1 }
        }
    },
    {
        $unwind: "$documents"
    },
    {
        $match: {
            "documents.programCode": { $ne: "" }
        }
    },
    {
        $group: {
            _id: {
                participantId: "$_id",
                programCode: "$documents.programCode",
                programStartDate: "$documents.programStartDate"
            },
            documents: { $push: "$documents" },
            count: { $sum: 1 }
        }
    },
    {
        $match: {
            count: { $gt: 1 }
        }
    },
    {
        $project: {
            _id: 0,
            participantId: "$_id.participantId",
            programCode: "$_id.programCode",
            programStartDate: "$_id.programStartDate"
        }
    },
    {
        $lookup: {
            "from": "participant",
            "localField": "participantId",
            "foreignField": "_id",
            "as": "temp"
        }
    },
    {
        $match: {
            "temp.userStatus": { $ne: "TEST" }
        }
    },
    {
        $project: {
            "id": { $arrayElemAt: ["$temp._id", 0] },
            "email": { $arrayElemAt: ["$temp.email", 0] }
        }
    }
]
[
    {
        "$lookup": {
            "from": "Assessment",
            "localField": "assessmentId",
            "foreignField": "_id",
            "as": "assessment"
        }
    },
    {
        "$unwind": {
            "path": "$assessment",
            "preserveNullAndEmptyArrays": true
        }
    },
    {
        "$match": {
            "$and": [
                {
                    "assessmentId": {
                        "$oid": "622072b1aaeb4e5955319304"
                    },
                    "endCode": {
                        "$ne": ""
                    }
                }
            ]
        }
    },
    {
        "$project": {
            "_id": 1,
            "name": {
                "$cond": {
                    "if": { "$ifNull": ["$assessmentType", false] },
                    "then": "$assessmentType",
                    "else": "IDRS"
                }
            },
            "assessmentId": "$_id",
            "ok": "$assessment.type",
            "createdAt": 1
        }
    },
    {
        "$sort": {
            "createdAt": -1
        }
    }
]
[
    {
        "$match": {
            "userId": ObjectId("64d1ee7758a82e63a46206fe"),
            "$and": [
                {
                    "insertedFor": {
                        "$gte": {
                            "$date": "2024-02-18T09:52:03Z"
                        }
                    }
                },
                {
                    "insertedFor": {
                        "$lte": {
                            "$date": "2024-02-20T09:52:03Z"
                        }
                    }
                }
            ]
        }
    },
    {
        "$project": {
            "insertedFor": 1
        }
    },
    {
        "$count": "toot"
    }
]
# 1 -> Delete documents from participantBaselineAndFollowupData whose _id
# appears in the 'delete_ids' file. Expected file shape: {"data": ["<hex id>", ...]}.
# NOTE(review): `db` is assumed to be a live pymongo Database from an earlier
# notebook cell — confirm before running standalone.

from bson import ObjectId
import json

file_path = 'delete_ids'

with open(file_path, 'r') as fh:
    payload = json.load(fh)

ids_to_delete = [ObjectId(raw_id) for raw_id in payload['data']]
db["participantBaselineAndFollowupData"].delete_many({"_id": {"$in": ids_to_delete}})
# Verification — should match nothing once the delete has run:
# remaining = db["participantBaselineAndFollowupData"].find({"_id": {"$in": ids_to_delete}})
# total = 0
# for doc in remaining:
#     total += 1
#     print(doc["_id"])
# print(total)
#4-> Refer Subscription based on date. (2 subscriptions per participant: pick the
# one whose startDate matches the date supplied for each baseline _id.)
# Input file shape: {"data": [{"id": "<baseline _id hex>", "date": "dd/mm/YYYY"}, ...]}
# NOTE(review): `db` is assumed to be a live pymongo Database from an earlier
# notebook cell — confirm before running standalone.

import json
from datetime import datetime
from bson.objectid import ObjectId

with open('referSubByDate.json', 'r') as file:
    data = json.load(file)

not_found_ids = []    # baseline _ids with no matching baseline document
not_updated_ids = []  # found, but no subscription matched or the update was a no-op

for item in data['data']:
    _id = item['id']
    date = item['date']
    # Convert "dd/mm/YYYY" into the string format stored in subscription.startDate
    # (midnight, millisecond precision, "+00:00" offset) so the equality match works.
    date_object = datetime.strptime(date, "%d/%m/%Y")
    new_date = date_object.strftime("%Y-%m-%dT%H:%M:%S.000+00:00")
    
    participant_record = db["participantBaselineAndFollowupData"].find_one({"_id": ObjectId(_id)})
    
    if participant_record:
        user_id = participant_record["participantId"]
        
        # Disambiguate between the participant's subscriptions by exact startDate.
        subscription_record = db["subscription"].find_one({"userId": user_id, "startDate": new_date})
        
        if subscription_record:
            startDate = subscription_record["startDate"]
            program_code = subscription_record["subscriptionPlan"].get("programCode")
            
            # Copy the matched subscription's program info onto the baseline doc.
            result = db["participantBaselineAndFollowupData"].update_one(
                {"_id": participant_record["_id"]},
                {"$set": {"programCode": program_code, "programStartDate": startDate}}
            )
            
            # modified_count == 0 means the values were already set (or write failed).
            if result.modified_count == 0:
                not_updated_ids.append(_id)
        else:
            not_updated_ids.append(_id)
    else:
        not_found_ids.append(_id)

print("Completed")

if not_found_ids:
    print(f"IDs not found: {not_found_ids}")

if not_updated_ids:
    print(f"IDs not updated: {not_updated_ids}")

        
    

#3-> One baseline, 2 subscriptions: refer the given program code.
# Input file maps baseline _id (hex string) -> programCode: {"<id>": "<code>", ...}
# NOTE(review): `db` is assumed to be a live pymongo Database from an earlier
# notebook cell — confirm before running standalone.
import json
from bson.objectid import ObjectId

file_path = "oneBase2Sub"

# BUG FIX: data_list was only assigned inside the try block, so a load failure
# caused a NameError at the print/loop below. Default it to an empty list.
data_list = []
try:
    with open(file_path, "r") as file:
        data1 = json.load(file)
    data_list = list(data1.items())
except Exception as e:
    print("Error:", e)

print(len(data_list))

not_found_ids = []    # baseline _ids with no matching baseline document
not_updated_ids = []  # found, but no subscription matched or the update was a no-op

for _id, program_code in data_list:
    participant_record = db["participantBaselineAndFollowupData"].find_one({"_id": ObjectId(_id)})

    if participant_record:
        user_id = participant_record["participantId"]

        # Disambiguate between the participant's subscriptions by program code.
        subscription_record = db["subscription"].find_one({"userId": user_id, "subscriptionPlan.programCode": program_code})

        if subscription_record:
            program_start_date = subscription_record.get("startDate", "")

            result = db["participantBaselineAndFollowupData"].update_one(
                {"_id": participant_record["_id"]},
                {"$set": {"programCode": program_code, "programStartDate": program_start_date}}
            )

            # modified_count == 0 means the values were already set (or write failed).
            if result.modified_count == 0:
                not_updated_ids.append(_id)
        else:
            not_updated_ids.append(_id)
    else:
        not_found_ids.append(_id)

print("Completed")

if not_found_ids:
    print(f"IDs not found: {len(not_found_ids)}")
    print(f"IDs not found: {not_found_ids}")

if not_updated_ids:
    print(f"IDs not updated: {len(not_updated_ids)}")
    print(f"IDs not updated: {not_updated_ids}")
# 2-> 1 subscription, 1 baseline: copy programCode/programStartDate from the
# participant's single subscription onto each listed baseline document.
# Input file shape: {"data": ["<baseline _id hex>", ...]}
# NOTE(review): `db` is assumed to be a live pymongo Database from an earlier
# notebook cell — confirm before running standalone.
import json
from bson.objectid import ObjectId  # BUG FIX: was imported only AFTER first use

file_path = "referOneSub"

with open(file_path, 'r') as file:
    data = json.load(file)

data_array = data['data']
insert_object_ids = [ObjectId(_id) for _id in data_array]
print(len(insert_object_ids))
# print(insert_object_ids)

not_found_ids = []    # baseline _ids with no matching baseline document
not_updated_ids = []  # found, but no subscription matched or the update was a no-op

for _id in insert_object_ids:
    record = db["participantBaselineAndFollowupData"].find_one({"_id": _id})
    
    if record:
        user_id = record["participantId"]
        
        subscription = db["subscription"].find_one({"userId": user_id})
        
        if subscription:
            program_start_date = subscription.get("startDate", "")
            program_code = subscription["subscriptionPlan"].get("programCode", "")
            
            result = db["participantBaselineAndFollowupData"].update_one(
                {"_id": _id},
                {"$set": {"programCode": program_code, "programStartDate": program_start_date}}
            )
            
            # modified_count == 0 means the values were already set (or write failed).
            if result.modified_count == 0:
                not_updated_ids.append(_id)
        else:
            not_updated_ids.append(_id)
    else:
        not_found_ids.append(_id)

print("Completed")

if not_found_ids:
    # BUG FIX: was f"... {print(not_found_ids)}", which printed the list as a
    # side effect and then interpolated None into the message.
    print(f"IDs not found: {len(not_found_ids)}")
    print(f"IDs not found: {not_found_ids}")

if not_updated_ids:
    print(f"IDs not updated: {len(not_updated_ids)}")
    print(f"IDs not updated: {not_updated_ids}")
#3-> One baseline, 2 subscriptions: refer the given program code.
# Input file maps baseline _id (hex string) -> programCode: {"<id>": "<code>", ...}
# NOTE(review): `db` is assumed to be a live pymongo Database from an earlier
# notebook cell — confirm before running standalone.
import json
from bson.objectid import ObjectId  # BUG FIX: ObjectId was used with no import in this snippet

file_path = "oneBase2Sub"

# BUG FIX: data_list was only assigned inside the try block, so a load failure
# caused a NameError at the print/loop below. Default it to an empty list.
data_list = []
try:
    with open(file_path, "r") as file:
        data1 = json.load(file)
    data_list = list(data1.items())
except Exception as e:
    print("Error:", e)

print(len(data_list))

for _id, program_code in data_list:
    participant_record = db["participantBaselineAndFollowupData"].find_one({"_id": ObjectId(_id)})

    if participant_record:
        user_id = participant_record["participantId"]

        # Disambiguate between the participant's subscriptions by program code.
        subscription_record = db["subscription"].find_one({"userId": user_id, "subscriptionPlan.programCode": program_code})
        
        if subscription_record:
            program_start_date = subscription_record.get("startDate", "")
            db["participantBaselineAndFollowupData"].update_one(
                {"_id": participant_record["_id"]},
                {"$set": {"programCode": program_code, "programStartDate": program_start_date}}
            )
print("Completed")
# 2-> 1 subscription, 1 baseline: copy programCode/programStartDate from the
# participant's single subscription onto each listed baseline document.
# Input file shape: {"data": ["<baseline _id hex>", ...]}
# NOTE(review): `db` is assumed to be a live pymongo Database from an earlier
# notebook cell — confirm before running standalone.
import json
from bson.objectid import ObjectId  # BUG FIX: was imported only AFTER first use below

file_path = "referOneSub"

with open(file_path, 'r') as file:
    data = json.load(file)

data_array = data['data']
insert_object_ids = [ObjectId(_id) for _id in data_array]
print(len(insert_object_ids))
# print(insert_object_ids)

for _id in insert_object_ids:
    record = db["participantBaselineAndFollowupData"].find_one({"_id": _id})
    
    if record:
        user_id = record["participantId"]
        
        subscription = db["subscription"].find_one({"userId": user_id})
        
        if subscription:
            program_start_date = subscription.get("startDate", "")
            program_code = subscription["subscriptionPlan"].get("programCode", "")
            
            db["participantBaselineAndFollowupData"].update_one(
                {"_id": _id},
                {"$set": {"programCode": program_code, "programStartDate": program_start_date}}
            )
print("completed")
# 1 -> Remove baseline-table documents by _id.
# The 'delete_ids' file is expected to contain {"data": ["<hex id>", ...]}.
# NOTE(review): `db` is assumed to be a live pymongo Database from an earlier
# notebook cell — confirm before running standalone.

from bson import ObjectId
import json

file_path = 'delete_ids'

with open(file_path, 'r') as infile:
    loaded = json.load(infile)

object_ids = list(map(ObjectId, loaded['data']))
db["participantBaselineAndFollowupData"].delete_many({"_id": {"$in": object_ids}})
# Verification — should print nothing and a count of 0 once the delete has run:
# cursor = db["participantBaselineAndFollowupData"].find({"_id": {"$in": object_ids}})
# matched = 0
# for doc in cursor:
#     matched += 1
#     print(doc["_id"])
# print(matched)
// Paginated query via $facet: a single pass over the sorted orders returns both
// the total count (metadata) and one page of results (page 3 => skip 20, limit 10).
db.Order.aggregate([
    { '$match'    : { "company_id" : ObjectId("54c0...") } },
    { '$sort'     : { 'order_number' : -1 } },
    { '$facet'    : {
        metadata: [ { $count: "total" }, { $addFields: { page: NumberInt(3) } } ],
        data: [ { $skip: 20 }, { $limit: 10 } ] // add a projection here if you wish to re-shape the docs
    } }
] )
star

Fri Mar 22 2024 16:37:26 GMT+0000 (Coordinated Universal Time) http://34.74.16.180:3000/question#eyJkYXRhc2V0X3F1ZXJ5Ijp7InR5cGUiOiJuYXRpdmUiLCJuYXRpdmUiOnsiY29sbGVjdGlvbiI6ImNsaW5pY3MiLCJxdWVyeSI6IltcclxuICAgIHtcclxuICAgICAgICBcIiRtYXRjaFwiOiB7XHJcbiAgICAgICAgICAgIFwicGFydG5lclNob3J0Q29kZVwiOiB7XHJcbiAgICAgICAgICAgICAgICBcIiRpblwiOiBbXVxyXG4gICAgICAgICAgICB9XHJcbiAgICAgICAgfVxyXG4gICAgfSxcclxuICAgIHtcclxuICAgICAgICBcIiRncm91cFwiOiB7XHJcbiAgICAgICAgICAgIFwiX2lkXCI6IFwiJHBhcnRuZXJTaG9ydENvZGVcIixcclxuICAgICAgICAgICAgXCJjbGluaWNzXCI6IHtcclxuICAgICAgICAgICAgICAgIFwiJHB1c2hcIjoge1xyXG4gICAgICAgICAgICAgICAgICAgIFwiY2xpbmljTmFtZVwiOiBcIiRuYW1lXCIsXHJcbiAgICAgICAgICAgICAgICAgICAgXCJjbGluaWNJZFwiOiBcIiRfaWRcIlxyXG4gICAgICAgICAgICAgICAgfVxyXG4gICAgICAgICAgICB9XHJcbiAgICAgICAgfVxyXG4gICAgfSxcclxuICAgIHtcclxuICAgICAgICBcIiRwcm9qZWN0XCI6IHtcclxuICAgICAgICAgICAgX2lkOjAsXHJcbiAgICAgICAgICAgIFwicGFydG5lclNob3J0Q29kZVwiOiBcIiRfaWRcIixcclxuICAgICAgICAgICAgXCJjbGluaWNzXCI6IDFcclxuICAgICAgICB9XHJcbiAgICB9XHJcbl1cclxuIiwidGVtcGxhdGUtdGFncyI6e319LCJkYXRhYmFzZSI6Mn0sImRpc3BsYXkiOiJ0YWJsZSIsInZpc3VhbGl6YXRpb25fc2V0dGluZ3MiOnt9fQ==

#aggregation #mongodb #$push
star

Tue Mar 19 2024 16:09:32 GMT+0000 (Coordinated Universal Time)

#aggregation #mongodb
star

Tue Mar 12 2024 14:39:42 GMT+0000 (Coordinated Universal Time) http://34.74.16.180:3000/question#eyJkYXRhc2V0X3F1ZXJ5Ijp7ImRhdGFiYXNlIjoyLCJuYXRpdmUiOnsidGVtcGxhdGUtdGFncyI6e30sInF1ZXJ5IjoiW1xyXG4gICAge1xyXG4gICAgICAgICRncm91cDoge1xyXG4gICAgICAgICAgICBfaWQ6IFwiJHBhcnRpY2lwYW50SWRcIixcclxuICAgICAgICAgICAgZG9jdW1lbnRzOiB7ICRwdXNoOiBcIiQkUk9PVFwiIH0sXHJcbiAgICAgICAgICAgIGNvdW50OiB7ICRzdW06IDEgfVxyXG4gICAgICAgIH1cclxuICAgIH0sXHJcbiAgICB7XHJcbiAgICAgICAgJG1hdGNoOiB7XHJcbiAgICAgICAgICAgIGNvdW50OiB7ICRndDogMSB9XHJcbiAgICAgICAgfVxyXG4gICAgfSxcclxuICAgIHtcclxuICAgICAgICAkdW53aW5kOiBcIiRkb2N1bWVudHNcIlxyXG4gICAgfSxcclxuICAgIHtcclxuICAgICAgICAkbWF0Y2g6IHtcclxuICAgICAgICAgICAgXCJkb2N1bWVudHMucHJvZ3JhbUNvZGVcIjogeyAkbmU6IFwiXCIgfVxyXG4gICAgICAgIH1cclxuICAgIH0sXHJcbiAgICB7XHJcbiAgICAgICAgJGdyb3VwOiB7XHJcbiAgICAgICAgICAgIF9pZDoge1xyXG4gICAgICAgICAgICAgICAgcGFydGljaXBhbnRJZDogXCIkX2lkXCIsXHJcbiAgICAgICAgICAgICAgICBwcm9ncmFtQ29kZTogXCIkZG9jdW1lbnRzLnByb2dyYW1Db2RlXCIsXHJcbiAgICAgICAgICAgICAgICBwcm9ncmFtU3RhcnREYXRlOiBcIiRkb2N1bWVudHMucHJvZ3JhbVN0YXJ0RGF0ZVwiXHJcbiAgICAgICAgICAgIH0sXHJcbiAgICAgICAgICAgIGJhc2VsaW5lSWQ6IHsgJGZpcnN0OiBcIiRfaWRcIiB9LFxyXG4gICAgICAgICAgICBkb2N1bWVudElkczogeyAkcHVzaDogXCIkZG9jdW1lbnRzLl9pZFwiIH0sIFxyXG4gICAgICAgICAgICBkb2N1bWVudHM6IHsgJHB1c2g6IFwiJGRvY3VtZW50c1wiIH0sXHJcbiAgICAgICAgICAgIGNvdW50OiB7ICRzdW06IDEgfVxyXG4gICAgICAgIH1cclxuICAgIH0sXHJcbiAgICB7XHJcbiAgICAgICAgJG1hdGNoOiB7XHJcbiAgICAgICAgICAgIGNvdW50OiB7ICRndDogMSB9XHJcbiAgICAgICAgfVxyXG4gICAgfSxcclxuICAgIHtcclxuICAgICAgICAkcHJvamVjdDoge1xyXG4gICAgICAgICAgICBfaWQ6IDEsXHJcbiAgICAgICAgICAgIHBhcnRpY2lwYW50SWQ6IFwiJF9pZC5wYXJ0aWNpcGFudElkXCIsXHJcbiAgICAgICAgICAgIHByb2dyYW1Db2RlOiBcIiRfaWQucHJvZ3JhbUNvZGVcIixcclxuICAgICAgICAgICAgcHJvZ3JhbVN0YXJ0RGF0ZTogXCIkX2lkLnByb2dyYW1TdGFydERhdGVcIixcclxuICAgICAgICAgICAgYmFzZWxpbmVJZDogMSxcclxuICAgICAgICAgICAgZG9jdW1lbnRJZHM6IDEgXHJcbiAgICAgICAgfVxyXG4gICAgfSxcclxuICAgIHtcclxuICAgICAgICAkbG9va3VwOiB7XHJcbiAgICAgICAgICAgIFwiZnJvbVwiOiBcInBhcnRpY2lwYW50XCIsXHJcbiAgICA
gICAgICAgIFwibG9jYWxGaWVsZFwiOiBcInBhcnRpY2lwYW50SWRcIixcclxuICAgICAgICAgICAgXCJmb3JlaWduRmllbGRcIjogXCJfaWRcIixcclxuICAgICAgICAgICAgXCJhc1wiOiBcInRlbXBcIlxyXG4gICAgICAgIH1cclxuICAgIH0sXHJcbiAgICB7XHJcbiAgICAgICAgJG1hdGNoOiB7XHJcbiAgICAgICAgICAgIFwidGVtcC51c2VyU3RhdHVzXCI6IHsgJG5lOiBcIlRFU1RcIiB9LFxyXG4gICAgICAgICAgICAkYW5kOiBbXHJcbiAgICAgICAgICAgICAgICB7IFwidGVtcC5lbWFpbFwiOiB7ICRuaW46IFsvZGVsZXRlZC9dIH0gfSxcclxuICAgICAgICAgICAgICAgIHsgXCJ0ZW1wXCI6IHsgJG5lOiBudWxsIH0gfVxyXG4gICAgICAgICAgICBdXHJcbiAgICAgICAgfVxyXG4gICAgfSxcclxuICAgIHtcclxuICAgICAgICAkdW53aW5kOlwiJGRvY3VtZW50SWRzXCJcclxuICAgIH1cclxuICAgIHtcclxuICAgICAgICAkcHJvamVjdDoge1xyXG4gICAgICAgICAgICBfaWQ6MFxyXG4gICAgICAgICAgICBcInBhcnRpY2lwYW50SWRcIjogeyAkYXJyYXlFbGVtQXQ6IFtcIiR0ZW1wLl9pZFwiLCAwXSB9LFxyXG4gICAgICAgICAgICBcImVtYWlsXCI6IHsgJGFycmF5RWxlbUF0OiBbXCIkdGVtcC5lbWFpbFwiLCAwXSB9LFxyXG4gICAgICAgICAgICBcImRvY3VtZW50SWRzXCI6IDEgXHJcbiAgICAgICAgfVxyXG4gICAgfVxyXG5dXHJcbiIsImNvbGxlY3Rpb24iOiJwYXJ0aWNpcGFudEJhc2VsaW5lQW5kRm9sbG93dXBEYXRhIn0sInR5cGUiOiJuYXRpdmUifSwiZGlzcGxheSI6InRhYmxlIiwidmlzdWFsaXphdGlvbl9zZXR0aW5ncyI6e319

##jupyter #aggregation #mongodb
star

Fri Mar 08 2024 10:24:18 GMT+0000 (Coordinated Universal Time) http://34.74.16.180:3000/question#eyJkYXRhc2V0X3F1ZXJ5Ijp7ImRhdGFiYXNlIjoyLCJuYXRpdmUiOnsidGVtcGxhdGUtdGFncyI6e30sInF1ZXJ5IjoiW1xyXG4gICAge1xyXG4gICAgICAgIFwiJG1hdGNoXCI6IHtcclxuICAgICAgICAgICAgXCJlbWFpbFwiOiBcInByb2R1Y3RAc21pdC5maXRcIlxyXG4gICAgICAgIH1cclxuICAgIH0sXHJcbiAgICB7XHJcbiAgICAgICAgXCIkbG9va3VwXCI6IHtcclxuICAgICAgICAgICAgXCJmcm9tXCI6IFwidXNlcnNcIixcclxuICAgICAgICAgICAgXCJsb2NhbEZpZWxkXCI6IFwiZW1haWxcIixcclxuICAgICAgICAgICAgXCJmb3JlaWduRmllbGRcIjogXCJlbWFpbFwiLFxyXG4gICAgICAgICAgICBcImFzXCI6IFwidXNlckxrcFwiXHJcbiAgICAgICAgfVxyXG4gICAgfSxcclxuICAgIHtcclxuICAgICAgICBcIiR1bndpbmRcIjoge1xyXG4gICAgICAgICAgICBcInBhdGhcIjogXCIkdXNlckxrcFwiLFxyXG4gICAgICAgICAgICBcInByZXNlcnZlTnVsbEFuZEVtcHR5QXJyYXlzXCI6IHRydWVcclxuICAgICAgICB9XHJcbiAgICB9LFxyXG4gICAge1xyXG4gICAgICAgIFwiJG1hdGNoXCI6IHtcclxuICAgICAgICAgICAgXCJ1c2VyTGtwLnBvcnRhbFBhc3N3b3JkXCI6IFwic21pdEBwcm9kdWN0I0AxMjFcIlxyXG4gICAgICAgIH1cclxuICAgIH0sXHJcbiAgICB7XHJcbiAgICAgICAgXCIkcHJvamVjdFwiOiB7XHJcbiAgICAgICAgICAgIFwiZmlyc3ROYW1lXCI6IDEsXHJcbiAgICAgICAgICAgIFwibWlkZGxlTmFtZVwiOiAxLFxyXG4gICAgICAgICAgICBcImxhc3ROYW1lXCI6IDEsXHJcbiAgICAgICAgICAgIFwiZW1haWxcIjogMSxcclxuICAgICAgICAgICAgXCJtb2JpbGVcIjogMSxcclxuICAgICAgICAgICAgXCJyb2xlXCI6IDEsXHJcbiAgICAgICAgICAgIFwicGFydG5lclNob3J0Q29kZVwiOiAxLFxyXG4gICAgICAgICAgICBcInVzZXJHcm91cHNcIjogXCIkdXNlckxrcC51c2VyR3JvdXBzXCIsXHJcbiAgICAgICAgICAgIFwicHJvZmlsZVBpY3R1cmVVUkxcIjogMSxcclxuICAgICAgICAgICAgXCJwb3J0YWxQYXNzd29yZFwiOiBcIiR1c2VyTGtwLnBvcnRhbFBhc3N3b3JkXCIsXHJcbiAgICAgICAgICAgIFwiYXV0aG9yaXphdGlvblwiOiBcIiR1c2VyTGtwLmF1dGhvcml6YXRpb25cIlxyXG4gICAgICAgIH1cclxuICAgIH1cclxuXSIsImNvbGxlY3Rpb24iOiJ1c2VycyJ9LCJ0eXBlIjoibmF0aXZlIn0sImRpc3BsYXkiOiJ0YWJsZSIsInZpc3VhbGl6YXRpb25fc2V0dGluZ3MiOnt9fQ==

##jupyter #aggregation #mongodb
star

Fri Mar 01 2024 04:29:49 GMT+0000 (Coordinated Universal Time) http://34.74.16.180:3000/question#eyJkYXRhc2V0X3F1ZXJ5Ijp7InR5cGUiOiJuYXRpdmUiLCJuYXRpdmUiOnsiY29sbGVjdGlvbiI6InVzZXJBc3Nlc3NtZW50TWFwcGluZyIsInF1ZXJ5IjoiW1xuICAgIHtcbiAgICAgIFwiJGxvb2t1cFwiOiB7XG4gICAgICAgIFwiZnJvbVwiOiBcIkFzc2Vzc21lbnRcIixcbiAgICAgICAgXCJsb2NhbEZpZWxkXCI6IFwiYXNzZXNzbWVudElkXCIsXG4gICAgICAgIFwiZm9yZWlnbkZpZWxkXCI6IFwiX2lkXCIsXG4gICAgICAgIFwiYXNcIjogXCJhc3Nlc3NtZW50XCJcbiAgICAgIH1cbiAgICB9LFxuICAgIHtcbiAgICAgIFwiJHVud2luZFwiOiB7XG4gICAgICAgIFwicGF0aFwiOiBcIiRhc3Nlc3NtZW50XCIsXG4gICAgICAgIFwicHJlc2VydmVOdWxsQW5kRW1wdHlBcnJheXNcIjogdHJ1ZVxuICAgICAgfVxuICAgIH0sXG4gICAge1xuICAgICAgXCIkbWF0Y2hcIjoge1xuICAgICAgICBcIiRhbmRcIjogW1xuICAgICAgICAgIHtcbiAgICAgICAgICAgIFwiYXNzZXNzbWVudElkXCI6IHtcbiAgICAgICAgICAgICAgXCIkb2lkXCI6IFwiNjIyMDcyYjFhYWViNGU1OTU1MzE5MzA0XCJcbiAgICAgICAgICAgIH0sXG4gICAgICAgICAgICBcImVuZENvZGVcIjoge1xuICAgICAgICAgICAgICBcIiRuZVwiOiBcIlwiXG4gICAgICAgICAgICB9XG4gICAgICAgICAgfVxuICAgICAgICBdXG4gICAgICB9XG4gICAgfSxcbiAgIHtcbiAgXCIkcHJvamVjdFwiOiB7XG4gICAgXCJfaWRcIjogMSxcbiAgICBcIm5hbWVcIjoge1xuICAgICAgXCIkY29uZFwiOiB7XG4gICAgICAgIFwiaWZcIjogeyBcIiRpZk51bGxcIjogW1wiJGFzc2Vzc21lbnRUeXBlXCIsIGZhbHNlXSB9LFxuICAgICAgICBcInRoZW5cIjogXCIkYXNzZXNzbWVudFR5cGVcIixcbiAgICAgICAgXCJlbHNlXCI6IFwiSURSU1wiXG4gICAgICB9XG4gICAgfSxcbiAgICBcImFzc2Vzc21lbnRJZFwiOiBcIiRfaWRcIixcbiAgICBcIm9rXCI6IFwiJGFzc2Vzc21lbnQudHlwZVwiLFxuICAgIFwiY3JlYXRlZEF0XCI6IDFcbiAgfVxufVxuLFxuICAge1xuICAgICAgXCIkc29ydFwiOiB7XG4gICAgICAgIFwiY3JlYXRlZEF0XCI6IC0xXG4gICAgICB9XG4gICAgfVxuXG4gIF0iLCJ0ZW1wbGF0ZS10YWdzIjp7fX0sImRhdGFiYXNlIjoyfSwiZGlzcGxheSI6InRhYmxlIiwidmlzdWFsaXphdGlvbl9zZXR0aW5ncyI6e319

##jupyter #aggregation #mongodb
star

Tue Feb 20 2024 05:55:17 GMT+0000 (Coordinated Universal Time)

##jupyter #aggregation
star

Mon Jan 15 2024 07:01:35 GMT+0000 (Coordinated Universal Time)

##jupyter #aggregation
star

Mon Jan 15 2024 07:00:35 GMT+0000 (Coordinated Universal Time)

##jupyter #aggregation
star

Mon Jan 15 2024 07:00:07 GMT+0000 (Coordinated Universal Time)

##jupyter #aggregation
star

Mon Jan 15 2024 06:59:31 GMT+0000 (Coordinated Universal Time)

##jupyter #aggregation
star

Fri Jan 12 2024 13:52:46 GMT+0000 (Coordinated Universal Time)

##jupyter #aggregation
star

Fri Jan 12 2024 13:52:19 GMT+0000 (Coordinated Universal Time)

##jupyter #aggregation
star

Fri Jan 12 2024 13:51:37 GMT+0000 (Coordinated Universal Time)

##jupyter #aggregation
star

Thu Feb 16 2023 05:30:40 GMT+0000 (Coordinated Universal Time) https://stackoverflow.com/questions/48305624/how-to-use-mongodb-aggregation-for-pagination

#aggregation #mongodb #pagination

Save snippets that work with our extensions

Available in the Chrome Web Store Get Firefox Add-on Get VS Code extension