Snippets Collections
[
    {
      "$lookup": {
        "from": "Assessment",
        "localField": "assessmentId",
        "foreignField": "_id",
        "as": "assessment"
      }
    },
    {
      "$unwind": {
        "path": "$assessment",
        "preserveNullAndEmptyArrays": true
      }
    },
    {
      "$match": {
        "$and": [
          {
            "assessmentId": {
              "$oid": "622072b1aaeb4e5955319304"
            },
            "endCode": {
              "$ne": ""
            }
          }
        ]
      }
    },
    {
      "$project": {
        "_id": 1,
        "name": {
          "$cond": {
            "if": { "$ifNull": ["$assessmentType", false] },
            "then": "$assessmentType",
            "else": "IDRS"
          }
        },
        "assessmentId": "$_id",
        "ok": "$assessment.type",
        "createdAt": 1
      }
    },
    {
      "$sort": {
        "createdAt": -1
      }
    }

  ]
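
# A minimal PyMongo sketch for running the pipeline above from the notebook.
# Assumes `db` is the pymongo handle the snippets below use, and that the
# source collection is userAssessmentMapping (an assumption -- the snippet
# itself doesn't name it). The editor's Extended JSON {"$oid": ...} becomes a
# real ObjectId here.
from bson import ObjectId

pipeline = [
    {"$lookup": {"from": "Assessment", "localField": "assessmentId",
                 "foreignField": "_id", "as": "assessment"}},
    {"$unwind": {"path": "$assessment", "preserveNullAndEmptyArrays": True}},
    {"$match": {"assessmentId": ObjectId("622072b1aaeb4e5955319304"),
                "endCode": {"$ne": ""}}},
    {"$project": {
        "_id": 1,
        "name": {"$cond": {"if": {"$ifNull": ["$assessmentType", False]},
                           "then": "$assessmentType",
                           "else": "IDRS"}},
        "assessmentId": "$_id",
        "ok": "$assessment.type",
        "createdAt": 1,
    }},
    {"$sort": {"createdAt": -1}},
]

for doc in db["userAssessmentMapping"].aggregate(pipeline):
    print(doc)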
[
    {
        "$match": {
            "userId": 
                ObjectId("64d1ee7758a82e63a46206fe")
            ,
            "$and": [
                {
                    "insertedFor": {
                        "$gte": {
                            "$date": "2024-02-18T09:52:03Z"
                        }
                    }
                },
                {
                    "insertedFor": {
                        "$lte": {
                            "$date": "2024-02-20T09:52:03Z"
                        }
                    }
                }
            ]
        }
    },
    {
        "$project": {
            "insertedFor": 1
        }
    },
    {
        "$count": "total"
    }
]
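
# The {"$date": ...} literals above are Extended JSON; from Python the same
# count is simpler with datetime objects, and one field can carry both bounds,
# so the $and wrapper isn't needed. A sketch -- "someCollection" is a stand-in,
# since the snippet doesn't name the collection it ran against.
from datetime import datetime, timezone
from bson import ObjectId

start = datetime(2024, 2, 18, 9, 52, 3, tzinfo=timezone.utc)
end = datetime(2024, 2, 20, 9, 52, 3, tzinfo=timezone.utc)

pipeline = [
    {"$match": {
        "userId": ObjectId("64d1ee7758a82e63a46206fe"),
        "insertedFor": {"$gte": start, "$lte": end},
    }},
    {"$count": "total"},
]

result = list(db["someCollection"].aggregate(pipeline))
print(result[0]["total"] if result else 0)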
# 1 -> Deleting based on _id in baselineTable
# (assumes `db` is the pymongo Database handle set up earlier in the notebook)

from bson import ObjectId
import json

file_path = 'delete_ids'  # JSON file shaped like {"data": ["<hex _id>", ...]}

with open(file_path, 'r') as file:
    data = json.load(file)

data_array = data['data']

delete_object_ids = [ObjectId(item) for item in data_array]
db["participantBaselineAndFollowupData"].delete_many({"_id": {"$in": delete_object_ids}})
#to check
# records = db["participantBaselineAndFollowupData"].find({"_id": {"$in": delete_object_ids}})
# count=0
# for record in records:
#     count=count+1
#     print(record["_id"])
# print(count)
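
# delete_many returns a DeleteResult, so the commented re-query above can be
# replaced with direct counts -- a sketch using the same ids and `db` handle:
result = db["participantBaselineAndFollowupData"].delete_many(
    {"_id": {"$in": delete_object_ids}}
)
print(f"Deleted {result.deleted_count} of {len(delete_object_ids)} documents")

# anything still counted here was never deleted (or never existed)
remaining = db["participantBaselineAndFollowupData"].count_documents(
    {"_id": {"$in": delete_object_ids}}
)
print(f"{remaining} matching documents remain")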
#4 -> Refer subscription based on date (2 subscriptions)

import json
from datetime import datetime
from bson.objectid import ObjectId

with open('referSubByDate.json', 'r') as file:
    data = json.load(file)

not_found_ids = []
not_updated_ids = []

for item in data['data']:
    _id = item['id']
    date = item['date']
    date_object = datetime.strptime(date, "%d/%m/%Y")
    new_date = date_object.strftime("%Y-%m-%dT%H:%M:%S.000+00:00")  # startDate is matched as this exact ISO string
    
    participant_record = db["participantBaselineAndFollowupData"].find_one({"_id": ObjectId(_id)})
    
    if participant_record:
        user_id = participant_record["participantId"]
        
        subscription_record = db["subscription"].find_one({"userId": user_id, "startDate": new_date})
        
        if subscription_record:
            startDate = subscription_record["startDate"]
            program_code = subscription_record["subscriptionPlan"].get("programCode")
            
            result = db["participantBaselineAndFollowupData"].update_one(
                {"_id": participant_record["_id"]},
                {"$set": {"programCode": program_code, "programStartDate": startDate}}
            )
            
            if result.modified_count == 0:
                not_updated_ids.append(_id)
        else:
            not_updated_ids.append(_id)
    else:
        not_found_ids.append(_id)

print("Completed")

if not_found_ids:
    print(f"IDs not found: {not_found_ids}")

if not_updated_ids:
    print(f"IDs not updated: {not_updated_ids}")

        
    

#3 -> one baseline, 2 subs: refer the given program code
import json
from bson.objectid import ObjectId

file_path = "oneBase2Sub"
data_list = []
try:
    with open(file_path, "r") as file:
        data1 = json.load(file)  # expected shape: {"<baseline _id>": "<programCode>", ...}
    data_list = list(data1.items())
    # print(data_list)
except Exception as e:
    print("Error:", e)

print(len(data_list))

not_found_ids = []
not_updated_ids = []

for _id, program_code in data_list:

    participant_record = db["participantBaselineAndFollowupData"].find_one({"_id": ObjectId(_id)})

    if participant_record:
        user_id = participant_record["participantId"]

        subscription_record = db["subscription"].find_one({"userId": user_id, "subscriptionPlan.programCode": program_code})
        
        if subscription_record:
            program_start_date = subscription_record.get("startDate", "")
            
            result = db["participantBaselineAndFollowupData"].update_one(
                {"_id": participant_record["_id"]},
                {"$set": {"programCode": program_code, "programStartDate": program_start_date}}
            )
            
            if result.modified_count == 0:
                not_updated_ids.append(_id)
        else:
            not_updated_ids.append(_id)
    else:
        not_found_ids.append(_id)

print("Completed")


if not_found_ids:
    print(f"IDs not found: {len(not_found_ids)}")
    print(f"IDs not found: {not_found_ids}")

if not_updated_ids:
    print(f"IDs not updated: {len(not_updated_ids)}")
    print(f"IDs not updated: {not_updated_ids}")
# 2 -> 1 sub, 1 baseline
import json
from bson.objectid import ObjectId

file_path = "referOneSub"

with open(file_path, 'r') as file:
    data = json.load(file)

data_array = data['data']  # expected shape: {"data": ["<hex _id>", ...]}
insert_object_ids = [ObjectId(_id) for _id in data_array]
print(len(insert_object_ids))
# print(insert_object_ids)

not_found_ids = []
not_updated_ids = []

for _id in insert_object_ids:
    record = db["participantBaselineAndFollowupData"].find_one({"_id": _id})
    
    if record:
        user_id = record["participantId"]
        
        subscription = db["subscription"].find_one({"userId": user_id})
        
        if subscription:
            program_start_date = subscription.get("startDate", "")
            program_code = subscription["subscriptionPlan"].get("programCode", "")
            
            result = db["participantBaselineAndFollowupData"].update_one(
                {"_id": _id},
                {"$set": {"programCode": program_code, "programStartDate": program_start_date}}
            )
            
            if result.modified_count == 0:
                not_updated_ids.append(_id)
        else:
            not_updated_ids.append(_id)
    else:
        not_found_ids.append(_id)

print("Completed")

if not_found_ids:
    print(f"IDs not found: {len(not_found_ids)}")
    print(f"IDs not found: {not_found_ids}")

if not_updated_ids:
    print(f"IDs not updated: {len(not_updated_ids)}")
    print(f"IDs not updated: {not_updated_ids}")

    
// MongoDB aggregation for pagination
// (from https://stackoverflow.com/questions/48305624/how-to-use-mongodb-aggregation-for-pagination)
db.Order.aggregate([
    { $match : { company_id: ObjectId("54c0...") } },
    { $sort  : { order_number: -1 } },
    { $facet : {
        metadata: [ { $count: "total" }, { $addFields: { page: NumberInt(3) } } ],
        data: [ { $skip: 20 }, { $limit: 10 } ] // add a projection here if you want to re-shape the docs
    } }
])
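
# $skip is (page - 1) * page_size: page 3 with a page size of 10 skips 20, as
# above. A PyMongo sketch of the same $facet shape; the collection and field
# names follow the shell example and are assumptions here.
from bson import ObjectId

def paginate_orders(db, company_id, page, page_size=10):
    """Return one page of orders plus the total matching count in one query."""
    skip = (page - 1) * page_size
    pipeline = [
        {"$match": {"company_id": ObjectId(company_id)}},
        {"$sort": {"order_number": -1}},
        {"$facet": {
            "metadata": [{"$count": "total"}, {"$addFields": {"page": page}}],
            "data": [{"$skip": skip}, {"$limit": page_size}],
        }},
    ]
    facet = next(db["Order"].aggregate(pipeline))  # $facet yields exactly one doc
    total = facet["metadata"][0]["total"] if facet["metadata"] else 0
    return facet["data"], total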