image_url | fancy_title | views | title | created_at | discussion | tags
---|---|---|---|---|---|---|
Error registering user MongooseError: Operation `users.findOne()` buffering timed out after 10000ms at Timeout | 1,129 | Error registering user MongooseError: Operation `users.findOne()` buffering timed out after 10000ms at Timeout | 2023-08-20T08:08:18.026Z | [
{
"code": "const express = require(\"express\");\nconst bodyParser = require(\"body-parser\");\nconst crypto = require(\"crypto\");\nconst nodemailer = require(\"nodemailer\");\n\nconst app = express();\nconst port = 8000;\nconst cors = require(\"cors\");\napp.use(cors());\n\napp.use(bodyParser.urlencoded({ extended: false }));\napp.use(bodyParser.json());\n\nconst jwt = require(\"jsonwebtoken\");\n\nconst mongoose = require(\"mongoose\");\nmongoose\n .connect(\"mongodb://192.168.29.166/ecommerce-project\", {\n useNewUrlParser: true,\n useUnifiedTopology: true,\n })\n .then(() => {\n console.log(\"connected to mongodb\");\n })\n .catch((err) => {\n console.log(\"error connecting to mongodb\", err);\n });\n\napp.listen(port, \"192.168.29.166\", () => {\n console.log(\"server is running on port 8000\");\n});\n\nconst User = require(\"./models/user\");\nconst Order = require(\"./models/order\");\n\n//function to send verification email to the user\n\nconst sendVerificationEmail = async (email, verificationToken) => {\n // create a nodemailer trasnsport\n\n const trasnsporter = nodemailer.createTransport({\n //configure the email service\n service: \"gmail\",\n auth: {\n user: \"[email protected]\",\n pass: \"ktyhihvqrwmqcpex\",\n },\n });\n\n //compose the email message\n const mailOptions = {\n from: \"amazon.com\",\n to: email,\n subject: \"Email Verification\",\n text: `Please click the following link to verify your email : http://localhost:8000/verify/${verificationToken}`,\n };\n\n // send the email\n try {\n await trasnsporter.sendMail(mailOptions);\n } catch (error) {\n console.log(\"Error sending verification email\", error);\n }\n};\n\n//endpoint to register in the app\n\napp.post(\"/register\", async (req, res) => {\n try {\n const { name, email, password } = req.body;\n\n //Chech if The email is already registered\n\n const existingUser = await User.findOne({ email });\n\n if (existingUser) {\n return res.status(400).json({ message: \"Email already Registered\" });\n }\n\n //create a new User\n const newUser = new User({ name, email, password });\n\n //generate and store the verification token\n newUser.verificationToken = crypto.randomBytes(20).toString(\"hex\");\n\n //save the user to the database\n await newUser.save();\n\n //send verification email to the user\n sendVerificationEmail(newUser.email, newUser.verificationToken);\n } catch (error) {\n console.log(\"error registering user\", error);\n res.status(500).json({ message: \"Registration failed\" });\n }\n});\n\n//endpoint tp verify the email\n\napp.get(\"/verify/token\", async (req, res) => {\n try {\n const token = req.params.token;\n\n //FInd the user with the given verification token\n const user = await User.findOne({ verificationToken: token });\n\n if (!user) {\n return res.status(404).json({ message: \"Invalid verification token\" });\n }\n\n //Mark the user as verified\n user, (verified = true);\n user.verificationToken = undefined;\n\n await user.save();\n\n res.status(200).json({ message: \"Email verified\" });\n } catch (error) {\n res.status(500).json({ message: \"Email veridication failed\" });\n }\n});\nconst mongoose = require(\"mongoose\");\n\nconst userSchema = new mongoose.Schema({\n name: {\n type: String,\n required: true,\n },\n email: {\n type: String,\n required: true,\n unique: true,\n },\n password: {\n type: String,\n required: true,\n },\n verified: {\n type: Boolean,\n default: false,\n },\n verificationToken: String,\n addresses: [\n {\n name: String,\n mobileNo: String,\n houseNo: String,\n 
street: String,\n landmark: String,\n city: String,\n country: String,\n postalCode: String,\n },\n ],\n orders: [\n {\n type: mongoose.Schema.Types.ObjectId,\n ref: \"Order\",\n },\n ],\n createdAt: {\n type: Date,\n default: Date.now,\n },\n});\n\nconst User = mongoose.model(\"User\", userSchema);\n\nmodule.exports = User;\n\n{\n \"name\": \"api\",\n \"version\": \"1.0.0\",\n \"description\": \"backend\",\n \"main\": \"index.js\",\n \"scripts\": {\n \"start\": \"nodemon index.js\",\n \"test\": \"echo \\\"Error: no test specified\\\" && exit 1\"\n },\n \"author\": \"\",\n \"license\": \"ISC\",\n \"dependencies\": {\n \"body-parser\": \"^1.20.2\",\n \"cors\": \"^2.8.5\",\n \"express\": \"^4.18.2\",\n \"jsonwebtoken\": \"^9.0.1\",\n \"mongoose\": \"^7.4.3\",\n \"nodemailer\": \"^6.9.4\",\n \"nodemon\": \"^3.0.1\"\n }\n}\n\n",
"text": "I’m new to MERN stack. while I was trying to do a simple register endpoint, I’m getting the below error in the command prompt, can someone try to help me out of this please, Thanks in advanceScreenshot of my terminal\ncmd1070×593 35.3 KB\nindex.jsuser.jspackage.json",
"username": "Vinothagan_J"
},
{
"code": "network access",
"text": "Hi @Vinothagan_J , welcome to the community.You are having a problem connecting with your database.If you are using the Atlas to create a cluster, probably you forgot to configure the network access from your project.If you are using another server, you need to ensure that it’s able to connect from your machine.",
"username": "Jennysson_Junior"
},
{
"code": "",
"text": "@Jennysson_Junior Thanks for you reply. but as I said I totally new to this.I’m still not sure, whether I’m connected or not.I have attached my Screenshots, if that helps, correct If I’m wrong at anything.\nMicrosoftTeams-image (6)1920×924 53 KB\n\nconnecttab1920×939 95.3 KB\nKindly help, Thanks in advance.",
"username": "Vinothagan_J"
},
{
"code": "mongoose\n .connect(YOUR_CONNECTION_STRING, {\n useNewUrlParser: true,\n useUnifiedTopology: true,\n })\n",
"text": "connecttab1920×939 95.3 KBHere you can see your connection string in step 3. Look that you must complete the connection string with the password of your user db.You will use this connection on your code to connect with your cluster. Something like",
"username": "Jennysson_Junior"
},
{
"code": "",
"text": "@Jennysson_Junior Thanks for your reply,As you can see I have added my connection string as like you have mentioned.\nimage897×344 9.62 KB\nStruglging with this connection issue for a week now ",
"username": "Vinothagan_J"
},
{
"code": "",
"text": "Have solved the issues please, I’m having the same issue.",
"username": "Anas_Backend_Engineer"
},
{
"code": "",
"text": "@Vinothagan_J Have solved the issues please, I’m having the same issue.@Vinothagan_J Have solved the issues please, I’m having the same issue.",
"username": "Anas_Backend_Engineer"
},
{
"code": "",
"text": "No, I didn’t find the solution either.",
"username": "Vinothagan_J"
},
{
"code": "",
"text": "I am also having this issue i can’t find a fix it. another here got a solution?",
"username": "Its_Aqua"
}
] | [
"node-js",
"mongoose-odm"
] |
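For context: this MongooseError means the `users.findOne()` call was buffered while no connection was ever established; Mongoose queues model operations and fails them after 10 seconds by default. A minimal sketch of a setup that fails fast instead of buffering (the SRV URI below is a placeholder, not taken from the thread):

```javascript
const mongoose = require("mongoose");

// Placeholder URI: use the SRV string from the Atlas "Connect" dialog,
// with the database user's real password filled in.
const uri = "mongodb+srv://user:<password>@cluster0.example.mongodb.net/ecommerce-project";

async function main() {
  // Awaiting the connection before wiring up routes makes a network-access
  // or credentials problem surface immediately, instead of every model call
  // buffering for 10 s and then timing out.
  await mongoose.connect(uri);
  console.log("connected to mongodb");
}

main().catch((err) => {
  console.error("error connecting to mongodb", err);
  process.exit(1);
});
```

As the replies suggest, with Atlas the usual root cause is a missing Network Access (IP allowlist) entry or an incomplete connection string. Incidentally, the posted code also has two unrelated bugs to fix once the connection works: the verification route should be app.get("/verify/:token", ...) rather than "/verify/token", and user, (verified = true) should be user.verified = true.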
null | Reading “normal” JSON with pymongo from a collection | 104 | Reading "normal" JSON with pymongo from a collection | 2023-11-10T10:46:43.535Z | [
{
"code": "my_cursor = my_collection.find_one()\nprint (json.dumps(my_cursor))\nTypeError: Object of type datetime is not JSON serializable\nl_cursor = json.loads(bson.json_util.dumps(my_cursor))\nprint (json.dumps(l_cursor, indent=2))\n\"date\": { \"$date\": \"2019-01-20T05:00:00Z\"}\n\"key\": \"value\"\"key\": {\"$value type\": value}",
"text": "Hi all,sorry for my ignorance, I’m quite new to MongoDB and making my first steps.\nI have a task to read all documents from a MongoDB collection as straight-forward JSONs, using python.Assuming my connection works (it does), if i simply read the documents as-is and try to print them, python is throwing this type of exception:…What I have found is that I need to use the bson module to dump the data:It works, but the representation is a little bit unusual:I have found different workarounds, suggesting custom encoders, lambda functions and so on.My question: how do I get a representation \"key\": \"value\" instead of \"key\": {\"$value type\": value} in a more generic way? Without writing an encoder for all possible BSON data types, hopefully.Thank you and apologies for my ignorance once again.Best,\nMichael",
"username": "lynyrds"
},
{
"code": "my_document = my_collection.find_one()\nprint( my_document )\n",
"text": "From an example I found I gather that you might simply have toI renamed my_cursor from your code as my_document because find_one() returns a single document rather than a cursor. The find() method would return a cursor.",
"username": "steevej"
},
{
"code": "'date': datetime.datetime(2018, 12, 27, 5, 0)json.loads()find_one()",
"text": "You’re right, but then the BSON types are returned like this: 'date': datetime.datetime(2018, 12, 27, 5, 0)\nThat’s why json.loads() can’t serialise that.\nAnd it’s not really human-readable.I was using find_one() to simplify the example.",
"username": "lynyrds"
},
{
"code": "",
"text": "In this SO thread they mention to use default=str as an option.",
"username": "steevej"
},
{
"code": "datetimeDecimal128mongoexport",
"text": "That, i found too.\nIt looks better, of course.\nBut still the date should be ISO conform, an integer should remain an integer and a float should be a float, not a string.\nI wrote a basic encoder for the datetime and Decimal128, but it’s 20 BSON data types, give or take.\nI mean, there’s mongoexport tool which seems to do the job correctly, so I know it’s possible.",
"username": "lynyrds"
},
{
"code": "\"date\": { \"$date\": \"2019-01-20T05:00:00Z\"}\n",
"text": "This is above my little knowledge of python.But as for normal JSON, there is no Date format, no Decimal128 and no difference between integer and float. The official JSON types are array, object, string, number, boolean and null. BSON has a richer data type system and this is why EJSON was brought to life, that is why you get:",
"username": "steevej"
},
{
"code": "\"key\": \"value\"\"key\": {\"$value type\": value}docs = list(coll.find())\n\n# To encode as JSON:\ndocs_as_extended_json = bson.json_util.dumps(docs)\n\n# To decode the JSON back to python/pymongo objects:\ndocs_decoded = bson.json_util.loads(docs_as_extended_json)\nassert docs_decoded == docs\njson.loadsbson.json_util.loads",
"text": "My question: how do I get a representation \"key\": \"value\" instead of \"key\": {\"$value type\": value} in a more generic way? Without writing an encoder for all possible BSON data types, hopefully.My suggestion is to commit using MongoDB Extended JSON wherever possible. For example:Using MongoDB Extended JSON should make your life easier because it is crossplatform and supports encoding/decoding all the BSON types. The problem with your initial attempt is that you used json.loads to decode the JSON instead of bson.json_util.loads.",
"username": "Shane"
}
] | [
"python"
] |
null | MongoDB Tools and Rocky Linux | 114 | MongoDB Tools and Rocky Linux | 2023-11-09T04:37:58.121Z | [
{
"code": "",
"text": "Hello,I just installed ops manager 6 and deployed a new mongodb instance running in a Rocky Linux 9 distro. But my deployment never finishes. The error log show the following issue:Error loading desired cluster configs : [01:16:29.245] Error retrieving cluster config from ‘http://:8080/agents/api/automation/conf/v1/654a780e890175651ead6d6d?av=12.0.28.7763&aos=linux&aa=x86_64&ab=64&ad=&ah=l&ahs=&at=1699387830756’ : [01:16:29.245] Cluster config did not pass validation for pre-expansion semantics : MongoDB Tools download URL for this host was not found in the list of available URLS : [ {100.9.0 map[linux:map[amazon2:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-amazon2-x86_64-100.9.0.tgz amzn64:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-amazon-x86_64-100.9.0.tgz arm64_amazon2:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-amazon2-aarch64-100.9.0.tgz arm64_rhel82:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel82-aarch64-100.9.0.tgz arm64_ubuntu2204:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-ubuntu2204-arm64-100.9.0.tgz debian10:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-debian10-x86_64-100.9.0.tgz debian11:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-debian11-x86_64-100.9.0.tgz debian81:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-debian81-x86_64-100.9.0.tgz debian92:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-debian92-x86_64-100.9.0.tgz ppc64le_rhel7:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel71-ppc64le-100.9.0.tgz ppc64le_rhel8:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel81-ppc64le-100.9.0.tgz rhel57:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel62-x86_64-100.9.0.tgz rhel62:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel62-x86_64-100.9.0.tgz rhel7:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel70-x86_64-100.9.0.tgz rhel80:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel80-x86_64-100.9.0.tgz rhel90:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel90-x86_64-100.9.0.tgz s390x_rhel7:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-rhel72-s390x-100.9.0.tgz suse11:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-suse12-x86_64-100.9.0.tgz suse12:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-suse12-x86_64-100.9.0.tgz suse15:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-suse15-x86_64-100.9.0.tgz ubuntu1604:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-ubuntu1604-x86_64-100.9.0.tgz ubuntu1804:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-ubuntu1804-x86_64-100.9.0.tgz ubuntu2004:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-ubuntu2004-x86_64-100.9.0.tgz ubuntu2204:http://:8080/automation/mongodb-releases/hybrid/linux/mongodb-database-tools-ubuntu2204-x86_64-100.9.0.tgz] osx:map[default:http://:8080/automation/mongodb-releases/hybrid/macos/mongodb-database-tools-macos-x86_64-100.9.0.zip] 
windows:map[default:http://:8080/automation/mongodb-releases/hybrid/windows/mongodb-database-tools-windows-x86_64-100.9.0.zip]]} ]\nI installed the package redhat-lsb-release, which the MongoDB documentation mentions. What could it be?",
"username": "Jose_Felipe_Goncalves_Rocha"
},
{
"code": "",
"text": "Hi @Jose_Felipe_Goncalves_Rocha,\nIt appears that the agent cannot determine the correct operating system to download the database tools, however, it is sincerely not clear to me on how the situation can be solved. do you have any suggestions @chris @Kushagra_Kesav?Best Regards",
"username": "Fabio_Ramohitaj"
},
{
"code": "",
"text": "Is that log verbatim or has it been redacted? Because the host portion of the URLs is missing.",
"username": "chris"
},
{
"code": "",
"text": "I removed the hostnames for security reasons.An update: I had this issue in a Rocky Linux 9. Then I downgraded the SO to version 8 and everything works fine.",
"username": "Jose_Felipe_Goncalves_Rocha"
},
{
"code": "",
"text": "Good to hear, Ops Manager 6.0.20 (Nov 03 2023) adds support for RHEL 9 for both Ops Manager and Managed deployments.",
"username": "chris"
}
] | [
"ops-manager"
] |
null | C#, GUID and MongoDB | 504 | C#, GUID and MongoDB | 2023-07-29T22:03:33.987Z | [
{
"code": "BsonSerializer.RegisterSerializer(new GuidSerializer(GuidRepresentation.Standard));return await (await _collection.FindAsync(x => x.id == id)).FirstOrDefaultAsync();BsonDefaults.GuidRepresentation = GuidRepresentation.Standard;",
"text": "Supposedly this topic was beaten to death but no.\nHave this, first thing in my code,\nBsonSerializer.RegisterSerializer(new GuidSerializer(GuidRepresentation.Standard));Insert happens fine, i see in Atlas _id: UUID(‘9ad10b5b-50b4-4e35-bd26-393be728215c’)\nThe LINQ search is not finding it. It comes back with null when id is Guid(‘9ad10b5b-50b4-4e35-bd26-393be728215c’)\nreturn await (await _collection.FindAsync(x => x.id == id)).FirstOrDefaultAsync();Everything works as soon as i add\nBsonDefaults.GuidRepresentation = GuidRepresentation.Standard;But it says it’s obsolete.",
"username": "George_Ter-Saakov"
},
{
"code": "",
"text": "I also have this issue",
"username": "pierre-luc_des"
}
] | [
"dot-net"
] |
null | New relic integration for mongodb atlas metrics volume estimate | 164 | New relic integration for mongodb atlas metrics volume estimate | 2023-11-03T13:50:03.215Z | [
{
"code": "",
"text": "Hello,We are trying to setup mongodb atlas monitoring integration provided through new relic and want to understand the volume of metrics being collected and feed to new relic. This will help us understand the potential cost from new relic as they charge based on the amount of data being sent. Looking for thoughts from the community if anyone has implemented the same in their environment.",
"username": "Ammara_Sheikh"
},
{
"code": "",
"text": "Hi @Ammara_SheikhThank you for your post and for being a part of the MongoDB community! The New Relic integration with MongoDB Atlas uses a Prometheus exporter that scrapes the MongoDB process.The exact payload size being sent to New Relic will depend on your environment. However, assuming you have a project with a single 3-node replica set, we would be sending around 110 KB every 10 seconds by default. This would equate to around 29GB per month if you wanted 10 second metrics resolution.You can further reduce the amount of data being sent by modifying the scrape_interval in the prometheus config file.I hope this helps answer your question! Please let me know if you have any other questions.Thanks!\nFrank",
"username": "Frank_Sun"
}
] | [] |
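The ~29 GB figure follows directly from the scrape size and interval; a quick sanity check of the arithmetic, using the numbers from the reply above (decimal units assumed):

```javascript
// ~110 KB scraped every 10 seconds for a single 3-node replica set.
const kbPerScrape = 110;
const scrapesPerMonth = (30 * 24 * 3600) / 10; // 259,200 scrapes in 30 days
const gbPerMonth = (kbPerScrape * scrapesPerMonth) / 1e6; // decimal GB
console.log(gbPerMonth.toFixed(1)); // ≈ 28.5 GB, in line with the ~29 GB estimate
```

Doubling the scrape_interval roughly halves this volume, which is the lever mentioned in the reply.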
null | How to secure access to Mongo DB Atlas | 75 | How to secure access to Mongo DB Atlas | 2023-11-10T08:00:53.888Z | [
{
"code": "",
"text": "I want to use Mongo Atlas as my DB on my application, but I am facing an issue:I am using a Cloudflare Proxy. So anytime there is a new request to my website, the IP changes. So, how can I secure the access to my DB without having a unique IP address to access from?I’ve tried to add to the whitelist all the Cloudflare IPs, but Mongo does not accept IpV6 ranges. Is there any way to restrict the access by domain? Any other solution to have secure connection to MongoDB in my case?",
"username": "Javier_Alvarez1"
},
{
"code": "",
"text": "Note that Atlas maintains a posture of security in depth, requiring database authentication and TLS network encryption on top of the firewall: so you may consider opening up a wider public IP block",
"username": "Andrew_Davidson"
}
] | [
"node-js"
] |
null | AWS SDK v3 SQS SendMessage | 786 | AWS SDK v3 SQS SendMessage | 2023-07-29T02:53:11.891Z | [
{
"code": "",
"text": "Hello,Has anyone been successful is sending a message to AWS SQS using the JavaScript SDK v3? This is inside of an Atlas function.It simply will not work for me with a fifo queue.The same function has no issues interacting with S3.The error when trying to send an SQS message is that data must be a string or a buffer. (The data is a string)I know the code works though because identical code, package, etc. works in node 18.Thank you in advance!",
"username": "Dima"
},
{
"code": "",
"text": "Hi Dima,\nCan you share your code snippet and the full error message you’re seeing? Thanks!",
"username": "mpobrien"
},
{
"code": "exports = async function () {\n const AWS_CONFIG = {\n credentials: {\n accessKeyId: context.environment.values['AWS_ACCESS_KEY_ID'],\n secretAccessKey: context.values.get('AWS_SECRET_KEY_VALUE'),\n },\n region: context.values.get(\"AWS_REGION\"),\n }\n return AWS_CONFIG\n }\nexports = async function(changeEvent) {\n\n const AWS_CONFIG = await context.functions.execute('aws_getConfig')\n\n const { S3Client, PutObjectCommand } = require('@aws-sdk/client-s3')\n const { SQSClient, SendMessageCommand } = require('@aws-sdk/client-sqs')\n\n const { S3_BUCKET_ETL_ENV, SQS_QUEUE_ETL_URLS } = context.environment.values\n const S3Bucket = context.values.get(\"S3_ETL_BUCKET\")\n \n if (changeEvent && (changeEvent.operationType == 'insert' || changeEvent.operationType == 'delete' || changeEvent.operationType == 'update')) {\n const s3 = new S3Client(AWS_CONFIG)\n const sqs = new SQSClient(AWS_CONFIG)\n \n const collection = changeEvent.ns.coll\n const SQSQueueUrl = SQS_QUEUE_ETL_URLS[collection.toLowerCase()]\n \n let objectsToPutInS3 = [], objectsToPutInS3ForSQS = []\n \n const fullStringBody = changeEvent.fullDocument ? JSON.stringify(changeEvent.fullDocument) : JSON.stringify(changeEvent.documentKey)\n \n const baseKeyName = S3_BUCKET_ETL_ENV + \"/\" + collection + \"/\" + changeEvent.documentKey._id + \"-\" + Date.now()\n \n if(fullStringBody) {\n const anObj = {\n Bucket: S3Bucket,\n Key: baseKeyName,\n Body: fullStringBody\n }\n objectsToPutInS3ForSQS.push(anObj)\n objectsToPutInS3.push(new PutObjectCommand(anObj))\n }\n \n const s3Promises = objectsToPutInS3.map(object => s3.send(object).then(data => {\n console.log('S3 put object result: ' + JSON.stringify(data))\n return data\n }))\n await Promise.all(s3Promises)\n\n const sqsMsgBody = JSON.stringify({\n operation: changeEvent.operationType,\n S3FilePartsOfJSONDocument: objectsToPutInS3ForSQS.map(object => {return {Bucket: object.Bucket, Key: object.Key}})\n })\n\n console.log(`SQSQueueUrl: ${SQSQueueUrl} and is of type ${typeof SQSQueueUrl}`)\n //\"https://sqs.us-east-1.amazonaws.com/1234567890/MongoAtlasETLUsersDev.fifo\" and is of type string\n\n console.log(`collection: ${collection} and is of type ${typeof collection}`)\n //\"users\" and is of type string\n\n console.log(`sqsMsgBody: ${sqsMsgBody} and is of type ${typeof sqsMsgBody}`)\n //something like: \"{\\\"operation\\\":\\\"update\\\",\\\"S3FilePartsOfJSONDocument\\\":[{\\\"Bucket\\\":\\\"bucket-name\\\",\\\"Key\\\":\\\"dev/users/5bb2377885432223fg-1690592434481\\\"}]}\" and is of type string\n\n console.log(`baseKeyName: ${baseKeyName} and is of type ${typeof baseKeyName}`)\n //something like: \"dev/users/5bb2377885432223fg-1690592434481\" and is of type string\n \n //Everything is fine up to here\n try {\n const sqsResult = await sqs.send(new SendMessageCommand({\n QueueUrl: SQSQueueUrl,\n MessageGroupId: collection,\n MessageBody: sqsMsgBody,\n MessageDeduplicationId: baseKeyName\n }))\n } catch (e) {\n console.log('Error writing to SQS: ' + e)\n //error is: \"TypeError: Data must be a string or a buffer\"\n }\n }\n }\n{\"dependencies\":{\"@aws-sdk/client-s3\":\"~3.378.0\",\"@aws-sdk/client-sqs\":\"~3.378.0\"}}\n",
"text": "Hello Michael,Absolutely and thank you for responding.One function is for the aws config (credentials and region) in aws_getConfig.js:The function that tries to make the SQS call is triggered by a database trigger (it actually does both S3 and SQS - S3 works, SQS does not) - in Atlas_Triggers_ETL_Function:So, the full error message is \"“TypeError: Data must be a string or a buffer”The package.json looks like this:Thank you!",
"username": "Dima"
},
{
"code": "",
"text": "We arrived at this post as we are also facing the exact same problem with the V3 aws-sdk and SendMessageCommand.",
"username": "Mark_Johnson"
},
{
"code": "",
"text": "Hi Mark,Glad to know that I am not alone in this. Thanks for the reply.",
"username": "Dima"
},
{
"code": "",
"text": "As a result we are looking at EventBridge triggers instead, though looks like it will complicate the setup a little, particularly for your scenario.",
"username": "Mark_Johnson"
},
{
"code": "",
"text": "Hey @Dima @mpobrien any updates on this",
"username": "Todd_Stephenson"
}
] | [] |
null | Aggregate $match _id $eq $toObjectId not working | 24,789 | Aggregate $match _id $eq $toObjectId not working | 2022-02-06T16:10:18.497Z | [
{
"code": "db.getCollection('dlsComponents').aggregate([\n { $match: { library: 'Library1', collection: 'Collection1', media: 'Images', object: 'Image3' } }\n ])\n{ _id: ObjectId(\"61fc458b46d7874a3a97ef79\"),\n library: 'Library1',\n collection: 'Collection1',\n media: 'Images',\n object: 'Image3',\n info: 'Image: 1/1/Images/Image3 Info', …\ndb.getCollection('dlsComponents').aggregate([\n { $match: { _id: { $eq: { $toObjectId: \"61fc458b46d7874a3a97ef79\" } } } }\n ])\n",
"text": "Using Compass 1.30.1, I was testing an aggregation and getting unexpected results. A $match was not working as expected. The simplified aggregation is:And this gives the expected result by finding a document:try to get the same document by _id:does not find a document. Why does the second $match not find a document?",
"username": "David_Camps"
},
{
"code": "",
"text": "I found that:{ $match: { $expr: { $eq: [ ‘$_id’, ‘$$imageId’ ] } } }does work ($$imageId is an ObjectId used in the non-simplified aggregate). Maybe the { $eq: ‘$value’ } format does not work in pipelines.",
"username": "David_Camps"
},
{
"code": "$eq$eq: ObjectId(\"...\")",
"text": "Hi David,The $eq used in find()/$match (without $expr) must specify an exact value: https://docs.mongodb.com/manual/reference/operator/query/eq/You can use $eq: ObjectId(\"...\")Jess",
"username": "jbalint"
},
{
"code": "{ $match: { $expr : { $eq: [ '$_id' , { $toObjectId: \"61fc458b46d7874a3a97ef79\" } ] } } }\n",
"text": "Your last post made me think that may be $toObjectId works only inside $expr. I triedand it works.",
"username": "steevej"
},
{
"code": "",
"text": "See on a related topicand",
"username": "steevej"
},
{
"code": "",
"text": "even gpt4 doesn’t solve my problem, thank",
"username": "Crown_International_Technology_Pvt_Ltd_CIT"
},
{
"code": "",
"text": "Did you told GPT4 what was your problem? Or did you do like you did here., just saying that you have a problem.",
"username": "steevej"
}
] | [
"aggregation",
"compass"
] |
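To summarize the resolution: in a plain $match (or find()) predicate, the right-hand side is a literal, so { $toObjectId: "..." } is compared as an embedded document and matches nothing; aggregation operators are only evaluated inside $expr. A sketch of both working forms, using the _id from the thread:

```javascript
// 1. Plain query predicate: build the ObjectId directly.
db.getCollection("dlsComponents").aggregate([
  { $match: { _id: ObjectId("61fc458b46d7874a3a97ef79") } }
]);

// 2. $expr evaluates aggregation operators, so $toObjectId works here.
db.getCollection("dlsComponents").aggregate([
  {
    $match: {
      $expr: { $eq: ["$_id", { $toObjectId: "61fc458b46d7874a3a97ef79" }] }
    }
  }
]);
```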
null | Realm Error No Subscription for WRITE: When doing multiple subscriptions of different type | 133 | Realm Error No Subscription for WRITE: When doing multiple subscriptions of different type | 2023-11-07T14:26:49.413Z | [
{
"code": "override fun configureTheRealm() {\n if (user != null) {\n val config = SyncConfiguration.Builder(user, setOf(EmailAccount::class, ApplicationAccount::class))\n .initialSubscriptions { sub ->\n add(\n query = sub.query<EmailAccount>(\"ownerId == $0\", user.id),\n name = \"User's EmailAccounts\"\n )\n add(\n query = sub.query<ApplicationAccount>(\"ownerId == $0\", user.id),\n name = \"User's Application Account\"\n )\n }\n .log(LogLevel.ALL)\n .build()\n realm = Realm.open(config)\n Log.d(\"USERID\", \"$user\")\n }\n }\n",
"text": "I am trying to configure the realm with the help of the below function. Initially, there was only EmailAccount class after adding ApplicationAccount class while writing to EmailAccount cluster it is working but for ApplicationAccount it is saying Cannot write to class ApplicationAccount when no flexible sync subscription has been created.How to solve this ??",
"username": "Meet_Soni1"
},
{
"code": "realm.subscriptions.errorMessage",
"text": "Hmm, that is a bit surprising. It could point to a problem with the subscriptions somehow.What is the output of realm.subscriptions.errorMessage?",
"username": "ChristanMelchior"
},
{
"code": "",
"text": "image1668×497 67.5 KB",
"username": "Meet_Soni1"
},
{
"code": "",
"text": "Is there anything I am missing? please help me",
"username": "Meet_Soni1"
}
] | [
"atlas-device-sync",
"android",
"kotlin"
] |
null | Announcement: Intelligent Query Generation in MongoDB Compass | 326 | Announcement: Intelligent Query Generation in MongoDB Compass | 2023-10-05T13:05:08.468Z | [
{
"code": "",
"text": "Last week, we announced a new, intelligent query generation experience in MongoDB Compass. With the help of AI, you can generate queries and aggregations from natural language. This feature is now available in public preview.Please be sure to download the latest version of Compass to try it out. If you have any feedback, we’d love to hear it! Let us know what you think by posting in our feedback portal.You can learn more about this feature by reading our docs. To also learn about the other intelligent experiences for MongoDB announced last week, check out our blog.",
"username": "Julia_Oppenheim"
},
{
"code": "",
"text": "Hello! I successfully installed and utilized this feature on Windows. However, to my surprise, after downloading the 64-bit version for Mac, the option (feature preview) doesn’t appear in the settings menu.P.S.: I’m currently using Compass 1.40.4. Could you please assist me with this issue?",
"username": "ricardohsmello"
},
{
"code": "",
"text": "Hi @ricardohsmello - thanks for posting here. I can’t reproduce this. Would you mind sharing a screenshot of what you see? (Feel free to message me privately if you’d prefer). Also, do you see “Generate Query” in the query bar?",
"username": "Julia_Oppenheim"
},
{
"code": "",
"text": "Hi Julia,\nFirstly, thank you for responding to my message.\nThe \"Generate Query\"option does not show.Attached are the screenshots for your reference.FE0015E9-3A8B-46BE-9C3A-938AF2F1B65F2116×1686 247 KB\n9ACC4478-F0D2-4A58-BB57-0047334AEC452326×406 75.8 KB\n919929C4-BDCC-4781-807E-300E60D4C3001126×640 114 KB",
"username": "ricardohsmello"
},
{
"code": "",
"text": "Thank you, this is helpful! Two things may be happening:Please let me know if either of those points help.",
"username": "Julia_Oppenheim"
}
] | [
"aggregation",
"compass"
] |
null | Is it possible to correctly use the UUIDv7 format in mongodb and all his benefit today? (c# environment) | 180 | Is it possible to correctly use the UUIDv7 format in mongodb and all his benefit today? (c# environment) | 2023-11-01T20:52:17.357Z | [
{
"code": "",
"text": "We are currently in a c# environment and wondering if we can implement the uuidv7 format in mongodb and also get the benefit from it so not just have the same behavior as uuidv4.There seem to be some way to do this:What would be the best way to implement this? If it’s actually possible as today…\nThank you!",
"username": "pierre-luc_des"
},
{
"code": "IIdGeneratorBsonSerializer.RegisterIdGenerator(typeof(Guid), new UuidV7Generator());UuidV7Generator",
"text": "Hi, @pierre-luc_des,Welcome to the MongoDB Community Forums. I understand that you want to use UUIDv7 with the .NET/C# Driver.Let’s start by talking about how UUID formats currently supported by the driver and MongoDB - UUIDv3 and UUIDv4. The main difference between UUIDv3 and UUIDv4 is that UUIDv4 specifies a consistent serialized byte ordering regardless of the language or endianness of your CPU whereas UUIDv3 does not. The main difference between UUIDv4 and UUIDv7 is the generation mechanism. Thus you could use UUIDv4 GUIDs in your application and register a new IIdGenerator that uses the UUIDv7 generation mechanism. You could then register it via BsonSerializer.RegisterIdGenerator(typeof(Guid), new UuidV7Generator()); wher UuidV7Generator is your generator.Hope that helps!Sincerely,\nJames",
"username": "James_Kovacs"
}
] | [
"dot-net"
] |
null | TLS NETWORK error in mongo stateful sets | 382 | TLS NETWORK error in mongo stateful sets | 2023-10-23T17:08:23.922Z | [
{
"code": "",
"text": "Hello,After configuring mongodb as StatefulSets with tls mode = requireTLS I get this NETWORK error:\n“c”:“NETWORK”, “id”:22588, “ctx”:“conn527”,“msg”:“Error receiving request from client. Ending connection from remote”,“attr”:{“error”:{“code”:141,“codeName”:“SSLHandshakeFailed”,“errmsg”:“The server is configured to only allow SSL connections”},“remote”:“100.62.8.86:44503”,“connectionId”:527}}\nThe error seems to come from internal communications between services and StatefulSets.\nI don’t make any request from specific client.\nWith tls mode= preferedTLS it works.\nPlease advice!",
"username": "Myq"
},
{
"code": "",
"text": "Seems something in your K8S (assume you use k8s as you mentioned stateful set) is using non-tls connection to your mongodb server.You need to figure what that something is and then try configuring it to use tls. (i’m not familiar with stateful set)",
"username": "Kobe_W"
},
{
"code": "",
"text": "Thank you Kobe.\nIndeed I use K8S. MongoDB server is started on TLS config.\nThe complete message is below:\n“c”:“NETWORK”, “id”:22942, “ctx”:“listener”,“msg”:“Connection accepted”,“attr”:{“remote”:“100.62.8.86:44503”,“connectionId”:13,“connectionCount”:1}}\n“c”:“NETWORK”, “id”:22986, “ctx”:“conn13”,“msg”:“Error receiving request from client. Ending connection from remote”,“attr”:{“error”:{“code”:141,“codeName”:“SSLHandshakeFailed”,“errmsg”:“The server is configured to only allow SSL connections”},“remote”:“100.62.8.86:44503”,“connectionId”:13}}\n“c”:“NETWORK”, “id”:22942, “ctx”:“conn13”,“msg”:“Connection ended”,“attr”:{“remote”:“100.62.8.86:44503” ,“connectionId”:13,“connectionCount”:0}}\nI have a service for each replica and a headless service(plus loopback 127.0.0.1). These services communicate (internally) with statefulset and throws these errors.\nBR",
"username": "Myq"
},
{
"code": "tls=true",
"text": "100.62.8.86:44503Is this one of the replicas , a service or another connection.Any connections other then the replica set members will need to transition to tls by adding tls=true to the connection string.",
"username": "chris"
},
{
"code": "",
"text": "Thank you Chris. I knew it. But where should I mention this tls=true? Is there any magic flag? Or should I make a new kind of chart?\nMy service looks like this:\nkind: Service\nmetadata:\nname: myname - internal\nspec:\nclusterIP: None\nports:\n- name: mongodb\nport: 27017\nselector:\nname: myname",
"username": "Myq"
},
{
"code": "mongodb://username:password@host1,host2,host3/?tls=true",
"text": "Hi @MyqSpecifying connection options may be specific to the application, refer to its documentation .The majority of applications will accept a connections string in the form:\nmongodb://username:password@host1,host2,host3/?tls=trueref:\nhttps://www.mongodb.com/docs/manual/reference/connection-string",
"username": "chris"
},
{
"code": "",
"text": "Yes, but this is available when I make a connection from a specific client (another app as you mentioned) to my mongodb replicaset. In my case the problem occurs when I start mongodb replicaset as a server , because of the internal services communications. Along with these errors, the PODs crashed (scales down).",
"username": "Myq"
},
{
"code": "",
"text": "Are you rolling this yourself or using the MongoDB K8S Operator?Along with these errors, the PODs crashed (scales down).The mongod logs / container logs are going to tell you exactly what is going wrong.",
"username": "chris"
},
{
"code": "",
"text": "oc logs mymongo-0 shows the above errors. Is there any logs more precisely?",
"username": "Myq"
},
{
"code": "",
"text": "That’s not an error, thats an info level log.You have stated that it is crashing, there will be logs to indicate why that is the case.",
"username": "chris"
},
{
"code": "",
"text": "These are the only errors I see. Maybe should I switch to debug error level. However seems to be a NETWORK error:\n“c”:“NETWORK”, “id”:22986, “ctx”:“conn13”,“msg”:“Error receiving request from client. Ending connection from remote”,“attr”:{“error”:{“code”:141,“codeName”:“SSLHandshakeFailed”,“errmsg”:“The server is configured to only allow SSL connections”},“remote”:“100.62.8.86:44503”,“connectionId”:13}}",
"username": "Myq"
},
{
"code": "\"s\":\"I\"",
"text": "Again that is not an error. This is logged at \"s\":\"I\" an info level log. It is logging the fact a client tried to connect without TLS. It is NOT going to crash a mongod.You will need to look deeper into your log, if there is a ‘crash’ logs of severity Error or Fatal are common.Otherwise the container logs may tell you what is going on.Are you rolling this yourself or using the MongoDB K8S Operator?If you’re rolling this yourself what does the healthcheck look like?ref:\nhttps://www.mongodb.com/docs/manual/reference/log-messages/#structured-logging",
"username": "chris"
},
{
"code": "",
"text": "I rolling by myself. Indeed 100.62.8.86:44503 was another service. I manage to scale down. However now I have the same error from loopback= 127.0.0.1 and the PODs crash after a while.\n“s”:“I”, “c”:“NETWORK”, “id”:22988, “ctx”:“conn37”,“msg”:“Error receiving request from client. Ending connection from remote”,“attr”:{“error”:{“code”:141,“codeName”:“SSLHandshakeFailed”,“errmsg”:“The server is configured to only allow SSL connections”},“remote”:“127.0.0.1:54800”\nHowever for a short period I can connect to the database (from a specific client). The TLS/Certificate seems to work fine.\nAlso I tried to replace bind_ip = 0.0.0.0 with everything else , except localhost/127.0.0.1. The error message disappear but the StatefulSet doesn’t work. I guess localhost/127.0.0.1 is mandatory.",
"username": "Myq"
},
{
"code": "",
"text": "I rolling by myselfIf you’re rolling this yourself what does the healthcheck look like?What have you configured for your probes?Are you aware of the MongoDB Community Kubernetes Operator ?",
"username": "chris"
},
{
"code": "",
"text": "Basically I use this readiness probe:\nports:\n- containerPort: 27017\nreadinessProbe:\nexec:\ncommand:\n- mongo\n- --eval\n- “db.runCommand({ ping: 1})”\ninitialDelaySeconds: 10\nperiodSeconds: 10\nsuccessThreshold: 1\nfailureThreshold: 3but only for “preferTLS” where everything goes well. For “requireTLS” I have this error:\nReadiness probe failed: MongoDB shell version v5 connecting to: mongodb://127.0.0.1:27017/?compressors=disabled&gssapiServiceName=mongodb Error: network error while attempting to run command ‘isMaster’ on host ‘127.0.0.1:27017’ : connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1\nIn this case mongo-1 and mongo-2 do not get to generate.\n(I know that MongoDB Community Kubernetes Operator is related only to the cloud. I’m working on prem)",
"username": "Myq"
},
{
"code": "",
"text": "(I know that MongoDB Community Kubernetes Operator is related only to the cloud. I’m working on prem)What leads you to this conclusion?Basically I use this readiness probe:If you update to TLS then this would also be required to use TLS. This is why your pod is being stopped(“crashing”), the healthcheck cannot connect.",
"username": "chris"
},
{
"code": "",
"text": "ld also be rIf I use this readiness probe:\nreadinessProbe:\nexec:\ncommand:\n- mongo\n- --tls\n- -u admin\n- -p MONGO_ADMIN_PASSWORD\n- --tlsCertificateKeyFile=/tlsFolder/mongodb.pem\n- --eval\n- “db.runCommand({ ping: 1})”\ninitialDelaySeconds: 10\nperiodSeconds: 10\nsuccessThreshold: 1\nfailureThreshold: 3\nI have this error:\nReadiness probe failed: MongoDB shell version connecting to: mongodb://127.0.0.1:27017/?compressors=disabled&gssapiServiceName=mongodb {“t”:{“$date”:“2023-11-06T07:22:43.869Z”},“s”:“I”, “c”:“NETWORK”, “id”:5490002, “ctx”:“thread1”,“msg”:“Started a new thread for the timer service”} {“t”:{“$date”:“2023-11-06T07:22:43.879Z”},“s”:“E”, “c”:“NETWORK”, “id”:23256, “ctx”:“js”,“msg”:“SSL peer certificate validation failed”,“attr”:{“error”:“SSL peer certificate validation failed: self signed certificate”}} Error: couldn’t connect to server 127.0.0.1:27017, connection attempt failed: SSLHandshakeFailed: SSL peer certificate validation failed: self signed certificate : connect@src/mongo/shell/mongo.js:374:17 @(connect):2:6 exception: connect failed exiting with code 1\nCommon Name (CN) is: localhost.\nMy .pem file is not located in /etc/mongo/ but in other path. Maybe this could be a problem.",
"username": "Myq"
},
{
"code": "",
"text": "SSL peer certificate validation failed: self signed certificateWhere is the CA file for this certificate? You’ll need to specify that on the command line too.Are you using client certificates in this deployment too or are you confusing options ?",
"username": "chris"
},
{
"code": "",
"text": "Yes seems to be a bug in the certificate.",
"username": "Myq"
}
] | [
"containers"
] |
null | Trigger Match Expression for updatedAt | 110 | Trigger Match Expression for updatedAt | 2023-11-09T17:56:24.238Z | [
{
"code": "{\n \"$or\": [\n {\n \"operationType\": \"insert\"\n },\n {\n \"operationType\": \"update\",\n \"updateDescription.updatedFields\": {\n \"$not\": {\n \"$eq\": [\n {\n \"updatedAt\": \"$updateDescription.updatedFields.updatedAt\"\n }\n ]\n }\n }\n }\n ]\n}\n",
"text": "I am setting up a database trigger for insert and update. The function this will trigger will either add createdAt & updatedAt or just update the updatedAt.ISSUE: This match expression is not filtering out the document.updates where updatedAt was the only field updated. This is causing an endless loop of triggers.expression ==",
"username": "Todd_Stephenson"
},
{
"code": "{\n \"$or\": [\n {\n \"operationType\": \"insert\"\n },\n {\n \"operationType\": \"update\",\n \"updateDescription.updatedFields.updatedAt\": {\n $exists: false,\n }\n }\n ]\n}\n",
"text": "Hi, I believe there is a bit of a misunderstanding in how the match expression works (also, I am not convinced it is valid synctax with the $eq). I think this should work, but let me know if it is not what you are looking for:",
"username": "Tyler_Kaye"
},
{
"code": "exports = function(changeEvent) {\n const dbName = changeEvent.ns.db;\n const collectionName = changeEvent.ns.coll;\n const collection = context.services.get('mongodb-atlas').db(dbName).collection(collectionName);\n const currentDate = new Date();\n\n // Check the operation type\n if (changeEvent.operationType === 'insert') {\n // If it's an insert, set both createdAt and updatedAt\n collection.updateOne(\n { _id: changeEvent.documentKey._id },\n {\n $set: {\n createdAt: currentDate,\n updatedAt: currentDate\n }\n }\n )\n .then(result => {\n console.log(`Inserted document in ${collectionName} collection with createdAt and updatedAt fields.`);\n })\n .catch(error => {\n console.error('Error setting createdAt and updatedAt on insert:', error);\n });\n } else if (changeEvent.operationType === 'update') {\n // If it's an update, only set updatedAt\n collection.updateOne(\n { _id: changeEvent.documentKey._id },\n {\n $set: {\n updatedAt: currentDate\n }\n }\n )\n .then(result => {\n console.log(`Updated document in ${collectionName} collection with updatedAt field.`);\n })\n .catch(error => {\n console.error('Error setting updatedAt on update:', error);\n });\n }\n\n return;\n};\n",
"text": "Hey @Tyler_Kaye , Thank you for the quick reply.edited: I didn’t have the trigger active, oops. Sorry!issue:\nOn insert it triggers the function 3 times. Here is my function:Screenshot 2023-11-09 at 1.08.29 PM2552×1508 262 KB",
"username": "Todd_Stephenson"
},
{
"code": "",
"text": "Sorry, having a hard time parsing if you are saying it worked or if it did not work?",
"username": "Tyler_Kaye"
},
{
"code": "",
"text": "Hey @Tyler_Kaye , I just edited my comment above. I didn’t have the trigger enabled lol",
"username": "Todd_Stephenson"
},
{
"code": "",
"text": "The update filter works perfectly! @Tyler_Kaye",
"username": "Todd_Stephenson"
},
{
"code": "",
"text": "It happens to us all sometimes Best,\nTyler",
"username": "Tyler_Kaye"
},
{
"code": "",
"text": "I just need help with no having 3 triggers on document insert. @Tyler_Kaye",
"username": "Todd_Stephenson"
},
{
"code": "",
"text": "I have to step out, but I am not sure what you mean?",
"username": "Tyler_Kaye"
},
{
"code": "",
"text": "When I insert a document the triggers gets executed 3 times then it stops. It should only get triggered once @Tyler_Kaye",
"username": "Todd_Stephenson"
},
{
"code": "",
"text": "Can you send a link to your trigger?",
"username": "Tyler_Kaye"
},
{
"code": "",
"text": "@Tyler_Kaye Here you go: App Services",
"username": "Todd_Stephenson"
},
{
"code": "",
"text": "Can you add to the printing to add the document’s _id to the statement (and possibly the updateDescription using EJSON.stringify()). I cant tell if something is going wrong or if there is just something inserting an object and then something updating an object",
"username": "Tyler_Kaye"
},
{
"code": "exports = function(changeEvent) {\n const dbName = changeEvent.ns.db;\n const collectionName = changeEvent.ns.coll;\n const collection = context.services.get('mongodb-atlas').db(dbName).collection(collectionName);\n const currentDate = new Date();\n const documentId = changeEvent.documentKey._id; // Get the _id of the document\n\n // Import the EJSON module\n const EJSON = require('mongodb-extended-json');\n\n // Check the operation type\n if (changeEvent.operationType === 'insert') {\n const updateDescription = {\n $set: {\n createdAt: currentDate,\n updatedAt: currentDate\n }\n };\n\n // If it's an insert, set both createdAt and updatedAt\n collection.updateOne({ _id: documentId }, updateDescription)\n .then(result => {\n console.log(`Inserted document with _id: ${documentId} in ${collectionName} collection with createdAt and updatedAt fields. Update Description: ${EJSON.stringify(updateDescription)}`);\n })\n .catch(error => {\n console.error('Error setting createdAt and updatedAt on insert:', error);\n });\n } else if (changeEvent.operationType === 'update') {\n const updateDescription = {\n $set: {\n updatedAt: currentDate\n }\n };\n\n // If it's an update, only set updatedAt\n collection.updateOne({ _id: documentId }, updateDescription)\n .then(result => {\n console.log(`Updated document with _id: ${documentId} in ${collectionName} collection with updatedAt field. Update Description: ${EJSON.stringify(updateDescription)}`);\n })\n .catch(error => {\n console.error('Error setting updatedAt on update:', error);\n });\n }\n\n return;\n};\nexports = function(changeEvent) {\n const dbName = changeEvent.ns.db;\n const collectionName = changeEvent.ns.coll;\n const collection = context.services.get('mongodb-atlas').db(dbName).collection(collectionName);\n const currentDate = new Date();\n const documentId = changeEvent.documentKey._id; // Get the _id of the document\n\n // Check the operation type\n if (changeEvent.operationType === 'insert') {\n const updateDescription = {\n $set: {\n createdAt: currentDate,\n updatedAt: currentDate\n }\n };\n\n // If it's an insert, set both createdAt and updatedAt\n collection.updateOne({ _id: documentId }, updateDescription)\n .then(result => {\n console.log(`Inserted document with _id: ${documentId} in ${collectionName} collection with createdAt and updatedAt fields. Update Description: ${JSON.stringify(updateDescription)}`);\n })\n .catch(error => {\n console.error('Error setting createdAt and updatedAt on insert:', error);\n });\n } else if (changeEvent.operationType === 'update') {\n const updateDescription = {\n $set: {\n updatedAt: currentDate\n }\n };\n\n // If it's an update, only set updatedAt\n collection.updateOne({ _id: documentId }, updateDescription)\n .then(result => {\n console.log(`Updated document with _id: ${documentId} in ${collectionName} collection with updatedAt field. Update Description: ${JSON.stringify(updateDescription)}`);\n })\n .catch(error => {\n console.error('Error setting updatedAt on update:', error);\n });\n }\n\n return;\n};\n",
"text": "@Tyler_Kaye Does this work? I couldn’t use EJSON , I got an error “Cannot find module ‘mongodb-extended-json’”. This is the code for the function I wrote with EJSONHere is the code that actually worked",
"username": "Todd_Stephenson"
}
] | [] |
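The loop in this thread is worth spelling out: the trigger's own updateOne sets updatedAt, which produces another update event, which fires the trigger again. The $exists filter works because that self-inflicted event carries only updatedAt in updateDescription.updatedFields. An illustrative sketch of the event shape (field values are made up):

```javascript
// Change event produced by the trigger's own
// updateOne({ $set: { updatedAt: ... } }) call (illustrative values):
const selfInflictedEvent = {
  operationType: "update",
  documentKey: { _id: "655d..." },
  updateDescription: {
    updatedFields: { updatedAt: "2023-11-09T18:00:00.000Z" },
    removedFields: []
  }
};

// The match expression requires
//   "updateDescription.updatedFields.updatedAt": { $exists: false }
// for update events, so this event is filtered out and the
// insert -> update -> update -> ... loop is broken.
```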
null | Can I validate fields in one collection against values in another? | 73 | Can I validate fields in one collection against values in another? | 2023-11-10T05:41:58.092Z | [
{
"code": "",
"text": "I would like to make sure that db.CollectionX.fieldY is constrained to values in db.CollectionY. Essentially this is a schema enum, but the enum has to be drawn from another collection, not typed directly in the schema. (This would violate DRY in our application, since we also need these values elsewhere in the code.)",
"username": "Eugene_Callahan1"
},
{
"code": "db.CollectionYdb.CollectionXdb.CollectionYdb.CollectionYdb.CollectionXraise NoEnum unless CollectionWhy.where({ value: value_to_test }).exists?\n",
"text": "What will happen if the values in db.CollectionY get deleted?If the answer is along the lines of nothing should happen to db.CollectionX or the values in db.CollectionY don’t ever get deleted, the solution is to validate the expected value in db.CollectionY in the application layer (probably in controllers if the project structure mentioning has one) before writing to db.CollectionX.That is what I would do, given only the info you mentioned.",
"username": "3Ji"
},
{
"code": "",
"text": "We ALREADY validate the values in the application layer. The issue is, let’s say someone enters ‘Otters’ (in collections ‘Animals’) as ‘Reptiles’ (from collection ‘Classes’). Now we’d like someone using mongosh to be able to correct that to ‘Mammals’. They go to do that, but they mistype the Class as ‘Mamals’. (Humans will do such things!) It would be nice if the DB itself could block such a typo, since ‘Mamals’ is not a valid member of ‘Classes’.",
"username": "Eugene_Callahan1"
},
{
"code": "",
"text": "My message is packed in my question. You can’t just take the second half and ignore the first half.If the validations you are looking for do exist, working the way you wanted it to be, what should happen in the scenario described?Different situations call for different opinions; it is hard to satisfy everyone. Some want it the foreign-key-constrain way, some don’t. The foreign-key way has some more issues of its own. (The ones who don’t include me.)Like a lot of things in MongoDB, the database puts the decision power in the hands of applications. Which, IMO, is a way better design than RDBMS.I don’t see your whole situation, but for me, if my application doesn’t support the correction without proper validations, it is incomplete —no mongosh editing production data.\nHowever, I understand that your situation can be different. I just happened to use the language that solved what you mentioned. It is not really my accomplishment, but people in the community.Just to be clear, I use Ruby. In production, we REPL on the actual models with validations intact. We even wrote troubleshooting scripts for common scenarios and will add UI if the issue is frequent.I don’t know every nook and cranny of MongoDB, but I ran production applications with millions of documents and proper business logic for years. I am not aware of such functionality of MongoDB; it might exist, but I don’t know and never heard of it. They provide building blocks to add such functionality when the situation calls for.",
"username": "3Ji"
},
{
"code": "",
"text": "I really just needed to know if this could be done. I guess the answer is “no.” But I do appreciate the additional essay on software engineering that accompanied the answer.",
"username": "Eugene_Callahan1"
}
] | [] |
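Since MongoDB has no foreign-key constraint, the check has to live wherever writes happen, including ad-hoc mongosh sessions. A minimal sketch of such a guarded write for the Animals/Classes example above (the helper name and field names are hypothetical, not from the thread):

```javascript
// Hypothetical mongosh helper: refuse to set an animal's class to a value
// that is not present in the Classes collection.
function setAnimalClass(animalId, className) {
  const exists = db.Classes.findOne({ name: className }) !== null;
  if (!exists) {
    throw new Error(`'${className}' is not a valid member of Classes`);
  }
  return db.Animals.updateOne(
    { _id: animalId },
    { $set: { class: className } }
  );
}

// setAnimalClass(otterId, "Mamals"); // throws instead of storing the typo
```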
Docker replica not copy to secondary | 255 | Docker replica not copy to secondary | 2023-10-26T09:47:04.591Z | [
{
"code": "version: '3'\nservices:\n mongo1:\n hostname: mongo1\n image: mongo\n env_file: .env\n expose:\n - 27017\n environment:\n - MONGO_INITDB_DATABASE=${MONGO_INITDB_DATABASE}\n - MONGO_INITDB_ROOT_USERNAME=${MONGO_INITDB_ROOT_USERNAME}\n - MONGO_INITDB_ROOT_PASSWORD=${MONGO_INITDB_ROOT_PASSWORD}\n networks:\n - mongo-network\n ports:\n - 172.16.50.15:27017:27017 \n volumes:\n - ./db-data1:/data/db\n - ./replica.key:/etc/replica.key\n restart: always\n command: mongod --replSet my-mongo-set --keyFile /etc/replica.key --bind_ip_all\n mongo2:\n hostname: mongo2\n image: mongo\n env_file: .env\n expose:\n - 27017\n environment:\n - MONGO_INITDB_DATABASE=${MONGO_INITDB_DATABASE}\n - MONGO_INITDB_ROOT_USERNAME=${MONGO_INITDB_ROOT_USERNAME}\n - MONGO_INITDB_ROOT_PASSWORD=${MONGO_INITDB_ROOT_PASSWORD}\n networks:\n - mongo-network\n ports:\n - 172.16.50.16:27017:27017\n volumes:\n - ./db-data2:/data/db\n - ./replica.key:/etc/replica.key\n restart: always\n command: mongod --replSet my-mongo-set --keyFile /etc/replica.key --bind_ip_all\n mongo3:\n hostname: mongo3\n image: mongo\n env_file: .env\n expose:\n - 27017\n environment:\n - MONGO_INITDB_DATABASE=${MONGO_INITDB_DATABASE}\n - MONGO_INITDB_ROOT_USERNAME=${MONGO_INITDB_ROOT_USERNAME}\n - MONGO_INITDB_ROOT_PASSWORD=${MONGO_INITDB_ROOT_PASSWORD}\n networks:\n - mongo-network\n ports:\n - 172.16.50.17:27017:27017\n volumes:\n - ./db-data3:/data/db\n - ./replica.key:/etc/replica.key\n restart: always\n command: mongod --replSet my-mongo-set --keyFile /etc/replica.key --bind_ip_all\n\n mongoinit:\n image: mongo\n hostname: mongo\n env_file: .env\n networks:\n - mongo-network\n restart: \"no\"\n depends_on:\n - mongo1\n - mongo2\n - mongo3\n # command: tail -F anything\n command: >\n mongosh --host 172.16.50.15:27017 --username ${MONGO_INITDB_ROOT_USERNAME} --password ${MONGO_INITDB_ROOT_PASSWORD} --eval \n '\n config = {\n \"_id\" : \"my-mongo-set\",\n \"members\" : [\n {\n \"_id\" : 0,\n \"host\" : \"172.16.50.15:27017\",\n \"priority\": 3\n },\n {\n \"_id\" : 1,\n \"host\" : \"172.16.50.16:27017\",\n \"priority\": 2\n },\n {\n \"_id\" : 2,\n \"host\" : \"172.16.50.17:27017\",\n \"priority\": 1\n }\n ]\n };\n rs.initiate(config, { force: true });\n rs.status();\n '\n\n\nvolumes:\n db-data1:\n db-data2:\n db-data3:\n\nnetworks:\n mongo-network:\n driver: bridge\n{\n set: 'my-mongo-set',\n date: 2023-10-26T09:29:47.471Z,\n myState: 1,\n term: Long(\"1\"),\n syncSourceHost: '',\n syncSourceId: -1,\n heartbeatIntervalMillis: Long(\"2000\"),\n majorityVoteCount: 2,\n writeMajorityCount: 2,\n votingMembersCount: 3,\n writableVotingMembersCount: 3,\n optimes: {\n lastCommittedOpTime: { ts: Timestamp({ t: 1698312587, i: 1 }), t: Long(\"1\") },\n lastCommittedWallTime: 2023-10-26T09:29:47.126Z,\n readConcernMajorityOpTime: { ts: Timestamp({ t: 1698312587, i: 1 }), t: Long(\"1\") },\n appliedOpTime: { ts: Timestamp({ t: 1698312587, i: 1 }), t: Long(\"1\") },\n durableOpTime: { ts: Timestamp({ t: 1698312587, i: 1 }), t: Long(\"1\") },\n lastAppliedWallTime: 2023-10-26T09:29:47.126Z,\n lastDurableWallTime: 2023-10-26T09:29:47.126Z\n },\n lastStableRecoveryTimestamp: Timestamp({ t: 1698312557, i: 1 }),\n electionCandidateMetrics: {\n lastElectionReason: 'electionTimeout',\n lastElectionDate: 2023-10-26T09:22:36.919Z,\n electionTerm: Long(\"1\"),\n lastCommittedOpTimeAtElection: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n lastSeenOpTimeAtElection: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n numVotesNeeded: 2,\n 
priorityAtElection: 3,\n electionTimeoutMillis: Long(\"10000\"),\n numCatchUpOps: Long(\"0\"),\n newTermStartDate: 2023-10-26T09:22:37.059Z,\n wMajorityWriteAvailabilityDate: 2023-10-26T09:22:37.674Z\n },\n members: [\n {\n _id: 0,\n name: '172.16.50.15:27017',\n health: 1,\n state: 1,\n stateStr: 'PRIMARY',\n uptime: 445,\n optime: [Object],\n optimeDate: 2023-10-26T09:29:47.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:47.126Z,\n lastDurableWallTime: 2023-10-26T09:29:47.126Z,\n syncSourceHost: '',\n syncSourceId: -1,\n infoMessage: '',\n electionTime: Timestamp({ t: 1698312156, i: 1 }),\n electionDate: 2023-10-26T09:22:36.000Z,\n configVersion: 1,\n configTerm: 1,\n self: true,\n lastHeartbeatMessage: ''\n },\n {\n _id: 1,\n name: '172.16.50.16:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 441,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-10-26T09:29:37.000Z,\n optimeDurableDate: 2023-10-26T09:29:37.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:47.126Z,\n lastDurableWallTime: 2023-10-26T09:29:47.126Z,\n lastHeartbeat: 2023-10-26T09:29:45.731Z,\n lastHeartbeatRecv: 2023-10-26T09:29:46.705Z,\n pingMs: Long(\"1\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1\n },\n {\n _id: 2,\n name: '172.16.50.17:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 442,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-10-26T09:29:37.000Z,\n optimeDurableDate: 2023-10-26T09:29:37.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:47.126Z,\n lastDurableWallTime: 2023-10-26T09:29:47.126Z,\n lastHeartbeat: 2023-10-26T09:29:45.731Z,\n lastHeartbeatRecv: 2023-10-26T09:29:46.706Z,\n pingMs: Long(\"1\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1\n }\n ],\n ok: 1,\n '$clusterTime': {\n clusterTime: Timestamp({ t: 1698312587, i: 1 }),\n signature: {\n hash: Binary.createFromBase64(\"ib1dE3JOZYGqKad6qnbOxj2NWXo=\", 0),\n keyId: Long(\"7294195172714217475\")\n }\n },\n operationTime: Timestamp({ t: 1698312587, i: 1 })\n}\n{\n set: 'my-mongo-set',\n date: 2023-10-26T09:30:03.394Z,\n myState: 2,\n term: Long(\"1\"),\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n heartbeatIntervalMillis: Long(\"2000\"),\n majorityVoteCount: 2,\n writeMajorityCount: 2,\n votingMembersCount: 3,\n writableVotingMembersCount: 3,\n optimes: {\n lastCommittedOpTime: { ts: Timestamp({ t: 1698312597, i: 1 }), t: Long(\"1\") },\n lastCommittedWallTime: 2023-10-26T09:29:57.126Z,\n readConcernMajorityOpTime: { ts: Timestamp({ t: 1698312597, i: 1 }), t: Long(\"1\") },\n appliedOpTime: { ts: Timestamp({ t: 1698312597, i: 1 }), t: Long(\"1\") },\n durableOpTime: { ts: Timestamp({ t: 1698312597, i: 1 }), t: Long(\"1\") },\n lastAppliedWallTime: 2023-10-26T09:29:57.126Z,\n lastDurableWallTime: 2023-10-26T09:29:57.126Z\n },\n lastStableRecoveryTimestamp: Timestamp({ t: 1698312577, i: 1 }),\n electionParticipantMetrics: {\n votedForCandidate: true,\n electionTerm: Long(\"1\"),\n lastVoteDate: 2023-10-26T09:22:36.925Z,\n electionCandidateMemberId: 0,\n voteReason: '',\n lastAppliedOpTimeAtElection: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n maxAppliedOpTimeInSet: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n priorityAtElection: 1,\n newTermStartDate: 2023-10-26T09:22:37.059Z,\n newTermAppliedDate: 2023-10-26T09:22:37.675Z\n },\n members: [\n 
{\n _id: 0,\n name: '172.16.50.15:27017',\n health: 1,\n state: 1,\n stateStr: 'PRIMARY',\n uptime: 457,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-10-26T09:29:57.000Z,\n optimeDurableDate: 2023-10-26T09:29:57.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:57.126Z,\n lastDurableWallTime: 2023-10-26T09:29:57.126Z,\n lastHeartbeat: 2023-10-26T09:30:02.732Z,\n lastHeartbeatRecv: 2023-10-26T09:30:01.754Z,\n pingMs: Long(\"1\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '',\n syncSourceId: -1,\n infoMessage: '',\n electionTime: Timestamp({ t: 1698312156, i: 1 }),\n electionDate: 2023-10-26T09:22:36.000Z,\n configVersion: 1,\n configTerm: 1\n },\n {\n _id: 1,\n name: '172.16.50.16:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 457,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-10-26T09:29:57.000Z,\n optimeDurableDate: 2023-10-26T09:29:57.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:57.126Z,\n lastDurableWallTime: 2023-10-26T09:29:57.126Z,\n lastHeartbeat: 2023-10-26T09:30:02.265Z,\n lastHeartbeatRecv: 2023-10-26T09:30:02.731Z,\n pingMs: Long(\"2\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1\n },\n {\n _id: 2,\n name: '172.16.50.17:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 461,\n optime: [Object],\n optimeDate: 2023-10-26T09:29:57.000Z,\n lastAppliedWallTime: 2023-10-26T09:29:57.126Z,\n lastDurableWallTime: 2023-10-26T09:29:57.126Z,\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1,\n self: true,\n lastHeartbeatMessage: ''\n }\n ],\n ok: 1,\n '$clusterTime': {\n clusterTime: Timestamp({ t: 1698312597, i: 1 }),\n signature: {\n hash: Binary.createFromBase64(\"IVAPZTToW+9cNtRIV/PiQSvQQaM=\", 0),\n keyId: Long(\"7294195172714217475\")\n }\n },\n operationTime: Timestamp({ t: 1698312597, i: 1 })\n}\n",
"text": "Hello all,\nIve set docker env for create MongoDB replication with this docker compose:My env is running but i not understand why my doc is only on primary and not copy to the secondary db.this is my rs.status() on primaryinside secondary rs.status()and capture screen to mongo compass :\nreplicate\nsecondary 1\nsecondary 2TestMongoDB1639×1003 210 KBwhat did i forget ?",
"username": "Jeremy_Kermes"
},
{
"code": "",
"text": "Nobody has an idea ?",
"username": "Jeremy_Kermes"
},
{
"code": "use testMongo\nshow collections\ndb.testMongoCollection.find()\n",
"text": "Have you refreshed the Compass database views on the secondary? Compass does not automatically refresh the list of database or collections.In the mongosh, of Compass connected to a secondary run the commands:Personally, I would use the host names mongo1, mongo2 and mongo3 in the list of replica set members rather than the IP address.",
"username": "steevej"
},
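A minimal sketch of initiating the set with those host names instead of IPs. The service names mongo1/mongo2/mongo3 are assumptions taken from the suggestion above; they must be resolvable by every member and client:

// run once in mongosh against one member
rs.initiate({
  _id: "my-mongo-set",
  members: [
    { _id: 0, host: "mongo1:27017" },
    { _id: 1, host: "mongo2:27017" },
    { _id: 2, host: "mongo3:27017" }
  ]
})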
{
"code": "{\n set: 'my-mongo-set',\n date: 2023-11-07T07:32:31.942Z,\n myState: 2,\n term: Long(\"1\"),\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n heartbeatIntervalMillis: Long(\"2000\"),\n majorityVoteCount: 2,\n writeMajorityCount: 2,\n votingMembersCount: 3,\n writableVotingMembersCount: 3,\n optimes: {\n lastCommittedOpTime: { ts: Timestamp({ t: 1699342343, i: 3 }), t: Long(\"1\") },\n lastCommittedWallTime: 2023-11-07T07:32:23.683Z,\n readConcernMajorityOpTime: { ts: Timestamp({ t: 1699342343, i: 3 }), t: Long(\"1\") },\n appliedOpTime: { ts: Timestamp({ t: 1699342343, i: 3 }), t: Long(\"1\") },\n durableOpTime: { ts: Timestamp({ t: 1699342343, i: 3 }), t: Long(\"1\") },\n lastAppliedWallTime: 2023-11-07T07:32:23.683Z,\n lastDurableWallTime: 2023-11-07T07:32:23.683Z\n },\n lastStableRecoveryTimestamp: Timestamp({ t: 1699342332, i: 1 }),\n electionParticipantMetrics: {\n votedForCandidate: true,\n electionTerm: Long(\"1\"),\n lastVoteDate: 2023-10-26T09:22:36.925Z,\n electionCandidateMemberId: 0,\n voteReason: '',\n lastAppliedOpTimeAtElection: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n maxAppliedOpTimeInSet: { ts: Timestamp({ t: 1698312145, i: 1 }), t: Long(\"-1\") },\n priorityAtElection: 2,\n newTermStartDate: 2023-10-26T09:22:37.059Z,\n newTermAppliedDate: 2023-10-26T09:22:37.668Z\n },\n members: [\n {\n _id: 0,\n name: '172.16.50.15:27017',\n health: 1,\n state: 1,\n stateStr: 'PRIMARY',\n uptime: 1030205,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-11-07T07:32:23.000Z,\n optimeDurableDate: 2023-11-07T07:32:23.000Z,\n lastAppliedWallTime: 2023-11-07T07:32:23.683Z,\n lastDurableWallTime: 2023-11-07T07:32:23.683Z,\n lastHeartbeat: 2023-11-07T07:32:31.289Z,\n lastHeartbeatRecv: 2023-11-07T07:32:31.284Z,\n pingMs: Long(\"2\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '',\n syncSourceId: -1,\n infoMessage: '',\n electionTime: Timestamp({ t: 1698312156, i: 1 }),\n electionDate: 2023-10-26T09:22:36.000Z,\n configVersion: 1,\n configTerm: 1\n },\n {\n _id: 1,\n name: '172.16.50.16:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 1030209,\n optime: [Object],\n optimeDate: 2023-11-07T07:32:23.000Z,\n lastAppliedWallTime: 2023-11-07T07:32:23.683Z,\n lastDurableWallTime: 2023-11-07T07:32:23.683Z,\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1,\n self: true,\n lastHeartbeatMessage: ''\n },\n {\n _id: 2,\n name: '172.16.50.17:27017',\n health: 1,\n state: 2,\n stateStr: 'SECONDARY',\n uptime: 1030205,\n optime: [Object],\n optimeDurable: [Object],\n optimeDate: 2023-11-07T07:32:23.000Z,\n optimeDurableDate: 2023-11-07T07:32:23.000Z,\n lastAppliedWallTime: 2023-11-07T07:32:23.683Z,\n lastDurableWallTime: 2023-11-07T07:32:23.683Z,\n lastHeartbeat: 2023-11-07T07:32:31.287Z,\n lastHeartbeatRecv: 2023-11-07T07:32:31.288Z,\n pingMs: Long(\"1\"),\n lastHeartbeatMessage: '',\n syncSourceHost: '172.16.50.15:27017',\n syncSourceId: 0,\n infoMessage: '',\n configVersion: 1,\n configTerm: 1\n }\n ],\n ok: 1,\n '$clusterTime': {\n clusterTime: Timestamp({ t: 1699342343, i: 3 }),\n signature: {\n hash: Binary.createFromBase64(\"SWPKqfLLavFMGccTgAR2u67eHTk=\", 0),\n keyId: Long(\"7294195172714217475\")\n }\n },\n operationTime: Timestamp({ t: 1699342343, i: 3 })\n}\n\n",
"text": "I try but ive got this error message.ProblémeMongo1631×972 73.7 KBI don’t understand i have Not primary and secondaryOK=False\nwith my rs.status() OK = 1 .I use 3 IP address because in the futur, where my replica work fine, i move docker instance of mongo on 3 PC on 3 localstorages.",
"username": "Jeremy_Kermes"
},
{
"code": "mongodb://user:[email protected]:27017/?authMechanism=DEFAULT&replicaSet=my-mongo-set&directConnection=true\nmongodb://user:[email protected]:27017/?authMechanism=DEFAULT&replicaSet=my-mongo-set&readPreference=secondaryPreferred\n",
"text": "With this new error, after search on google i edit my string connection to my secondary by :Before:Now:Now with this new string connection i see my replicate collection, but ive a new question.In my MONGOSH prompt i see my-mongo-set [primary] test >Why i see [primary] ? My string connection content IP to the secondary, not for the primary ?Why if i use directConnection on primary i can see this data but not on secondary ?ProblémeMongo21633×822 52.6 KB",
"username": "Jeremy_Kermes"
},
{
"code": "directConnection=truereadPreference=secondaryPreferreddirectConnection=truereadPreference=secondaryPreferred",
"text": "Why i see [primary] ? My string connection content IP to the secondary, not for the primary ?Unless directConnection=true the client(Compass) will perform cluster discover and connect to the primary.\nreadPreference=secondaryPreferred willread from a secondary. The connections you have made could be reading from the exact same secondary.To read from a specific secondary both directConnection=true and readPreference=secondaryPreferred need to be specified.",
"username": "chris"
},
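A sketch of the distinction, reusing the host and credentials from the connection strings above:

// cluster discovery: the driver finds the whole set and may read from any secondary
mongodb://user:[email protected]:27017/?replicaSet=my-mongo-set&readPreference=secondaryPreferred

// pinned to this one member, with reads on it explicitly allowed
mongodb://user:[email protected]:27017/?directConnection=true&readPreference=secondaryPreferred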
{
"code": "",
"text": "Ok if i understand, i need use directConnection for verify if my documents is copy correctly on the secondary ?",
"username": "Jeremy_Kermes"
},
{
"code": "",
"text": "On a specific secondary, yes.",
"username": "chris"
},
{
"code": "",
"text": "Ok a set my string connection with directConnection and i don’t see my document on secondary 1 and secondary 2.mongodb://user:[email protected]:27017/?authMechanism=DEFAULT&replicaSet=my-mongo-set&directConnection=trueI don’t understand why my primary is not copied to the secondary.ProblémeMongo21296×583 24.8 KB",
"username": "Jeremy_Kermes"
},
{
"code": "directConnection=truereadPreference=secondaryPreferred",
"text": "We already covered this.To read from a specific secondary both directConnection=true and readPreference=secondaryPreferred need to be specified.Here is an example:image1959×957 135 KB",
"username": "chris"
},
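The screenshot itself is not recoverable here; a mongosh sketch of the same check, reusing the database and collection names from the earlier reply (adjust them to your data):

// connect with:
// mongosh "mongodb://user:[email protected]:27017/?directConnection=true&readPreference=secondaryPreferred"
use testMongo
db.testMongoCollection.find()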
{
"code": "",
"text": "The answer from chris came at the same time and was covering the same. Please ignore.",
"username": "steevej"
},
{
"code": "",
"text": "Indeed, withdirectConnection=true&readPreference=secondaryPreferredI can find all my documents.\nThank you all for your help and i’m sorry to have made you repeat yourself ",
"username": "Jeremy_Kermes"
}
] | [
"compass",
"mongodb-shell",
"containers"
] |
|
Boston MongoDB User Group Kickoff | 1,391 | Boston MongoDB User Group Kickoff | 2023-10-03T17:18:27.094Z | [
{
"code": "Senior Product Manager, Developer EducationAssociate Developer Advocate, MongoDBDirector of Developer Advocacy",
"text": "Screenshot 2023-10-25 1312531216×664 76.5 KB To RSVP - Please click on the “ ✓ RSVP ” link at the top of this event page if you plan to attend. The link should change to a green button once you RSVP. You need to be signed in to access the button. Have meetup.com? You can also register for this event on our Boston MongoDB User Group Meetup Page.Event Details:We are excited to welcome you to our first Boston MongoDB User Group (MUG). This evening is filled with:Agenda:Event Type: In-Person\nLocation: PTC Headquarters 121 Seaport Blvd Boston, MASenior Product Manager, Developer EducationAssociate Developer Advocate, MongoDBDirector of Developer Advocacy",
"username": "Veronica_Cooley-Perry"
},
{
"code": "",
"text": "We are excited to see you on Monday, November 13th at the MongoDB User Group (MUG) Meet-up!We are still accepting RSVPs so please consider inviting someone from your network.Location: PTC Headquarters 121 Seaport Blvd Boston, MA 2Event Schedule:Parking:There is a parking garage at 121 Seaport Blvd.Office Access:When you arrive at PTC Headquarters, MongoDB associates will help guide you into the event.This email address has been shared with PTC security. You’ll soon receive an email via the envoy system, guiding you to create an account. Afterward, you’ll get another email containing a QR code for office area access. You will need to show this QR code as well as a photo ID to security upon arrival. Failing to complete this ahead of time will require on-site completion, potentially causing entry delays.We ask that you kindly stay within the designated event location and maintain a respectful and professional atmosphere throughout the office.Please let me know if you have any questions!Best,Veronica and Chuck your MongoDB User Group Leader",
"username": "Veronica_Cooley-Perry"
}
] | [
"boston-mug"
] |
|
null | Convert json commn type to mongo types “string-id” to UUID(“string-id”) | 289 | Convert json commn type to mongo types "string-id" to UUID("string-id") | 2023-10-23T13:42:20.328Z | [
{
"code": "",
"text": "Hi dear friends \nI am working on a extension (js) which copies a JSON from the browser in to clipboard ready to be pasted.\nFor days now I have searched to find how to convert the id type which are string and convert them to UUID type.\ne.g:\n“id” : \" 0b2484c8-c069-4fa3-a23d-b40010ab59da\" >to> “id”: UUDI(“0b2484c8-c069-4fa3-a23d-b40010ab59da”)As well as for “LastUpdated”: “2023-10-23T08:46:13.213Z” >to> “LastUpdated”: ISODate(2023-10-23T08:46:13.213Z),I appreciate any idea or solution.",
"username": "Behzad_Pashaie2"
},
{
"code": "",
"text": "Hi @Behzad_Pashaie2,Welcome to the MongoDB Community forum I am working on a extension (js) which copies a JSON from the browser in to clipboard ready to be pasted.\nFor days now I have searched to find how to convert the id type which are string and convert them to UUID type.Are you looking for any specific conversion method? Also, may I ask the purpose of this conversion? Are you planning to store the data in a MongoDB Collection? If so, you might consider using a JS script to do the conversion process.Looking forward to your response.Best regards,\nKushagra",
"username": "Kushagra_Kesav"
},
{
"code": "",
"text": "KushagraHi @Kushagra_Kesav_1\nThanks for you kind response. Yes I am looking for a way (in javaScript function\\code ) to parse some values to different type (If I am not wrong BSON types!?)Let me explain it this way I have javascript code which copies the response of an API call as a :json1 :{\n“Id”: “b52918ea-3d32-4b5c-ac2c-114ac940c47d”,\n“RegDate”: “2023-09-26T00:00:00Z”\n}I need convert\\parse json1 tojson2 :{\n“Id”: UUDI(“b52918ea-3d32-4b5c-ac2c-114ac940c47d”),\n“RegDate”: ISODate(“2023-09-26T00:00:00Z”)\n}The reason to require this is when inserting json1 the “Id” and “RegDate” are inserted as string types while i need them to be UUID and ISODate type.\nI have searched a lot. I be so gratefull if help me with a solution\\practical approach o this.",
"username": "Behzad_Pashaie2"
},
{
"code": "document.addEventListener('DOMContentLoaded', function () {\n const copyButton = document.getElementById('copyButton');\n const msg = document.getElementById('msg');\n\n copyButton.addEventListener('click', function () {\n // Clear the existing URL list\n msg.innerHTML = '';\n\n // Get the current active tab's URL\n chrome.tabs.query({ active: true, currentWindow: true }, function (tabs) {\n const currentTab = tabs[0];\n let url = new URL(currentTab.url);\n //const url = currentTab.url;\n const baseUrl = url.protocol + '//' + url.hostname;\n\n console.log(url.hash);\n url= new URL(url.hash.replace(/^#/,\"\"),currentTab.url.split(\"#\")[0]);\n console.log(\"URL >>>\"+url);\n // Extract parameters from the URL (adjust as needed)\n const id = url.searchParams.get('id');\n \n const code = url.searchParams.get('code');\n \n const type = url.searchParams.get('type');\n // Construct the new URL\n const newUrl = `${baseUrl}/api/entity/${id}?companyCode=${code}&module=${type}`;\n console.log(newUrl);\n\n // Make the API call using the current URL\n fetch(newUrl)\n .then(response => response.json())\n .then(data => {\n data._id = data.Id;\n delete data.Id;\n const readyMongoData = changeMongoType(data);\n // Copy the API response to the clipboard\n copyToClipboard(JSON.stringify(readyMongoData)); // JSON.stringify(data)\n \n }).then(()=>{\n const newMsg = document.createElement('h3');\n newMsg.textContent = new DOMParser().parseFromString(`Copied to cliboard V4 <span>✓</span>`, 'text/html').body.textContent;\n msg.appendChild(newMsg);\n })\n .catch(error => {\n console.error('API Call Error:', error);\n });\n\n \n });\n });\n});\n\n\nfunction copyToClipboard(text) {\n navigator.clipboard.writeText(text)\n .then(() => {\n console.log('URL copied to clipboard:', text);\n // You can add a success message here\n })\n .catch(error => {\n console.error('Error copying to clipboard:', error);\n // You can add an error message here\n });\n}\n\nfunction parseDateToISODate(dateString) {\n const date = new Date(dateString);\n if (isNaN(date)) {\n // Handle invalid date input if necessary\n return null;\n }\n return date.toISOString();\n}\n\n\n\n///////////////////////////////////////////////////////////////////\nfunction changeMongoType(jsonObj) {\n \n for (const key in jsonObj) {\n if (jsonObj.hasOwnProperty(key)) {\n const value = jsonObj[key];\n \n if (typeof value === 'string' && /^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$/.test(jsonObj[key])) {\n jsonObj[key] = convertStringToUUIDFormat(value);\n \n }else if (typeof value === 'string' && /^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}(\\.\\d{3})?Z?$/.test(jsonObj[key]) && isStringADate(jsonObj[key])) {\n jsonObj[key] = `ISODate(\"${jsonObj[key]}\")`.replace(/\\\"/g, \"\");\n // jsonObj[key] = `ISODate(\"${jsonObj[key]}\")`.replace(/\\\"/g, \"\");\n } else if (typeof value === 'object') {\n changeMongoType(value); // Recursively process nested objects\n }\n }\n }\n \n return jsonObj;\n}\n\nfunction convertStringToUUIDFormat(input) {\n const regex = /^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$/;\n if (regex.test(input)) {\n console.log(`UUID('${input}')`.replace(/['\"]+/g, ''));\n //return `UUID('${input}')`.replace(/['\"]+/g, '');\n console.log(\"input> \"+input);\n return UUID(input);\n \n } \n return input;\n} \n\nfunction isStringADate(dateString) {\n // Try to parse the string into a Date object\n const date = new Date(dateString);\n // Check if the parsed date is a valid date and 
the original string is not \"Invalid Date\"\n return !isNaN(date) && date.toString() !== 'Invalid Date';\n}\n\nfunction preservId(dateString) {\n // Try to parse the string into a Date object\n const date = new Date(dateString);\n // Check if the parsed date is a valid date and the original string is not \"Invalid Date\"\n return !isNaN(date) && date.toString() !== 'Invalid Date';\n}```",
"text": "BTW here my code in the case you wonder:\nAs you see the changeMongoType function will do the job as now it only parsers\n“Id”: “b52918ea-3d32-4b5c-ac2c-114ac940c47d” to “Id”: “UUID(‘b52918ea-3d32-4b5c-ac2c-114ac940c47d’)\" which again is a string and dose not fulfill what I am looking for.",
"username": "Behzad_Pashaie2"
},
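Since mongosh helpers such as UUID() and ISODate() do not exist in a browser extension, one option is to emit MongoDB Extended JSON instead: mongosh and Compass can parse the $uuid and $date representations into real BSON types on insert. A minimal sketch under that assumption, reusing the regexes from the post above:

// Rewrite plain JSON values as Extended JSON so MongoDB tools type them on parse
const UUID_RE = /^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$/;
const DATE_RE = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{3})?Z$/;

function toExtendedJson(value) {
  if (typeof value === 'string' && UUID_RE.test(value)) return { $uuid: value }; // UUID (binary subtype 4)
  if (typeof value === 'string' && DATE_RE.test(value)) return { $date: value }; // BSON date
  if (Array.isArray(value)) return value.map(toExtendedJson);
  if (value && typeof value === 'object') {
    return Object.fromEntries(Object.entries(value).map(([k, v]) => [k, toExtendedJson(v)]));
  }
  return value;
}

// Usage in the extension: copyToClipboard(JSON.stringify(toExtendedJson(data)));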
{
"code": "",
"text": "Hi @Kushagra_Kesav_1Hope you are doing well. \nAny updates on this issue?\nThanks",
"username": "Behzad_Pashaie2"
},
{
"code": "",
"text": "@Kushagra_Kesav_1Hi @Kushagra_Kesav_1. Any updates. Looking to hear from you.",
"username": "Behzad_Pashaie2"
}
] | [] |
null | After some aggregation stages, I want to filter the array within the document that matches with the conditions | 116 | After some aggregation stages, I want to filter the array within the document that matches with the conditions | 2023-11-09T13:15:32.869Z | [
{
"code": "```I have 2 collections\n\n1. external_S_P_FLAT_main_api \n2. external_S_C_FLAT_main_api\n\nThe collection has data as below.\n\n\"external_S_P_FLAT_main_api\": [\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef588674d\"\n },\n \"data.pricing.material\": \"TG11\",\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000001\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef588674e\"\n },\n \"data.pricing.material\": \"TG12\",\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000002\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef588674f\"\n },\n \"data.pricing.material\": \"TG14\",\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000003\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef5886750\"\n },\n \"data.pricing.material\": \"TG2341\",\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000004\",\n }\n]\n\"external_S_C_FLAT_main_api\": [\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef5886751\"\n },\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef5886752\"\n },\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef5886753\"\n },\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n },\n {\n \"_id\": {\n \"$oid\": \"654c6a594d0867aef5886754\"\n },\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n }\n]\n\nBelow is the query I am executing: \n\ndb.external_S_P_FLAT_main_api.aggregate([\n {\n \"$addFields\": {\n \"external_S_P_FLAT_main_api_data.pricing.controlling_area\": \"$data.pricing.controlling_area\"\n }\n },\n {\n \"$addFields\": {\n \"external_S_P_FLAT_main_api_data.pricing.cost_center\": \"$data.pricing.cost_center\"\n }\n },\n {\n \"$lookup\": {\n from: \"external_S_C_FLAT_main_api\",\n let: {\n let_data__pricing__controlling_area: \"$external_S_P_FLAT_main_api_data.pricing.controlling_area\",\n let_data__pricing__cost_center: \"$external_S_P_FLAT_main_api_data.pricing.cost_center\"\n },\n pipeline: [\n {\n \"$match\": {\n \"$expr\": {\n \"$and\": [\n {\n \"$eq\": [\n \"$data.costcenter.controlling_area\",\n \"$$let_data__pricing__controlling_area\"\n ]\n },\n {\n \"$eq\": [\n \"$data.costcenter.cost_center\",\n \"$$let_data__pricing__cost_center\"\n ]\n }\n ]\n }\n }\n }\n ],\n as: \"from_external_S_C_FLAT_main_api\"\n }\n },\n {\n \"$project\": {\n _id: 0,\n \"external_S_P_FLAT_main_api_data.pricing.controlling_area\": 0,\n \"external_S_P_FLAT_main_api_data.pricing.cost_center\": 0,\n // from_external_S_C_FLAT_main_api: 0,\n }\n }\n])\n\nBelow is the output comes: \n\n[\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000001\",\n \"data.pricing.material\": \"TG11\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n 
},\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886751\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886752\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886753\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886754\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n }\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000002\",\n \"data.pricing.material\": \"TG12\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886751\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886752\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886753\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886754\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n }\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000003\",\n \"data.pricing.material\": \"TG14\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886751\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886752\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886753\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n 
\"data.costcenter.valid_from_date\": \"2023-10-25\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886754\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n }\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000004\",\n \"data.pricing.material\": \"TG2341\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886751\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886752\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886753\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n },\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886754\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n }\n ]\n }\n]\n\nI want the output in such a way that it has single element in the array from_external_S_C_FLAT_main_api which is matching with the condition as\nvalue of data.pricing.cost_center matches with from_external_S_C_FLAT_main_api.data.costcenter.cost_center and \nvalue of data.pricing.controlling_area matches with from_external_S_C_FLAT_main_api.data.costcenter.controlling_area\n\nSo the expected output should be as below:\n[\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000001\",\n \"data.pricing.material\": \"TG11\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886751\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000001\",\n \"data.costcenter.long_description\": \"CC DE000001 - 3rd\",\n \"data.costcenter.valid_from_date\": \"2023-09-12\"\n }\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000002\",\n \"data.pricing.material\": \"TG12\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886752\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\"\n },\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000003\",\n \"data.pricing.material\": \"TG14\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886753\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n 
\"data.costcenter.cost_center\": \"DE000003\",\n \"data.costcenter.long_description\": \"CC DE000003 - 1st\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n },\n ]\n },\n {\n \"data.pricing.controlling_area\": \"AJ00\",\n \"data.pricing.cost_center\": \"DE000004\",\n \"data.pricing.material\": \"TG2341\",\n \"external_S_P_FLAT_main_api_data\": {\n \"pricing\": {}\n },\n \"from_external_S_C_FLAT_main_api\": [\n {\n \"_id\": ObjectId(\"654c6a594d0867aef5886754\"),\n \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000004\",\n \"data.costcenter.long_description\": \"CC DE000004 - 2nd\",\n \"data.costcenter.valid_from_date\": \"2023-10-25\"\n }\n ]\n }\n]\n\nWhat am I missing? How shall I get the expected result?```\n",
"text": "",
"username": "Hemant_Joshi"
},
{
"code": " \"data.costcenter.controlling_area\": \"AJ00\",\n \"data.costcenter.cost_center\": \"DE000002\",\n \"data.costcenter.valid_from_date\": \"2022-03-02\",\n \"data.costcenter.long_description\": \"CC DE000002 - 1st\",\n \"data\" : {\n \"costcenter\" : {\n \"controlling_area\": \"AJ00\",\n \"cost_center\": \"DE000002\",\n \"valid_from_date\": \"2022-03-02\",\n \"long_description\": \"CC DE000002 - 1st\",\n }\n }\n",
"text": "To filter the array within the document you $project, $addFields or $set the array with $filter.Is there any reasons why your documents look like:rather thanI would be worry that the former structure takes more space. I would also be worry that you would need to project each field of data.costcenter individually.",
"username": "steevej"
},
{
"code": "",
"text": "@steevej,My documents look like this because I am storing the collection fields as dot notation.\nI have a Mongo version of v4.4.3Can you help me with the built query with the suggestions as below?\nfilter the array within the document you $project, $addFields or $set the array with $filter.",
"username": "Hemant_Joshi"
},
{
"code": "{ \"$set\" : {\n \"from_external_S_C_FLAT_main_api\" : { \"$filter\" : {\n \"input\" : \"$from_external_S_C_FLAT_main_api\" ,\n \"cond\" : { /* An expression that resolves to a boolean value used to determine if an element should be included in the output array. */ }\n } }\n} }\n",
"text": "It looks like you have not clicked on the $filter link I provided. Usually, there is a counter that indicates the number of time the link is clicked and as this morning there is no counter, so no one has clicked on the link. So it means you have not looked at the examples shown in the documentation.Example of $set with $filterWhy are youstoring the collection fields as dot notation",
"username": "steevej"
}
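A sketch completing the cond placeholder for this thread's data. It assumes the nested document structure recommended above, since literal dotted field names cannot be addressed by aggregation field paths on v4.4:

{ "$set" : {
  "from_external_S_C_FLAT_main_api" : { "$filter" : {
    "input" : "$from_external_S_C_FLAT_main_api" ,
    "as" : "cc" ,
    "cond" : { "$and" : [
      { "$eq" : [ "$$cc.data.costcenter.cost_center" , "$data.pricing.cost_center" ] } ,
      { "$eq" : [ "$$cc.data.costcenter.controlling_area" , "$data.pricing.controlling_area" ] }
    ] }
  } }
} }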
] | [
"aggregation"
] |
null | Realm Sync - Long Initial Download Times | 1,036 | Realm Sync - Long Initial Download Times | 2023-05-02T19:39:08.320Z | [
{
"code": "",
"text": "I have a macOS app built with the Realm Swift SDK. The dataset is about 150mb and is running on an M2.When a new user runs the app for the first time, the initial download of of the database takes FOREVER. It pushes 5 minutes, even on a 1000Mbps connection. Sync operations after the initial download are instant and perfect.Restructuring/segmenting the database is not an option; the app requires all of the data that is being downloaded.The initial-download performance is becoming untenable and I need advice on how to fix it. I found an old thread about compaction, here: MongoDB Realm syncing size - #13 by Brock_GLIt’s unclear if this advice is still relevant. Do I need to contact support to enable this, still? Or is the option now exposed in settings somewhere?",
"username": "Bryan_Jones"
},
{
"code": "",
"text": "Hi Bryan,\nWould you be willing to share your app ID? This will allow us to take a closer look and possibly determine if this is due to cluster limitations, network, or something else.",
"username": "mpobrien"
},
{
"code": "",
"text": "Sure. What’s the best way for me to get that to you in a non-public forum?",
"username": "Bryan_Jones"
},
{
"code": "",
"text": "I sent you a DM, we can coordinate there.",
"username": "mpobrien"
},
{
"code": "",
"text": "Hi,\nI have exactly the same issueDo you have any solution for it please ? We are obliged to uninstall every day the appThank you",
"username": "Laurent_RIPOCHE"
},
{
"code": "",
"text": "Hi, this is often very dependent on your application (object size, bootstrap size, cluster size, etc). Can you send a link to your application (realm cloud URL) and we can poke in and see if anything stands out?Best,\nTyler",
"username": "Tyler_Kaye"
},
{
"code": "",
"text": "Hi Tyler,\nThank you for your reply et sorry for my late replyHow can I send you the link ?",
"username": "Famille_Ripoche"
},
{
"code": "",
"text": "Sorry I was with the wrong account … Can you give me DM again please\nMy bad ",
"username": "Famille_Ripoche"
},
{
"code": "",
"text": "Hi,\nI need the DM on this account Laurent_RIPOCHE, the other one is an account created by error Thank you a lot !",
"username": "Laurent_RIPOCHE"
},
{
"code": "",
"text": "Hi, you should be able to just DM me directly. Also, the URL in the realm URL should not be considered sensitive information as only you and MongoDB employees are able to view it",
"username": "Tyler_Kaye"
},
{
"code": "",
"text": "Hi, I took a look at your app, and unfortunately I might need some more information from you. From my perspective, things look pretty good. In the past 10 days there have only been 27 bootstraps (clients connecting for the first time)Minimum Time: 315 ms\nMaximum Time: 564 ms\nAverage Time: 429 ms\nP95 Time: 545 msThis is the time it takes between receiving a connection from the client and sending the last of the download messages to the client. It is worth noting that the bootstraps themselves are not very large, so I wouldn’t expect there to be much time taken by the client to receive these changes and integrate them, but that part is not information collected by the server.Can you elaborate on what exactly is slow? And do you have a specific user / request_id from the logs that you are looking at?Best,\nTyler",
"username": "Tyler_Kaye"
},
{
"code": "",
"text": "Hi,\nThank you for your answerThe apps stuck like 5 minutes or more at the loading screenThe loading screen say that 100 % is downloaded and stay stuck for a long time and then go to the new view\n(I’ve made a screenshot)IMG_70371170×2532 110 KBIt’s so long that we preferer to uninstall it and resintall. But it’s not a long term solution I’ ve used the starter template for SwiftUIThe second weird part is that my database is only like 20MB and take 18 GB of disk spaceThank you a lot !\nHave a nice day",
"username": "Laurent_RIPOCHE"
},
{
"code": "",
"text": "Hi, this seems like something that is most likely an issue with the code used to open the realm and use it (and most likely blocking the main thread on something that is not happening). Can you share the code you are using for this?Best,\nTyler",
"username": "Tyler_Kaye"
},
{
"code": "",
"text": "Hi Tyler,\nHope you are well\nI’ve sent you the sample of my code, did you receive it ?Have a nice day",
"username": "Laurent_RIPOCHE"
},
{
"code": "",
"text": "Do you have an estimate for how much data is being synced down here? We do not have logging for this (though we should, and I can add that soon)",
"username": "Tyler_Kaye"
},
{
"code": "",
"text": "I don’t know,\nHow can i find this information ?Any clue for thie second issue :The second weird part is that my database is only like 20MB and take 18 GB of disk spacethank you",
"username": "Laurent_RIPOCHE"
},
{
"code": "",
"text": "You client device logs should show how much data is being sent to the client. You can post those logs here if possible?As for the second, are you talking about realm or MongoDB?",
"username": "Tyler_Kaye"
},
{
"code": "",
"text": "Thank you for your fast reply,\nI’ve sent you a dm with the log",
"username": "Laurent_RIPOCHE"
},
{
"code": "",
"text": "As for the second, are you talking about realm or MongoDB?\nMongoDB but we think that his realm who take the place",
"username": "Laurent_RIPOCHE"
}
] | [
"swift"
] |
null | Synonym mappings limit exceeded? | 114 | Synonym mappings limit exceeded? | 2023-11-09T02:23:39.364Z | [
{
"code": "title: {\n multi: {\n de: {\n searchAnalyzer: 'lucene.german',\n type: 'string',\n },\n en: {\n searchAnalyzer: 'lucene.english',\n type: 'string',\n },\n es: {\n searchAnalyzer: 'lucene.spanish',\n type: 'string',\n },\n fr: {\n searchAnalyzer: 'lucene.french',\n type: 'string',\n },\n it: {\n searchAnalyzer: 'lucene.italian',\n type: 'string',\n },\n pt: {\n searchAnalyzer: 'lucene.portuguese',\n type: 'string',\n },\n },\n type: 'string',\n },\n edition: {\n multi: {\n de: {\n searchAnalyzer: 'lucene.german',\n type: 'string',\n },\n en: {\n searchAnalyzer: 'lucene.english',\n type: 'string',\n },\n es: {\n searchAnalyzer: 'lucene.spanish',\n type: 'string',\n },\n fr: {\n searchAnalyzer: 'lucene.french',\n type: 'string',\n },\n it: {\n searchAnalyzer: 'lucene.italian',\n type: 'string',\n },\n pt: {\n searchAnalyzer: 'lucene.portuguese',\n type: 'string',\n },\n },\n type: 'string',\n },\n[\n{\n analyzer: 'lucene.english',\n name: 'book-english-synonyms',\n source: {\n collection: 'synonyms',\n },\n},\n{\n analyzer: 'lucene.spanish',\n name: 'book-spanish-synonyms',\n source: {\n collection: 'synonyms',\n },\n},\n]\n{ value: 'title', multi: 'en' },\n{ value: 'edition', multi: 'en' },\n",
"text": "Guys, some idea of what is this?, I have been looking through the documentation without success nor able to find a reference to such limit in the limits documentation.It happens if I try to add a second mapping to a search index.\nThe concrete case is that I have 2 fields title and edition that are analyzed differently depending on the language:And here is how I’m trying to create the mapping:And just to complete the idea I query like:And the problem that I’m trying to resolve is that I want to use a synonym for ‘1st’ to be ‘first’ and viceversa.\nConcrete example (and what I’m trying to resolve), If I search for “Optimize B2 first” or “Optimize B2 1st”, I should get the same results. Currently the result with larger score is the one that matches the way “first” is being written.Thanks in advance",
"username": "Ignacio_Larranaga"
},
{
"code": "{ value: 'title', multi: 'en' },\n{ value: 'edition', multi: 'en' },\n$search$search",
"text": "Thanks for providing that index definition and the context for the searches @Ignacio_Larranaga.And just to complete the idea I query like:And the problem that I’m trying to resolve is that I want to use a synonym for ‘1st’ to be ‘first’ and viceversa.\nConcrete example (and what I’m trying to resolve), If I search for “Optimize B2 first” or “Optimize B2 1st”, I should get the same results. Currently the result with larger score is the one that matches the way “first” is being written.I would like to get some further clarification here, is the 2 “query”'s you’ve mentioned in the above quote two separate $search queries? If so, could you provide the following:Look forward to hearing your response.Regards,\nJason",
"username": "Jason_Tran"
},
{
"code": "{\n \"mappings\": {\n \"dynamic\": false,\n \"fields\": {\n \"activeCopyCounts\": {\n \"dynamic\": true,\n \"type\": \"document\"\n },\n \"authors\": {\n \"type\": \"string\"\n },\n \"edition\": {\n \"multi\": {\n \"de\": {\n \"analyzer\": \"lucene.german\",\n \"searchAnalyzer\": \"lucene.german\",\n \"type\": \"string\"\n },\n \"en\": {\n \"analyzer\": \"lucene.english\",\n \"searchAnalyzer\": \"lucene.english\",\n \"type\": \"string\"\n },\n \"es\": {\n \"analyzer\": \"lucene.spanish\",\n \"searchAnalyzer\": \"lucene.spanish\",\n \"type\": \"string\"\n },\n \"fr\": {\n \"analyzer\": \"lucene.french\",\n \"searchAnalyzer\": \"lucene.french\",\n \"type\": \"string\"\n },\n \"it\": {\n \"analyzer\": \"lucene.italian\",\n \"searchAnalyzer\": \"lucene.italian\",\n \"type\": \"string\"\n },\n \"pt\": {\n \"analyzer\": \"lucene.portuguese\",\n \"searchAnalyzer\": \"lucene.portuguese\",\n \"type\": \"string\"\n }\n },\n \"type\": \"string\"\n },\n \"isbn\": {\n \"type\": \"string\"\n },\n \"labelsDe\": {\n \"analyzer\": \"lucene.german\",\n \"searchAnalyzer\": \"lucene.german\",\n \"type\": \"string\"\n },\n \"labelsEn\": {\n \"analyzer\": \"lucene.english\",\n \"searchAnalyzer\": \"lucene.english\",\n \"type\": \"string\"\n },\n \"labelsEs\": {\n \"analyzer\": \"lucene.spanish\",\n \"searchAnalyzer\": \"lucene.spanish\",\n \"type\": \"string\"\n },\n \"labelsFr\": {\n \"analyzer\": \"lucene.french\",\n \"searchAnalyzer\": \"lucene.french\",\n \"type\": \"string\"\n },\n \"labelsIt\": {\n \"analyzer\": \"lucene.italian\",\n \"searchAnalyzer\": \"lucene.italian\",\n \"type\": \"string\"\n },\n \"labelsPt\": {\n \"analyzer\": \"lucene.portuguese\",\n \"searchAnalyzer\": \"lucene.portuguese\",\n \"type\": \"string\"\n },\n \"prices\": {\n \"fields\": {\n \"countryCode\": {\n \"analyzer\": \"lucene.keyword\",\n \"searchAnalyzer\": \"lucene.keyword\",\n \"type\": \"string\"\n }\n },\n \"type\": \"document\"\n },\n \"publisher\": {\n \"type\": \"string\"\n },\n \"title\": {\n \"multi\": {\n \"de\": {\n \"analyzer\": \"lucene.german\",\n \"searchAnalyzer\": \"lucene.german\",\n \"type\": \"string\"\n },\n \"en\": {\n \"analyzer\": \"lucene.english\",\n \"searchAnalyzer\": \"lucene.english\",\n \"type\": \"string\"\n },\n \"es\": {\n \"analyzer\": \"lucene.spanish\",\n \"searchAnalyzer\": \"lucene.spanish\",\n \"type\": \"string\"\n },\n \"fr\": {\n \"analyzer\": \"lucene.french\",\n \"searchAnalyzer\": \"lucene.french\",\n \"type\": \"string\"\n },\n \"it\": {\n \"analyzer\": \"lucene.italian\",\n \"searchAnalyzer\": \"lucene.italian\",\n \"type\": \"string\"\n },\n \"pt\": {\n \"analyzer\": \"lucene.portuguese\",\n \"searchAnalyzer\": \"lucene.portuguese\",\n \"type\": \"string\"\n }\n },\n \"type\": \"string\"\n }\n }\n },\n \"synonyms\": [\n {\n \"analyzer\": \"lucene.standard\",\n \"name\": \"book-synonyms\",\n \"source\": {\n \"collection\": \"synonyms\"\n }\n }\n ]\n}\n{\n ...,\n \"synonyms\": [\n {\n \"analyzer\": \"lucene.standard\",\n \"name\": \"book-synonyms\",\n \"source\": {\n \"collection\": \"synonyms\"\n }\n },\n {\n \"analyzer\": \"lucene.spanish\",\n \"name\": \"book-synonyms-es\",\n \"source\": {\n \"collection\": \"synonyms-es\"\n }\n }\n ]\n}\n",
"text": "Thanks @Jason_Tran , just to clarify I’m not really having an issue with the query but when creating the mapping. The “Synonym mappings limit exceeded” appears when I try to add a second synonym to the same index.Specifically here are my current mappings:If I try for example to do:The error appears, right in the admin console.\nI don’t think it is related to the document or the mapping itselft but to my cluster tier (M0 & M2).\nMight it be?I’ll provide a sample data-set, query and mapping for the 1st/first issue I described above but the root of the problem is this inability to add a second synonym.",
"username": "Ignacio_Larranaga"
},
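For the 1st/first behaviour itself, independent of the mapping-count limit, the synonyms source collection needs a document along these lines (the documented equivalent mapping format; the exact document shown is illustrative):

// one document in the "synonyms" source collection; an equivalent mapping
// makes "1st" and "first" match each other in both directions
{ mappingType: "equivalent", synonyms: ["1st", "first"] }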
{
"code": "",
"text": "Thanks for clarifying Ignacio.I don’t think it is related to the document or the mapping itselft but to my cluster tier (M0 & M2).As of now there are some limits related to Atlas Search and M0, M2/M5 tier clusters. I’m just checking if the below is related or if there’s other limits that may be causing this error:Regards,\nJason",
"username": "Jason_Tran"
},
{
"code": "",
"text": "Just for clarification, in my case the synonyms collections has 32 docs.",
"username": "Ignacio_Larranaga"
}
] | [] |
null | Emmanuel Katto African Introduction | 109 | Emmanuel Katto African Introduction | 2023-11-08T07:11:53.358Z | [
{
"code": "",
"text": "Heya, I’m Emmanuel Katto, African Minerals and Mining Engineer expert from Uganda. I like to travel, do adventure sports, and try new cuisines when I’m not working. I find technology very intriguing and would love to share ideas to make this globe a better place to live. I am new to this community, hope will learn alot about the latest technology and development techniques.Thanks & RegardsEmmanuel Katto from Africa",
"username": "Emmanuel_Katto"
},
{
"code": "",
"text": "Welcome to the community! We’re so glad you’re here. Please be sure to check out our MongoDB User Groups and join one near you!",
"username": "Karissa_Fuller"
},
{
"code": "",
"text": "Thank you so much for your warm welcome…",
"username": "Emmanuel_Katto"
}
] | [] |
null | Create a new user based on email only? | 157 | Create a new user based on email only? | 2023-11-04T13:48:53.402Z | [
{
"code": "",
"text": "I’m just starting to experiment with RealmCpp and Atlas Sync.I’ve verified everything works great with anonymous users.Now I want to be able to create a new user using their email as their ID. I don’t want to store a password for them. How would I go about this?It looks I could use Custom Function Authentication, and write a simple function that internally identifies users based on their email / username only.Thank you!",
"username": "Adam_Wilson"
},
{
"code": "",
"text": "I’d expect custom functions to work in the way you describe! Another alternative could be to hardcode a password in your app, and only use user input for the email field, though that is admittedly a bit hacky.",
"username": "Sudarshan_Muralidhar"
}
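A minimal sketch of such a custom-function authentication handler (an Atlas App Services function). The database, collection and field names here are assumptions; the function must return a stable string ID for the user:

// App Services auth function: identify (or create) a user by email only
exports = async function (loginPayload) {
  const { email } = loginPayload;
  const users = context.services
    .get("mongodb-atlas")
    .db("appDb")              // placeholder database name
    .collection("app_users"); // placeholder collection name

  const existing = await users.findOne({ email });
  if (existing) return existing._id.toString();

  const inserted = await users.insertOne({ email, createdAt: new Date() });
  return inserted.insertedId.toString();
};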
] | [] |
Los Angeles MongoDB User Group (LA:MUG) | 877 | Los Angeles MongoDB User Group (LA:MUG) | 2023-10-11T22:32:55.133Z | [
{
"code": "Solution Architect",
"text": "LA_MUG1920×1080 161 KBThe LA MUG will host regular meetings to bring the MongoDB community together. It fosters sharing knowledge and meeting people who use and love MongoDB. Meetups will include interesting talks, networking, and discussions with and our community.This meetup will include a talk titled “MongoDB First Steps”. The presentation is a light introduction to MongoDB covering the unique aspects of reading and writing data with MongoDB. It is a gentle introduction with lots of demos to highlight the document database paradigm and how to work with it. No special background is required, and questions are encouraged!If you are a developer, DBA, analyst, or new to MongoDB - this talk is for you! To RSVP - Please click on the “ ✓ RSVP ” link at the top of this event page if you plan to attend. The link should change to a green button if you are going. You need to be signed in to access the button.Event Type: In-Person\nLocation: 2219 Main Street, Santa Monica CA 90405Solution ArchitectNuri wrangles code, data, and random bytes for a living. He’s been using MongoDB since version 1.8 to this very day.image726×746 66.2 KB",
"username": "Nuri_Halperin"
},
{
"code": "",
"text": "Hey All,Gentle Reminder: The event is tomorrow and we are excited to have you all join us tomorrowThe event is scheduled to begin at 18:15 at the Beach House CoWork.We want to make sure everyone has a fantastic time, so please arrive on time at 18:15 to ensure you don’t miss the session, and we can all have some time to chat before the talk begins.If you have any questions, please don’t hesitate to ask by replying to this thread Looking forward to seeing you all at the event!",
"username": "Harshit"
},
{
"code": "",
"text": "Hey All,Gentle Reminder: The event is tomorrow and we are excited to have you all join us tomorrowThe event is scheduled to begin at 18:15 at the Beach House CoWork.We want to make sure everyone has a fantastic time, so please arrive on time at 18:15 to ensure you don’t miss the session, and we can all have some time to chat before the talk begins.If you have any questions, please don’t hesitate to ask by replying to this thread.Looking forward to seeing you all at the event!\nimage928×707 158 KB",
"username": "Nuri_Halperin"
}
] | [
"losangeles-mug"
] |
|
null | db.colectionName1.aggregate([ { $match: { “filed1”: { $in: [“x”, “y”] }, } }, { $lookup: { from: “colectionName2”, localField: “_id”, foreignField: “xy”, as: “matchedDate” } }, { $match: { | 88 | db.colectionName1.aggregate([ { $match: { "filed1": { $in: ["x", "y"] }, } }, { $lookup: { from: "colectionName2", localField: "_id", foreignField: "xy", as: "matchedDate" } }, { $match: { | 2023-11-09T08:06:07.916Z | [
{
"code": "",
"text": "help to get optimized query , taking long time, any alternate methods",
"username": "P.S_Shilpa"
},
{
"code": "",
"text": "Please update the title of your post to something more comprehensible.Please provide the complete pipeline.Please provide the explain plan of your query.Please provide sample documents.",
"username": "steevej"
}
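To produce the explain plan requested above, something like the following works in mongosh (the collection name is taken from the title; the comment stands in for the stages being tuned):

// winning plan plus per-stage execution statistics
db.colectionName1.explain("executionStats").aggregate([
  /* the $match / $lookup / $match stages from the title */
])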
] | [
"aggregation"
] |
null | Using setDifference with mutiple collections | 118 | Using setDifference with mutiple collections | 2023-11-08T17:03:32.454Z | [
{
"code": "",
"text": "I am working with backups. At the moment I have two collections: A and A2.\ndocuments in both locations look like this\n{_id:ObjectId(“6456456456”), “name”:“a_unique_name”}The documents in A don’t share the same _id, they only share the “name” attribute.So my question is how can I use an aggregation pipeline to compare the two collections and find the difference between the sets of “name” attributes.Someone gave me a solution that required the creation of an additional attribute. I don’t think that sounds like an efficient idea. I image MongoDB is very powerful and should be able to not resort to setting and unsetting attributes per document when I have almost a billion of them.",
"username": "Leslie_Solorzano"
},
{
"code": "pipeline_A = [ ] ;\n\nlookup = { \"$lookup\" : {\n \"from\" : \"A2\" ,\n \"localField\" : \"name\" ,\n \"foreignField\" : \"name\" ,\n \"as\" : \"A2\" ,\n \"pipeline\" : [\n { \"$project\" : { \"_id\" : 1 } ,\n { \"$limit\" : 1 }\n ]\n} }\n\npipeline_A.push( lookup ) ;\n\nmatch = { \"$match\" : {\n \"A2.0\" : { \"$exists\" : false }\n} } ;\n\npipeline_A.push( match ) ;\n\nproject = { \"$project\" : {\n \"name\" : 1\n} }\n\n/* The following should produce names in A that are not in A2 */\n\ndb.A.aggregate( pipeline_A ) ;\n",
"text": "My approach would use $lookup.I would aggregate first on A to $lookup in A2, then a second aggregation on A2 to $lookup in A.You then do the same with pipeline_A2.Yes, there are 2 database accesses but with $unionWith you can do it in 1.Yes, matching names are processed twice.Yes, you need an index with name:1 prefix to have some kind of performance.When doing that sort of things, I like to $out the result in a temporary collection to explore and process the results later.",
"username": "steevej"
}
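A sketch of the single-command variant, assuming pipeline_A2 has been built as the mirror image of pipeline_A above ($unionWith needs MongoDB 4.4+):

// names in A missing from A2, unioned with names in A2 missing from A
db.A.aggregate( [
  ...pipeline_A ,
  { "$unionWith" : { "coll" : "A2" , "pipeline" : pipeline_A2 } } ,
  { "$out" : "name_differences" } // optional: keep the result for later inspection
] ) ;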
] | [
"aggregation"
] |
null | Node.JS CSFLE-enabled Enterprise database is not encrypting data (clear text visible in Compass) | 89 | Node.JS CSFLE-enabled Enterprise database is not encrypting data (clear text visible in Compass) | 2023-11-09T17:37:52.287Z | [
{
"code": "mongocryptdmongoshmongooseMongoDB Enterprise 7.0.2\n\"mongodb\": \"^6.2.0\",\n\"mongodb-client-encryption\": \"^6.0.0\",\n\"mongoose\": \"^8.0.0\",\ninitializeEncryptionautoEncryptionexport default class Encryption implements IEncryption {\n // ... There are several private and public variables not shown here\n\n // private constructor to enforce calling `initialize` method below, which calls this constructor internally\n private constructor(opts?: EncryptionConfigConstructorOpts) {\n this.tenantId = opts?.tenantId;\n this.keyVaultDbName = opts?.keyVaultDbName;\n this.keyVaultCollectionName = opts?.keyVaultCollectionName;\n this.DEKAlias = opts?.DEKAlias;\n\n // Detect a local development environment\n if (process.env?.ENVIRONMENT === LOCAL_DEV_ENV) {\n const keyBase64 = process.env?.LOCAL_MASTER_KEY;\n const key = Buffer.from(keyBase64, 'base64');\n\n // For testing, I'm manually switching between a local key and remote KMS\n // I'll leave out the production-detection code\n if (_debug) {\n this.provider = KMS_PROVIDER;\n this.kmsProviders = {\n aws: {\n accessKeyId: process.env.AWS_ACCESS_KEY_ID,\n secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,\n },\n };\n this.masterKey = {\n key: process.env.KMS_MASTER_ARN,\n region: opts?.masterRegion,\n };\n } else {\n this.kmsProviders = {\n local: {\n key,\n },\n };\n }\n }\n\n const keyVaultNamespace = `${this.keyVaultDbName}.${this.keyVaultCollectionName}`;\n\n const encryptionOptions: ClientEncryptionOptions = {\n keyVaultNamespace,\n kmsProviders: this.kmsProviders,\n };\n\n this.encryptionOptions = encryptionOptions;\n }\n\n public static async initialize(\n url: string,\n opts?: EncryptionConfigConstructorOpts\n ): Promise<Encryption> {\n // Set internal attributes\n const encryption = new Encryption(opts);\n\n // Create key vault collection (this is idempotent, afaik)\n const client = new MongoClient(url);\n const keyVaultDB = client.db(encryption.keyVaultDbName);\n const keyVaultColl = keyVaultDB.collection(encryption.keyVaultCollectionName);\n await keyVaultColl.createIndex(\n { keyAltNames: 1 },\n {\n unique: true,\n partialFilterExpression: { keyAltNames: { $exists: true } },\n }\n );\n\n let dek: UUID | undefined = undefined;\n\n // This checks for an existing DEK, then creates/assigns or just assigns when necessary\n try {\n // Initialize client encryption\n const clientEncryption = new ClientEncryption(client, encryption.encryptionOptions!);\n const keyOptions = {\n masterKey: encryption.masterKey,\n keyAltNames: [encryption.DEKAlias],\n };\n dek = await clientEncryption.createDataKey(encryption.provider, keyOptions);\n } catch (err: any) {\n // Duplicate key error is expected if the key already exists, so we fetch the key if that happens\n if (String(err?.code) !== '11000') {\n throw err;\n } else {\n // Check if a DEK with the keyAltName in the env var DEK_ALIAS already exists\n const existingKey = await client\n .db(encryption.keyVaultDbName)\n .collection(encryption.keyVaultCollectionName)\n .findOne({ keyAltNames: encryption.DEKAlias });\n\n if (existingKey?._id) {\n dek = UUID.createFromHexString(existingKey._id.toHexString());\n } else {\n throw new Error('DEK could not be found or created');\n }\n }\n } finally {\n await client.close();\n }\n\n encryption.dek = dek;\n encryption.isReady = !!encryption.dek;\n return encryption;\n }\n\n // Defined as an arrow function to preserve the `this` context, since it is called as a callback elsewhere\n // This gets called after the `initialize` method from within each 
micro-service\n public getSchemaMap = (\n jsonSchema: Record<string, unknown>,\n encryptionMetadata?: Record<string, unknown>\n ): Record<string, unknown> => {\n if (!this?.isReady) {\n throw new Error('Encryption class cannot get schema map until it is initialized');\n }\n\n const schemaMapWithEncryption = {\n encryptMetadata: {\n keyId: [this.dek],\n algorithm: process.env.ALG_DETERMINISTIC,\n ...encryptionMetadata,\n },\n ...jsonSchema,\n };\n return schemaMapWithEncryption;\n };\n}\n\n// ... Start up code\n const encryption = await Encryption.initialize(process.env.DB_CONN_STRING);\n const opts = {\n autoEncryption: {\n ...encryption.encryptionOptions\n },\n };\n\n await Service1Models.initialize(process.env.DB_CONN_STRING, opts, encryption.getSchemaMap);\n await Service2Models.initialize(process.env.DB_CONN_STRING, opts, encryption.getSchemaMap);\n\n// ... More start up code and API route config\ninitialize// ... Init code and then assigning the schema-generated model (which does not contain any encryption syntax)\nService1Model.service1DataModel = model<IService1Document>('Service1Doc', Service1Schema, 'Service1Docs');\n\n// Finally, connecting to the DB with a schema map generated for this service, specifically\nmongoose.connect(url, {\n ...opts,\n autoEncryption: opts?.autoEncryption\n ? {\n ...opts?.autoEncryption,\n schemaMap: getSchemaMap(importedSchemaJson),\n }\n : undefined,\n } as ConnectOptions);\n{\n \"MyCollection1\": {\n \"properties\": {\n \"myDataString\": {\n \"encrypt\": {\n \"bsonType\": \"string\"\n }\n },\n \"myDataArray\": {\n \"encrypt\": {\n \"bsonType\": \"array\"\n }\n },\n \"myDataObject\": {\n \"bsonType\": \"object\",\n \"properties\": {\n \"myNestedProperty1\": {\n \"encrypt\": {\n \"bsonType\": \"string\"\n }\n },\n \"myNestedProperty2\": {\n \"bsonType\": \"string\"\n }\n }\n }\n }\n }\n}\n",
"text": "In my Node.JS local development environment, using Compass, connected to a CSFLE-enabled MongoDB Enterprise client, I can see all the fields that are supposed to be encrypted by my JSON schema as clear text.",
"username": "Angus_Ryer"
},
{
"code": "",
"text": "@wan I heard you’re the expert around here ",
"username": "Angus_Ryer"
}
] | [
"mongoose-odm",
"compass",
"mongodb-shell",
"field-encryption",
"schema-validation"
] |
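One detail worth double-checking in setups like the one above (a hedged sketch, not a confirmed diagnosis of this post): the driver's schemaMap keys must be full "<db>.<collection>" namespaces. A map keyed by the collection name alone is never matched by automatic encryption, so writes land in clear text. Assuming a hypothetical database name service1:

```js
// Hedged sketch: schemaMap keys are "<db>.<collection>" namespaces.
// "service1" is a hypothetical database name used for illustration.
const schemaMap = {
  "service1.MyCollection1": {
    bsonType: "object",
    encryptMetadata: {
      keyId: [dek], // the data encryption key (UUID/Binary) from the key vault
      algorithm: "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic",
    },
    properties: {
      myDataString: { encrypt: { bsonType: "string" } },
    },
  },
};
```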
null | Index is not used on OR(NOT Exists(col), col is NULL) | 83 | Index is not used on OR(NOT Exists(col), col is NULL) | 2023-11-09T15:45:39.469Z | [
{
"code": "\"aggregate\" (\"pipeline\": [{\"$match\": {\"$or\": [{\"Review\": {\"$exists\": false}},{\"Review\": null}]}},{ \"$group\": {\"_id\": {},\"COUNT\": {\"$count\": {}}}}",
"text": "Hi, I have a collection with an index on { Review: 1}, and am trying to count the documents without a Review property or where the Review property is null.Query is: \"aggregate\" (\"pipeline\": [{\"$match\": {\"$or\": [{\"Review\": {\"$exists\": false}},{\"Review\": null}]}},{ \"$group\": {\"_id\": {},\"COUNT\": {\"$count\": {}}}}In the execution plan, it seems like the index is scanned, not seeked. Is there a way to get a seek out of this?",
"username": "Bruno_Denuit-Wojcik"
},
{
"code": "",
"text": "In the execution plan, it seems like the index is scanned, not seeked.Please provide the plan. What do you mean by scanned vs seeked?",
"username": "steevej"
}
] | [
"aggregation",
"indexes"
] |
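For anyone landing here with the same plan shape: in MongoDB, the equality predicate {Review: null} already matches documents where the field is either null or missing, so the $or with $exists: false is redundant, and the simpler form may give the planner tighter bounds on the { Review: 1 } index. A sketch (the collection name is a placeholder):

```js
// {Review: null} matches both missing fields and explicit nulls.
db.collection.aggregate([
  { $match: { Review: null } },
  { $group: { _id: {}, COUNT: { $count: {} } } },
]);

// Verify the index bounds in the plan:
db.collection.explain("executionStats").aggregate([
  { $match: { Review: null } },
  { $group: { _id: {}, COUNT: { $count: {} } } },
]);
```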
null | Adding search by user email functionality in app services app users | 331 | Adding search by user email functionality in app services app users | 2023-06-23T09:26:11.937Z | [
{
"code": "",
"text": "Howdy folks,I’m currently using app service auth for my app, which was fine, but thankfully has no gained a bit of traction and I have almost 3k users on it. The issue I’m facing now is that a new user is unable to sign in and i’m unable to see their account to see if they are verified because they’re down near the end of the list.Wondering if we could add a query by user email to the app services app users list which would allow me to find this user and help them get into their account.Thanks in advance\nEvan\ngymbuddy.ai",
"username": "gymbuddy_ai"
},
{
"code": "LOAD MORE",
"text": "I second this as a feature request.I end up Command + F searching for the email after tapping LOAD MORE a bunch of times.It’s super frustrating when I have to do it.",
"username": "Kurt_Libby1"
}
] | [
"queries"
] |
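Until such a search box exists, one workaround is scripting against the App Services Admin API, which can list an app's users page by page; the email filter then happens client-side. A rough sketch (the endpoint shape and the `after` pagination cursor are assumptions based on the Admin API docs; groupId, appId and token are placeholders):

```js
// Hedged sketch: paginate the Admin API user list, filter by email locally.
const BASE = "https://services.cloud.mongodb.com/api/admin/v3.0";

async function findUserByEmail(groupId, appId, token, email) {
  let after;
  for (;;) {
    const url =
      `${BASE}/groups/${groupId}/apps/${appId}/users` +
      (after ? `?after=${after}` : "");
    const res = await fetch(url, {
      headers: { Authorization: `Bearer ${token}` },
    });
    const users = await res.json();
    if (!Array.isArray(users) || users.length === 0) return null;
    const hit = users.find((u) => u.data?.email === email);
    if (hit) return hit;
    after = users[users.length - 1]._id; // assumed cursor: last user id seen
  }
}
```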
null | EF Core Migrations | 109 | EF Core Migrations | 2023-11-08T22:09:22.029Z | [
{
"code": "",
"text": "Hello,I realize the Mongo EF Core provider preview was recently released. Does anyone know if EF Core Migrations are supported?",
"username": "Herb_Ramos"
},
{
"code": "",
"text": "Hi, @Herb_Ramos,Thank you for your interest in the first public preview of our EF Core Provider. We do not currently support migrations, but are in discussions both internally and with Microsoft about how to support the notion of migrations in the context of document databases. You can find a list of our supported features along with the high-level roadmap on the GitHub project’s README.md.Sincerely,\nJames",
"username": "James_Kovacs"
},
{
"code": "",
"text": "Thanks for the quick reply.",
"username": "Herb_Ramos"
}
] | [
"dot-net"
] |
null | Privacy of flexible sync | 105 | Privacy of flexible sync | 2023-11-09T03:13:41.456Z | [
{
"code": "",
"text": "Hi,Using flexible sync- isn’t that a little weird that users have theoretical access to the entire database- unless filtered by a query on their device? I mean, it’s got advantages I guess for sharing data, but it sounds a bit hack-prone doesn’t it?\nI mean, the entire database is accessible to the client side app, which define rules/queries for accessing the user-specific data. Am I missing something/?",
"username": "donuts542"
},
{
"code": "",
"text": "Hi, permissions are defined on the server to define the access rules for the system (and any particular user). Please see here for more details: https://www.mongodb.com/docs/atlas/app-services/sync/app-builder/device-sync-permissions-guide/The view of data being synced down is a function of (a) the user’s permissions and (b) the users’s subscriptionsIf you have any other questions, please let me know.Best,\nTyler",
"username": "Tyler_Kaye"
},
{
"code": "",
"text": "Adding to the page Tyler mentioned, we also have a page in some of the SDK docs that goes into more details about how the combination of permissions and the Flexible Sync query determine what data can sync: https://www.mongodb.com/docs/realm/sdk/swift/sync/write-to-synced-realm/#determining-what-data-syncsThis page also has an example of what happens if you try to write data that doesn’t match the server-side permissions in App Services.This page hasn’t made it to all of the SDKs yet, so apologies if you haven’t seen it in an SDK you’re working with.",
"username": "Dachary_Carey"
}
] | [] |
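To make the permissions-plus-subscriptions point concrete, here is a minimal sketch of the two halves (the field name owner_id and the object name Task are assumptions for illustration):

```js
// Server-side role (App Services rules): only documents owned by the
// requesting user are readable/writable, no matter what the client queries.
const role = {
  name: "owner-only",
  apply_when: {},
  document_filters: {
    read: { owner_id: "%%user.id" },
    write: { owner_id: "%%user.id" },
  },
  read: true,
  write: true,
};

// Client-side (Realm JS, sketch): the subscription narrows what syncs,
// but can never widen access beyond the server-side rule above.
// await realm.subscriptions.update((subs) => {
//   subs.add(realm.objects("Task").filtered("owner_id == $0", user.id));
// });
```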
null | Permission issues migrating partition-based sync to flexible sync | 230 | Permission issues migrating partition-based sync to flexible sync | 2023-11-02T11:36:34.002Z | [
{
"code": "_partitionKeyuser=user.data.externalUserIduser=fbdc4c82-3c18-4bee-9064-445b75f93cfeuserMatadata=user.data.externalUserIduserMetadata=fbdc4c82-3c18-4bee-9064-445b75f93cfecanReadPartitioncanWritePartitionexports = function (partition) {\n console.log(`Checking if can sync a write for partition = ${partition}`);\n\n const user = context.user;\n\n let partitionKey = \"\";\n\n const splitPartition = partition.split(\"=\");\n let partitionValue;\n if (splitPartition.length == 2) {\n partitionKey = splitPartition[0];\n partitionValue = splitPartition[1];\n console.log(`Partition key = ${partitionKey}; partition value = ${partitionValue}`);\n } else {\n console.log(`Couldn't extract the partition key/value from ${partition}`);\n return false;\n }\n\n switch (partitionKey) {\n case \"user\":\n case \"userMetadata\":\n console.log(`Checking if partitionValue(${partitionValue}) matches user.id(${user.data.externalUserId}) – ${partitionKey === user.data.externalUserId}`);\n return partitionValue === user.data.externalUserId;\n default:\n console.log(`Unexpected partition key: ${partitionKey}`);\n return false;\n }\n};\nReceived: ERROR \"Permission denied (BIND, IDENT, QUERY, REFRESH)\" (error_code=206, is_fatal=false, error_action=ApplicationBug)\npartition_owner{\n \"roles\": [\n {\n \"name\": \"partition_owner\",\n \"apply_when\": {\n \"$or\": [\n {\n \"_partitionKey\": \"user=%%user.data.externalUserId\"\n },\n {\n \"_partitionKey\": \"userMetadata=%%user.data.externalUserId\"\n }\n ]\n },\n \"document_filters\": {\n \"write\": {\n \"$or\": [\n {\n \"_partitionKey\": \"user=%%user.data.externalUserId\"\n },\n {\n \"_partitionKey\": \"userMetadata=%%user.data.externalUserId\"\n }\n ]\n },\n \"read\": {\n \"$or\": [\n {\n \"_partitionKey\": \"user=%%user.data.externalUserId\"\n },\n {\n \"_partitionKey\": \"userMetadata=%%user.data.externalUserId\"\n }\n ]\n }\n },\n \"read\": true,\n \"write\": true,\n \"insert\": true,\n \"delete\": true,\n \"search\": true\n }\n ]\n}\n",
"text": "We are currently using partition-based sync. We are testing the migration to flexible-sync and experiencing some issues while migrating the permission logic.In the partition-based model, we use “key=value” style _partitionKey. We have 2 basic partitionsThe access rules to both of these partitions are pretty trivial - the user can read and write both of those partitions when he is the owner. And that is governed by canReadPartition and canWritePartition functions that have the exact same body.Now, when migrating to flexible sync I need to define roles that would mimic this behavior. And that’s what I can’t figure out as after migration I get the following errorThe latest version of my role which I’ve named partition_owner looks like this.What am I doing wrong here?",
"username": "Gagik_Kyurkchyan"
},
{
"code": "\"user=%%user.data.externalUserId\"user=%%user.data.externalUserId\"read\": {\n \"$or\": [\n {\n \"user\": \"%%user.data.externalUserId\"\n },\n {\n \"userMetadata\": \"%%user.data.externalUserId\"\n }\n ]\n}\n",
"text": "Hi, I suspect what is going wrong here is that \"user=%%user.data.externalUserId\" is being interpreted as a string value and the expansion is not actually being run on the user object, so it is looking for _partitionKey to equal the value user=%%user.data.externalUserId.As a first question, it seems like the partition concept did not quite fit what you wanted, so you had to add a key/value storage into it. Are user and userMetadata fields on the documents themselves? If so, I suspect the ideal permissions for you would be to just define permission like this:Additionally, I believe you should remove the apply_when in the statement. For device sync, this field cannot reference fields in a document as it is applied at session start (not on each individual document). See here: https://www.mongodb.com/docs/atlas/app-services/rules/roles/#how-app-services-assigns-rolesBest,\nTyler",
"username": "Tyler_Kaye"
},
{
"code": "_partitionKeyuseruserMetadata\"user=%%user.data.externalUserId\"OwnerIdOwnerId",
"text": "Hey TylerAll of the entities that I have the _partitionKey values in the format of “user=EXTERNAL_USER_ID” or “userMetadata=EXTERNAL_USER_ID”. I do not have a user or a userMetadata field.Can we something validate that the \"user=%%user.data.externalUserId\" expression is not being string-interpolated and being used as is?If that’s the case, will I have to create a new field called “OwnerId” for instance, andIf you think that’s the most straightforward approach, I’d go for it.",
"username": "Gagik_Kyurkchyan"
},
{
"code": "OwnerId_partitionKey_partitionKey%%{\n \"name\": \"role\",\n \"apply_when\": {},\n \"document_filters\": {\n \"read\": { \"OwnerId\": \"%%user.data.externalUserId\" },\n \"write\": { \"OwnerId\": \"%%user.data.externalUserId\" }\n },\n \"read\": true,\n \"write\": true,\n \"insert\": true,\n \"delete\": true,\n \"search\": true\n}\n",
"text": "Hi @Gagik_Kyurkchyan,I imagine creating that new OwnerId field as you described above would be your best bet, assuming you want to avoid breaking changes / modifying the existing data for _partitionKey.The App Services Rules system isn’t really designed for handling the format of the values for _partitionKey unfortunately (it looks for the expansion identifier %% at the beginning of the string in particular). Note that it isn’t possible to call a function here in the same manner as before because the function operator is evaluated at session start – before any documents have been observed.I imagine the role you’ll want to define will look something like this in JSON:Let me know if that works,\nJonathan",
"username": "Jonathan_Lee"
},
{
"code": "{\n \"roles\": [\n {\n \"name\": \"owner\",\n \"apply_when\": {},\n \"document_filters\": {\n \"write\": {\n \"CreatedBy\": \"%%user.data.externalUserId\"\n },\n \"read\": {\n \"CreatedBy\": \"%%user.data.externalUserId\"\n }\n },\n \"read\": true,\n \"write\": true,\n \"insert\": true,\n \"delete\": true,\n \"search\": true\n }\n ]\n}\nCreatedByCreatedByending session with error: integrating changesets failed: operation was cancelled after retrying 12 times (ProtocolErrorCode=201)\n",
"text": "Hey @Jonathan_LeeThanks for getting back.I was trying several things before replying here. Here’s my journeyFirst thing I tried is the strategy I mentionedOnce I did this, I decided to launch the app from the master branch without changing any code as that’s what I am trying to achieve - seamless activation of flexible sync. Unfortunately, that didn’t go well. I would receive invalid permissions errors.I decided to add the CreatedBy field to the client code and ensure its value is set correctly. Once I did that, I was finally able to sync the data to the device. This means, we won’t be able to migrate to flexible sync unless we release a new version of the app that has CreatedBy field and everybody needs to upgrade to that latest version.However, there are more issues that I am experiencing. When I try to create entities like before, they won’t sync. Sync times out and I see the following error in my App Service logsSo, apparently, there are more deeper issues we will have to address.My question is the following, are there any real-world scenarios where partition-based sync was seamlessly migrated to flexible sync?\nPerhaps, a better option would simply be to deprecate the old app and start a new one that has flexible-sync built in? And suggest our customers to switch?",
"username": "Gagik_Kyurkchyan"
},
{
"code": "{\n \"name\": \"role\",\n \"apply_when\": {},\n \"document_filters\": {\n \"read\": { \"_partitionKey\": { \"$regex\": \"%%user.data.externalUserId\" }, \"$or\": [ { \"_partitionKey\": { \"$regex\": \"^user=<externalUserId-regex>$\" } }, { \"_partitionKey\": { \"$regex\": \"^userMetadata=<externalUserId-regex>$\" } } ] }\n \"write\": { \"_partitionKey\": { \"$regex\": \"%%user.data.externalUserId\" }, \"$or\": [ { \"_partitionKey\": { \"$regex\": \"^user=<externalUserId-regex>$\" } }, { \"_partitionKey\": { \"$regex\": \"^userMetadata=<externalUserId-regex>$\" } } ] }\n },\n \"read\": true,\n \"write\": true,\n \"insert\": true,\n \"delete\": true,\n \"search\": true\n}\n\"_partitionKey\": { \"$regex\": \"%%user.data.externalUserId\" }\"%%user.data.externalUserId\"\"_partitionKey\"\"$or\": [ { \"_partitionKey\": { \"$regex\": \"^user=<externalUserId-regex>$\" } }, { \"_partitionKey\": { \"$regex\": \"^userMetadata=<externalUserId-regex>$\" }<externalUserId-regex>user.data.externalUserId",
"text": "If updating the schema & deploying a new version of the app is unacceptable, then you could consider using the $regex operator on the “_partitionKey” field like this:It’s a bit ugly, but breaking down the rule expression here for the document read/write filters:Using this above configuration should hopefully tightly match the current partition-based sync permissions in a way that is compatible with flexible sync. Another thing worth mentioning here is that there is obviously some amount of performance hit with using the “$regex” operator now, and long term it may be better to continue to try and use a new field for permissions (it will definitely be faster).Let me know what you think,\nJonathan",
"username": "Jonathan_Lee"
}
] | [
"flexible-sync"
] |
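If anyone follows the new-field route discussed above, the existing data can be backfilled server-side from the old _partitionKey with an update pipeline, so only documents created by old clients need the migration (a sketch; run it against each synced collection):

```js
// Derive OwnerId from "_partitionKey" values shaped like "user=<id>" or
// "userMetadata=<id>" by splitting on "=" and taking the second element.
db.collection.updateMany(
  { OwnerId: { $exists: false }, _partitionKey: { $type: "string" } },
  [
    {
      $set: {
        OwnerId: { $arrayElemAt: [{ $split: ["$_partitionKey", "="] }, 1] },
      },
    },
  ]
);
```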
null | How to make $geoIntersects work for the “legacy coordinates pair” | 96 | How to make $geoIntersects work for the “legacy coordinates pair” | 2023-11-08T22:05:35.732Z | [
{
"code": "$geoIntersectsdb.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })null",
"text": "How to make $geoIntersects work for the “legacy coordinates pair”?it is simple to find the user’s current neighborhood with $geoIntersects.Trying to make/adapt the following find by $geoIntersects work with the “legacy coordinates pair” –\ndb.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })but I always get null results, even I surely know there is a match.Suppose the user is located at -73.93414657 longitude and 40.82302903 latitude. To find the current neighborhood, you will specify a point using the special $geometry field in GeoJSON format.I guess, on top of the above requirement, the field used for the $geoIntersects lookup must be in GeoJSON format as well, (unlike $geoWithin or $nearSphere), right?So, if my collection uses the legacy coordinates pair, like the sample_restaurants from sample db, is there any way for me to use the $geometry query please?",
"username": "MBee"
},
{
"code": "db.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })sample_restaurants.neighborhoods{\n _id: ObjectId(\"55cb9c666c522cafdb053a68\"),\n geometry: {\n coordinates: [\n [\n [\n -73.93383000695911,\n 40.81949109558767\n ],\n [\n -73.93411701695138,\n 40.81955053491088\n ],\n ... 248 more items\n ]\n ],\n type: 'Polygon'\n },\n name: 'Central Harlem North-Polo Grounds'\n}\n",
"text": "Hey @MBee,db.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })\nbut I always get null results, even I surely know there is a match.I tried the same query against the sample_restaurants.neighborhoods collection. It worked as expected and returned the following document as output:Please double-check that you are running the query against the correct collection, and ensure that your collection contains the correct set of data for accurate results.Best regards,\nKushagra",
"username": "Kushagra_Kesav"
},
{
"code": "db.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })",
"text": "db.neighborhoods.findOne({ geometry: { $geoIntersects: { $geometry: { type: \"Point\", coordinates: [ -73.93414657, 40.82302903 ] } } } })Ah, you’re right. I made a tiny mistake when trying the above.\nThanks for the confirmation.",
"username": "MBee"
}
] | [
"queries"
] |
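For readers whose data really is stored as legacy pairs (as in sample_restaurants.restaurants): $geoIntersects only works against GeoJSON, so one option is a one-time conversion of the legacy pairs into GeoJSON points plus a 2dsphere index (a sketch; field names follow the restaurants sample):

```js
// Wrap the legacy [lng, lat] pair into a GeoJSON Point...
db.restaurants.updateMany(
  { "address.coord": { $type: "array" } },
  [{ $set: { location: { type: "Point", coordinates: "$address.coord" } } }]
);

// ...then index it so $geoIntersects / $geoWithin / $nearSphere can use it.
db.restaurants.createIndex({ location: "2dsphere" });
```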
null | Objective-C Subscription predicate on ObjectId field | 104 | Objective-C Subscription predicate on ObjectId field | 2023-11-08T22:07:15.696Z | [
{
"code": "RLMRealmConfiguration *configuration = [self.realmApp.currentUser flexibleSyncConfigurationWithInitialSubscriptions:^(RLMSyncSubscriptionSet * _Nonnull subscriptions) {[subscriptions addSubscriptionWithClassName:@\"DealGeneratorSet\" where:@\"owner_id = %@\", self.realmApp.currentUser.identifier];} rerunOnOpen:true];\nTerminating app due to uncaught exception 'NSInvalidArgumentException', reason: 'Unable to parse the format string \"owner_id = 650781f373e92d4da4f51073\"'\nterminating with uncaught exception of type NSException\n\n",
"text": "I am having trouble setting up a Realm Subscription using the ObjectId field in the where predicate. I tried all sorts of string combinations, and the error persisted. All my objects have owner_id of type ObjectID, and the user should sync only its objects.\nHere is my code:The error is always along those lines:",
"username": "Milen_Milkovski"
},
{
"code": "[subscriptions addSubscriptionWithClassName:@\"DealGeneratorSet\" where:@\"owner_id == %@\", [[RLMObjectId alloc] initWithString:self.realmApp.currentUser.identifier error:nil]];\n",
"text": "It looks like after the update to the latest SDK 10.44, this code works as expected:",
"username": "Milen_Milkovski"
}
] | [
"objective-c"
] |
Jakarta MUG x DKatalis : Leveraging MongoDB in Modern Enterprises | 494 | Jakarta MUG x DKatalis : Leveraging MongoDB in Modern Enterprises | 2023-11-02T04:43:27.564Z | [
{
"code": "Senior Cloud Solution ConsultantTechnical LeadFull Stack EngineerData Warehouse Engineer",
"text": "WhatsApp Image 2023-11-02 at 12.08.461600×900 109 KBWe are thrilled to announce our offline community event collaboration with Dkatalis in Jakarta. Are you ready to dive into the world of MongoDB and gain insights from experts in the field? Don’t miss our upcoming MongoDB Community Event where we’ll explore exciting topics.Please fill this form first Pre-Registration Form To RSVP - Please click on the “ ✓ RSVP ” link at the top of this event page if you plan to attend. The link should change to a green button if you are RSVPed. You need to be signed in to access the buttonEvent Type: In-Person\nLocation: Dkatalis Jakarta Office, Menara BTPN 43rd Floor. Jl. Dr. IDe Anak Agung Gde Agung Kav. 5.5-5,6 CBD Mega Kuningan - SetiabudiWhatsApp Image 2023-10-24 at 14.09.06800×800 61 KBSenior Cloud Solution Consultant at Searce IncWhatsApp Image 2023-10-27 at 11.24.29-removebg-preview408×612 30.2 KBTechnical Lead at DKatalisWhatsApp Image 2023-10-29 at 23.16.35-removebg-preview458×544 49 KBFull Stack Engineer at DKatalis1667705812109600×600 145 KBData Warehouse Engineer at Bank BTPNCollaboration with DKatalisLogo DK Vertical royal purple (1)3150×3150 118 KB",
"username": "Fajar_Abdul_Karim"
},
{
"code": "",
"text": "Interesting topics! Unfortunately, I won’t be able to come in person. ",
"username": "berviantoleo"
},
{
"code": "",
"text": "sayang banget bro, if you have any feedback please dm me",
"username": "Fajar_Abdul_Karim"
}
] | [
"jakarta-mug"
] |
|
Issue related mongodb parse error | 110 | Issue related mongodb parse error | 2023-11-08T11:10:37.554Z | [
{
"code": "",
"text": "MongoParseError: Invalid scheme, expected connection string to start with “mongodb://” or “mongodb+srv://”\nimage1112×274 12.2 KB",
"username": "Aum_Shukla"
},
{
"code": "",
"text": "Show your connect string from the env file\nThere could be some syntax error",
"username": "Ramachandra_Tummala"
},
{
"code": "",
"text": "I tried all the syntax issues regarding the env file and double quotations the database name mentioned all but still this issue i am fixing it since 1 day but guide me connect with me so that i can resolve this issue.",
"username": "Aum_Shukla"
},
{
"code": "",
"text": "Screenshot (1)3286×1080 377 KB",
"username": "Aum_Shukla"
},
{
"code": "",
"text": "Remove semicolon( at the end of your connect string and try again",
"username": "Ramachandra_Tummala"
},
{
"code": "",
"text": "Also your password has special character @Aum_Shukla You need to escaped or URL encoded or change it to a simple one and try",
"username": "Ramachandra_Tummala"
}
] | [
"database-tools"
] |
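The password point above generalizes: special characters in the user or password must be percent-encoded before being placed in the URI. A small Node.js sketch (variable names and the host are illustrative):

```js
// encodeURIComponent percent-encodes characters like @ : / ? # that would
// otherwise break connection-string parsing.
const user = encodeURIComponent(process.env.DB_USER);
const pass = encodeURIComponent(process.env.DB_PASS); // e.g. "p@ss" -> "p%40ss"
const uri = `mongodb+srv://${user}:${pass}@cluster0.example.mongodb.net/mydb?retryWrites=true&w=majority`;
```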
|
null | Conditional Formatting not working anymore for groups (in Table) | 217 | Conditional Formatting not working anymore for groups (in Table) | 2023-11-03T15:54:22.184Z | [
{
"code": "",
"text": "Hello,Since a few days the conditionnal formating in table graph is not working anymore when based on comparison operator based on groups fields (example #toto > 0, background of text in red).Is there a bug or did the functionnality change ?Thank you\nSylvain",
"username": "Sylvain_Gelfi"
},
{
"code": "",
"text": "Hi Sylvain, sorry to hear for your issue.We have not changed the functionality for the conditional formatting.\nIt might be a bug but I can’t reproduce at the moment, so I will need a bit more details from you in order to investigate.Did you had the conditional formatting on an existing table chart that used to work before but suddenly stopped working last week or were you trying on a new chart? Could you please share a screenshot that shows the table, the rules and the encoding channels, so we can see the types?Like so:\ntable-conditional-formatting-rules1105×1098 74.6 KBtable-conditional-formatting-channels1103×1090 94 KB",
"username": "Kristina_Stefanova"
},
{
"code": "",
"text": "Hello, it was working before on existing tables and stopped working last week.\nAlso when I create new tables it is not working.\nTo be more precise it is NOT working for “VALUES” (like count(Countries) in your example)image1488×790 75 KB",
"username": "Sylvain_Gelfi"
},
{
"code": "",
"text": "But it is working with dynamic columns\nimage1107×577 21.2 KB",
"username": "Sylvain_Gelfi"
},
{
"code": "",
"text": "Encoding channels\nimage1453×793 82.8 KB",
"username": "Sylvain_Gelfi"
},
{
"code": "",
"text": "Thank you for the screenshots Sylvain, they are very helpful.It looks like the conditional formatting rules don’t work on decimal fields anymore.The team is working on a fix, but in the meantime a workaround is to convert the field to another numeric format. You can do this in the Chart builder by clicking on the ellipsis menu (…) of the field in the fields panel, and select “Convert Type” and then “Number”. This will remove the conditional formatting rules for this field, so you would need to add the rule again. Apologies for any inconvenience caused.Thank you for reporting the bug.\nWe will update this thread once the fix is rolled out.",
"username": "Kristina_Stefanova"
},
{
"code": "",
"text": "Hi Sylvain, we have released the patch that fixed this bug. Sorry for the inconvenience it has caused.",
"username": "James_Wang1"
},
{
"code": "",
"text": "Hello, YES! Everything is back to normal!\nThank you very much for the very quick fix Have a good day.",
"username": "Sylvain_Gelfi"
}
] | [] |
null | Ignore capitalization and diacritics (e.g. localeCompare in JS) | 208 | Ignore capitalization and diacritics (e.g. localeCompare in JS) | 2023-10-18T13:55:54.308Z | [
{
"code": "[\n\t\"Hello World\",\n\t\"hello world\",\n\t\"HELLO WORLD\",\n\t\"héllö wörld\"\n]\nthis.collection.findOne({ names: name })\n",
"text": "Hi. I’m looking for Mongo functionality to search documents based on a string that will match any variation of that string ignoring capitalization and diacritics. I am using the findOne Mongo command.JavaScript has this functionality with localeCompare: String.prototype.localeCompare() - JavaScript | MDN.For example I might search on “Hello World”, and the documents it should match might contain:To specify my use-case: I have documents with an array of names (1 or more) in each of them. Sometimes the name is written a bit differently, e.g. with an accent on one of the letters or slightly different capitalization. I will not use this functionality for long texts.I am using the Node.js Mongo client.",
"username": "Carsten"
},
{
"code": "$texttext$text$text{\n $text: {\n $search: <string>,\n $language: <string>,\n $caseSensitive: <boolean>,\n $diacriticSensitive: <boolean>\n }\n}\néêe$text",
"text": "Hi @Carsten, I think the features you’re looking for can be found within the $text operator. You would need to create a text index on the field you’re attempting to query through.According to the documentation here, you can use the $text operator on the following environments:MongoDB Atlas: The fully managed service for MongoDB deployments in the cloudMongoDB Enterprise: The subscription-based, self-managed version of MongoDBMongoDB Community: The source-available, free-to-use, and self-managed version of MongoDBThe $text operator uses the following syntax:It allows passing arguments for both your ignoring case sensitivity and diacritics.You can find documentation on the diacritic insensitivity option here.\nA caveat:Once the text index is created (at least version 3) on the field, using the $text operator will be diacritic and case insensitive unless otherwise specified.",
"username": "Jacob_Latonis"
},
{
"code": "return await this.collection.find(query, { collation, projection }).toArray();findOne",
"text": "For anyone reading this: I ended up using the collation function. I’m using the Node MongoDB client so I only needed to add a small part to my query:return await this.collection.find(query, { collation, projection }).toArray();Note I can no longer use findOne.",
"username": "Carsten"
}
] | [
"node-js"
] |
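For anyone comparing the two approaches in this thread, here is a minimal sketch of the collation route (names follow the thread; strength 1 is the collation level that compares base characters only, ignoring both case and diacritics):

```js
// strength: 1 means "héllö wörld", "HELLO WORLD" and "hello world"
// all match a search for "Hello World".
const collation = { locale: "en", strength: 1 };

const docs = await this.collection
  .find({ names: name }, { collation, projection })
  .toArray();

// Optional: an index created with the same collation can serve this query.
// await this.collection.createIndex({ names: 1 }, { collation });
```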
null | MongoDB Analyzer is not working | 1,575 | MongoDB Analyzer is not working | 2022-08-09T12:45:42.824Z | [
{
"code": "",
"text": "I am really interested in using MongoDB Analyzer as it is indeed very hard to figure out how to use C# driver.\nSo I installed NugetPackage in both VS2019 ans 2022 Enterprise.\nAnalyser is listed in references but it does not work. There simply aren’t any three grey dots under any expression.\nSomebody got this working? Am I missing something here?",
"username": "Kay_Zander"
},
{
"code": "",
"text": "just gladly found that it worked!Write the query code in a form like ‘collection.AsQueryable().Where(l=>l.Name.Contains(“bbb”))’ works. The key is “AsQueryable().Where”",
"username": "mx_fan"
}
] | [
"dot-net"
] |
null | SSL cert for free tier | 182 | SSL cert for free tier | 2023-11-06T09:03:53.660Z | [
{
"code": "",
"text": "I have a free, shared tier of a MongoDB Atlas version 6.0.11 cluster an SSL is enabled, so how do I get the SSL cert required to connect to it? the simple connection string is not enough",
"username": "Brennan_Ow"
},
{
"code": "",
"text": "Hi @Brennan_Ow,What is the context regarding this question? I’d recommend going over the FAQ: Security documentation for Atlas.Can you also verify what you mean by “the simple connection string is not enough”?Regards,\nJason",
"username": "Jason_Tran"
},
{
"code": "uri = \"mongodb+srv://<username>:<password>@cluster0.w029iod.mongodb.net/?retryWrites=true&w=majority\"\nclient = MongoClient(uri, server_api=ServerApi('1'))\n",
"text": "Context: To connect to my free and shared tier cluster, I’m using the python codeand sometimes it works, but sometimes I get the error stating that the SSL handshake failed.I even tried using the X.509 method with and the SSL handshake failure error message still happens 50% of the timeclient = MongoClient(uri,\ntls=True,\ntlsCertificateKeyFile=‘path/to/file.pem’,\nserver_api=ServerApi(‘1’))",
"username": "Brennan_Ow"
},
{
"code": "",
"text": "What’s the pymongo driver version you’re using and what’s the full error message being generated?Regards,\nJason",
"username": "Jason_Tran"
},
{
"code": "",
"text": "Thank you for continuing to follow up with me, but I have found a fix for it already. It turns out the ISP that I was using had some firewall configuration that did not allow me to connect to the cluster. So I outsourced the job of connecting to the cluster to a GitHub action where it’s ISP had no such issues.I came to the conclusion after reading this → Error: couldn't connect to server 127.0.0.1:27017 - #16 by Stennie_Xspecifically, this partAs for my pymongo version, it is 4.6.0The error message was as followsSSL handshake failed: <shard_id>.mongodb.net:27017: [WinError 10054] An existing connection was forcibly closed by the remote host (configured timeouts: socketTimeoutMS: 20000.0ms, connectTimeoutMS: 20000.0ms)Thank you for your time",
"username": "Brennan_Ow"
}
] | [
"python"
] |
null | Mongo create document call creating 2 different documents with a legacy schema version and another with new schema | 76 | Mongo create document call creating 2 different documents with a legacy schema version and another with new schema | 2023-11-08T21:40:56.311Z | [
{
"code": " @Prop({\n type: String,\n required: true,\n default: new Date().toISOString(),\n })\n dateAdded: string;\n @Prop({\n type: Date,\n default: new Date(),\n })\n dateAdded: Date;\n {\n \"_id\": {\n \"$oid\": \"654bfb0fd1c75afd0193f1d6\"\n },\n \"dateAdded\": {\n \"$date\": \"2023-11-07T21:31:30.373Z\"\n },\n \"__v\": 0\n },\n {\n \"_id\": {\n \"$oid\": \"654bfb1246e0213aa264584a\"\n },\n \"dateAdded\": \"2023-11-08T21:17:13.093Z\",\n \"__v\": 0\n },\n",
"text": "For context, my document’s latest schema looks like this:the legacy schema was:Now when I try to create the document in a single call, with a single payload for document body (which I checked with debuggers, for the presence of any async call being made or multiple calls being made. Which is not). I see that it is creating 2 different documents likeThe point to be noted is though they are created at the same time(verified by testing), their dateAdded field is different.",
"username": "Prasanjit_Dutta"
},
{
"code": "",
"text": "they are created at the same time(verified by testing)Really?It looks like there is 1 day difference between the 2 documents you shared. The one with the old schema is 2023-11-07 and the one with the new schema is 2023-11-08. It is far from being created at the same time. Since both schema call new Date(), I would surprised that there is a bug in new Date() that could generate 2 dates with 1 day difference if called at the same time.And what about __v:0 is both case? I suspect you are using some kind of abstraction layer and I suspect that something is missing and the new schema is not registered correctly.And why, why, why would you change a perfectly valid Date field into a ISO string? Do you really what to make your queries slower? Do you really want to make your data take more space? Date values stored as Date data type are smaller and faster than their ISO string variant.",
"username": "steevej"
}
] | [] |
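A hedged side note on the schemas in this thread: in Mongoose/NestJS, a default written as new Date().toISOString() (or new Date()) is evaluated once, when the schema module loads, so every document saved by that process reuses the same stamped value until a restart; a function default is re-evaluated per document. That alone can produce two "simultaneous" documents whose dateAdded values differ by a day. A sketch:

```js
const mongoose = require("mongoose");

// Evaluated once at module load: every document gets this same value.
// dateAdded: { type: Date, default: new Date() }

// Evaluated at save time, once per document:
const schema = new mongoose.Schema({
  dateAdded: { type: Date, default: () => new Date() }, // or default: Date.now
});
```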
null | Objective-C RLMArray property error | 77 | Objective-C RLMArray property error | 2023-11-08T23:00:05.360Z | [
{
"code": "Terminating app due to uncaught exception 'RLMException', reason: 'Property 'boards' is of type 'RLMArray<Deal_boards>' which is not a supported RLMArray object type. RLMArray can only contain instances of RLMObject subclasses. See https://www.mongodb.com/docs/realm/sdk/swift/fundamentals/relationships/#to-many-relationship for more information.'\nterminating with uncaught exception of type NSException\n\n",
"text": "I ran into trouble with my Schema and Objective-C. My App crashes with the following error. Any help will be appreciated. The class definitions were copied from the Schema editor on my Atlas account.",
"username": "Milen_Milkovski"
},
{
"code": "",
"text": "After playing around, the solution was to add implementation for each embedded class.Note to Atlas developers: please fix the schema code generator to add implementation for each of the embedded classes.",
"username": "Milen_Milkovski"
}
] | [
"swift"
] |
null | Problem installing MongoDB 6.0 on Amazon Linux 2 | 5,791 | Problem installing MongoDB 6.0 on Amazon Linux 2 | 2022-10-06T08:01:53.956Z | [
{
"code": "",
"text": "I’m also having the same issue as Install mongodb-org 5.0 on Amazon Linux 2 aarch64 architecture. how to resolve",
"username": "Simeon_Palla"
},
{
"code": "/proc/cpuinfox86_64aarch64",
"text": "Welcome to the MongoDB Community @Simeon_Palla!Please provide more details on the issue you are encountering:Aside from the typo in the original post, the repo format seems to be OK. I would follow the general tutorial to Install MongoDB Community Edition on Amazon Linux and replace x86_64 with aarch64.Thanks,\nStennie",
"username": "Stennie_X"
},
{
"code": "",
"text": "I’m trying to install MongoDB on the AWS Linux ec2 server, for my node.js app backend. but when I’m trying to install MongoDB I’m getting these errors$ sudo yum install -y mongodb-org\nLoaded plugins: extras_suggestions, langpacks, priorities, update-motd\nNo package mongodb-org available.\nError: Nothing to do\nerror1918×636 43.2 KB\n",
"username": "Simeon_Palla"
},
{
"code": "",
"text": "already tried this Install MongoDB Community Edition on Amazon Linux but not working",
"username": "Simeon_Palla"
},
{
"code": "/etc/yum.repos.d/mongodb-org-6.0.repoyum installyum repolist",
"text": "Hi @Simeon_Palla,Did you create the /etc/yum.repos.d/mongodb-org-6.0.repo file before running yum install ?What is the output of yum repolist?Thanks,\nStennie",
"username": "Stennie_X"
},
{
"code": "x86_64",
"text": "Hi @Simeon_Palla ,Please also confirm the hardware architecture your EC2 instance is using (x86_64, Graviton, etc).Thanks,\nStennie",
"username": "Stennie_X"
},
{
"code": "sudo yum install -y mongodb-org",
"text": "Hello @Stennie_X ! so I am having the same issue hereI have created the file as requested and still sudo yum install -y mongodb-org returns “No package mongodb-org available”I have tried to install it on similar machine and it worked just finehardware architecture: aarch64",
"username": "Ella_Mozes"
},
{
"code": "",
"text": "I’m having the same issue with amazon linux:Checking the repolist it seems that MongoDB 6 has much less entries:amzn-updates/latest amzn-updates-Base 7,548\nmongodb-org-3.4 MongoDB Repository 150\nmongodb-org-3.6 MongoDB Repository 144\nmongodb-org-4.0 MongoDB Repository 170\nmongodb-org-4.2 MongoDB Repository 120\nmongodb-org-4.4 MongoDB Repository 196\nmongodb-org-5.0 MongoDB Repository 177\nmongodb-org-6.0 MongoDB Repository 39",
"username": "Nicolas_Dickreuter"
},
{
"code": "",
"text": "Hi Stennie,\nI am trying to install Mongodb 6.0 in Amazon linux (https://www.mongodb.com/docs/manual/tutorial/install-mongodb-on-amazon/). But getting error as:package mongodb-org-6.0.6-1.amzn2.x86_64 requires mongodb-org-database, but none of the providers can be installed[root@ip-172-31-37-199 ~]# yum repolist\nrepo id repo name\namazonlinux Amazon Linux 2023 repository\nkernel-livepatch Amazon Linux 2023 Kernel Livepatch repository\nmongodb-org-6.0 MongoDB Repository[root@ip-172-31-37-199 ~]# aws --version\naws-cli/2.9.19 Python/3.9.16 Linux/6.1.29-47.49.amzn2023.x86_64 source/x86_64.amzn.2023 prompt/offPlease help",
"username": "Shivangi_Agarwal"
},
{
"code": "",
"text": "Hi Shivangi,\nDid you get any solution for your issue ? I’m hvaing a similar issue.\nError:\nProblem: conflicting requests[root@ip--21-10- ~]# aws --version\naws-cli/2.9.19 Python/3.9.16 Linux/6.1.34-59.116.amzn2023.x86_64 source/x86_64.amzn.2023 prompt/off",
"username": "vijay_shankar_Singh"
},
{
"code": "",
"text": "Hello all! I was having this issue today following the Install MongoDB Community Edition on Amazon Linux tutorial.I switched from the Amazon Linux 2 tab over to the Amazon Linux 2022 tab,\nresulting in a different base url in the yum repo file. This seemed to do the trick! Install complete.",
"username": "armslice_N_A"
}
] | [] |
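Summarizing the fixes that worked in this thread: the repo definition must match both the OS release and the CPU architecture. A sketch of /etc/yum.repos.d/mongodb-org-6.0.repo for Amazon Linux 2023 (the baseurl and gpgkey should be double-checked against the current install docs; swap x86_64 for aarch64 on Graviton instances):

```
[mongodb-org-6.0]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/amazon/2023/mongodb-org/6.0/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://pgp.mongodb.com/server-6.0.asc
```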
null | DBA Practice Questions and Exam Objectives | 113 | DBA Practice Questions and Exam Objectives | 2023-11-07T11:55:14.821Z | [
{
"code": "",
"text": "Hello,I am preparing for my DBA associate exam and there are a couple of questions I have regarding the syllabus and practice questions. I have already passed my developer exam.There is a question in the practice exam about an error in shard keys (more specifically, question 9). In the exam objective section of the exam study guide, the only learning objective about sharding is 1.4 Identify the function of sharding. Is this type of question valid for the exam? If it is, I’ll have to go a lot deeper in sharding.Ditto for question 10 on profiling, there are no learning objectives on profiling though it appeared in the learning path. I’d like to understand if I will need to go deeper in profiling for the exam too. The only similar objective is 3.1 on index performance, which is the same objective in the developer pathway, but there are no indications on the exam objective that profiling is included.(Not a question) I believe exam objective 2.17 is a duplicate of exam objective 2.1.More generally I’d like to understand if all exam questions will conform to the exam objectives in the DBA exam study guide. Your help is very much appreciated.",
"username": "Marcus_Peck"
},
{
"code": "",
"text": "@Marcus_Peck Hello and thanks for reaching out. In order to better assist you, please send this request over to [email protected]\nThank you!",
"username": "Heather_Davis"
}
] | [] |
$near 2d legacy distance measure | 1,537 | $near 2d legacy distance measure | 2020-03-22T09:17:12.758Z | [
{
"code": "",
"text": "In the below link, I found that $near 2d legacy use radians as distance measure.{ $near: [ , ], $maxDistance: }However, when I tested queries in the mongo shell, I got confused because it seems like $near 2d legacy use degrees as distance measure.db.restaurants.find({ ‘address.coord’ :{ $near : [ -73.9, 40.7 ], $maxDistance : 2/111.1 }}) #degreesdb.restaurants.find({ ‘address.coord’ :{ $near : [ -73.9, 40.7 ], $maxDistance : 2/6378.1}} ) #radiansI checked that the first query with degrees gave correct answer, not the second one with radians.Does $near 2d legacy use degrees or radians as distance measure?Is there anyone who can solve my problem?Thanks.",
"username": "Juhun_Kim"
},
{
"code": "",
"text": "Anyone can confirm/double-check on this please?",
"username": "MBee"
}
] | [
"queries"
] |
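A hedged summary consistent with the observation above: with a flat 2d index, $near's $maxDistance is expressed in the same units as the stored coordinates (degrees for longitude/latitude pairs), while radians apply to the spherical operators such as $nearSphere and $centerSphere. Sketch:

```js
// 2d index, flat geometry: $maxDistance is in coordinate units (degrees).
// Roughly 2 km at this latitude is 2 / 111.1 degrees.
db.restaurants.find({
  "address.coord": { $near: [-73.9, 40.7], $maxDistance: 2 / 111.1 },
});

// Spherical operators take radians: roughly 2 km is 2 / 6378.1 radians.
db.restaurants.find({
  "address.coord": {
    $geoWithin: { $centerSphere: [[-73.9, 40.7], 2 / 6378.1] },
  },
});
```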
|
Disable autoIndexing on Mongo serverless instance | 94 | Disable autoIndexing on Mongo serverless instance | 2023-11-08T11:43:06.761Z | [
{
"code": "",
"text": "I want to enable/disable autoindexing for my Serverless Mongo instance using api call or terraform config.I cound not find nether of them in documentation.From internet site of my mongo cluster I went to inspect window and I got api call which is called from mongo internet site to update/get autoindexing value. This is the url.\nhttps://cloud.mongodb.com/performanceAdvisor/groups/groupid/serverless/serverlessid/autoIndexingIs there any official information about where I can get for api call or terraform resource that can change autoindexing settings?",
"username": "Marija_R"
},
{
"code": "",
"text": "Hi MarijaThanks for the question. The Auto-Indexing feature for Serverless Instances is currently in private preview and is rolling out to customers over the next few weeks. Therefore, it is possible that your instance has not been enabled for it yet. We will add a “private preview” tag in our documentation to denote this.In the first iteration, auto-indexing will be a UI only feature. I’d like to understand more about your use case and how you’d like to use it using terraform or an API call. Feel free to reply on this thread or direct message me.Best,\nAnurag Kadasne",
"username": "Anurag_Kadasne"
}
] | [
"serverless",
"api"
] |
|
null | Getting this error - MongoNotConnectedError: Client must be connected before running operations | 8,763 | Getting this error - MongoNotConnectedError: Client must be connected before running operations | 2023-03-28T19:55:40.589Z | [
{
"code": "// Code to require the parts needed for seedsindex to work correctly\nconst mongoose = require('mongoose');\nconst MusicProduct = require('../database_models/musicproduct');\nconst BookProduct = require('../database_models/bookproduct');\n\nconst musicAlbums = require('./musicseeds');\nconst bookNovels = require('./bookseeds');\n\n// Connnect to MongoDB\nmongoose.connect('mongodb://127.0.0.1/music-bookApp');\nmongoose.set('strictQuery', false);\n\n// Logic to check that the database is connected properly\nmongoose.connection.on('error', console.error.bind(console, 'connection error:'));\nmongoose.connection.once('open', () => {\n console.log('Database connected');\n});\n\n//Fill the Music products database with 20 random albums taken from the music seeds file\nconst musicSeedDB = async () => {\n await MusicProduct.deleteMany({});\n for (let i = 0; i < 20; i++) {\n const randomMusic20 = Math.floor(Math.random() * 20);\n //const musicStock = Math.floor(Math.random() * 10) + 1;\n const musicItem = new MusicProduct({\n artistName: musicAlbums[randomMusic20].artist,\n albumName: musicAlbums[randomMusic20].title,\n //musicStock\n })\n await musicItem.save();\n }\n};\n\n//Fill the Book products database with 20 random books taken from the music seeds file\nconst bookSeedDB = async () => {\n await BookProduct.deleteMany({});\n for (let i = 0; i < 20; i++) {\n const randomBook20 = Math.floor(Math.random() * 20);\n //const bookStock = Math.floor(Math.random() * 10) + 1;\n const bookItem = new BookProduct({\n bookAuthor: bookNovels[randomBook20].authors,\n bookName: bookNovels[randomBook20].title,\n //ookStock\n })\n await bookItem.save();\n }\n};\n\n// Close the connection to DB after finish seeding\nmusicSeedDB().then(() => {\n mongoose.connection.close();\n});\n\nbookSeedDB().then(() => {\n mongoose.connection.close();\n});\n",
"text": "Hi All,I have recently started on a project at my University, and part of this project is including a seeds file to seed a DB with test information. Previously, this has worked fine but now I am getting the following error messages every time I run the seeds file in node.js:Database connected\nD:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\operations\\execute_operation.js:24\nthrow new error_1.MongoNotConnectedError(‘Client must be connected before running operations’);\n^MongoNotConnectedError: Client must be connected before running operations\nat executeOperationAsync (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\operations\\execute_operation.js:24:19)\nat D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\operations\\execute_operation.js:12:45\nat maybeCallback (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\utils.js:338:21)\nat executeOperation (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\operations\\execute_operation.js:12:38)\nat Collection.insertOne (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongodb\\lib\\collection.js:148:57)\nat NativeCollection. [as insertOne] (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongoose\\lib\\drivers\\node-mongodb-native\\collection.js:226:33)\nat Model.$__handleSave (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongoose\\lib\\model.js:309:33)\nat Model.$__save (D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\mongoose\\lib\\model.js:388:8)\nat D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\kareem\\index.js:387:18\nat D:\\OUWork\\Year 6\\TM470\\Project\\node_modules\\kareem\\index.js:113:15 {\n[Symbol(errorLabels)]: Set(0) {}\n}Node.js v18.12.1For reference (if it helps), here is the seeds file I have created and run:To be fair, the seeds file still seems to run as the database does update with the seeded information, but I would much rather get to the bottom of the error so I can stop it appearing.Thank you for your help in advance ",
"username": "gary_easton"
},
{
"code": "#!/usr/bin/env node\nimport { MongoClient } from 'mongodb';\nimport { spawn } from 'child_process';\nimport fs from 'fs';\n\nconst DB_URI = 'mongodb://0.0.0.0:27017';\nconst DB_NAME = 'DB name goes here';\nconst OUTPUT_DIR = 'directory output goes here';\nconst client = new MongoClient(DB_URI);\n\nasync function run() {\n try {\n await client.connect();\n const db = client.db(DB_NAME);\n const collections = await db.collections();\n\n if (!fs.existsSync(OUTPUT_DIR)) {\n fs.mkdirSync(OUTPUT_DIR);\n }\n\n collections.forEach(async (c) => {\n const name = c.collectionName;\n await spawn('mongoexport', [\n '--db',\n DB_NAME,\n '--collection',\n name,\n '--jsonArray',\n '--pretty',\n `--out=./${OUTPUT_DIR}/${name}.json`,\n ]);\n });\n } finally {\n await client.close();\n console.log(`DB Data for ${DB_NAME} has been written to ./${OUTPUT_DIR}/`);\n }\n}\nrun().catch(console.dir);\nconst mongoose = require('Mongoose');\nmongoose.connect(\"MongoDB://localhost:<PortNumberHereDoubleCheckPort>/<DatabaseName>\", {useNewUrlParser: true});\nconst <nameOfDbschemahere> = new mongoose.schema({\n name: String,\n rating: String,\n quantity: Number,\n someothervalue: String,\n somevalue2: String,\n});\n\nconst Fruit<Assuming as you call it FruitsDB> = mongoose.model(\"nameOfCollection\" , <nameOfSchemeHere>);\n\nconst fruit = new Fruit<Because FruitsDB calling documents Fruit for this>({\n name: \"Watermelon\",\n rating: 10,\n quantity: 50,\n someothervalue: \"Pirates love them\",\n somevalue2: \"They are big\",\n});\nfruit.save();\n",
"text": "Take a look at these two example scripts, first is Node.JS, second is Mongoose.The points I want to drive home with the first, is how the connections to the DB are being established and verified before the rest of the operations. And comparatively to how similar connections work with Mongoose, as you can choose to use Mongoose for redundancy to ensure the client connection if you’d like.Mongoose:Mongoose Script",
"username": "Brock"
},
{
"code": "",
"text": "Could it be because you wroteawait client.close();",
"username": "anont_mon"
},
{
"code": "",
"text": "error === {message : “Client must be connected before running operations”}\ni am facing this type of error so many times i worked it but i couldn’t fix that bug",
"username": "Madhesh_Siva"
},
{
"code": "",
"text": "Yes, you are absolutely right…",
"username": "Zahidul_Islam_Sagor"
}
] | [
"node-js",
"mongoose-odm"
] |
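The failure mode in the seeding script above is two async seeders racing to close one shared connection: whichever finishes first closes the client while the other is still saving, which is exactly when buffered operations start erroring. A hedged sketch of the usual fix (await everything, close once):

```js
const mongoose = require("mongoose");

// Run both seeders sequentially, then close the connection exactly once.
async function seedAll() {
  await mongoose.connect("mongodb://127.0.0.1/music-bookApp");
  try {
    await musicSeedDB();
    await bookSeedDB();
  } finally {
    // Single close, after all writes have finished.
    await mongoose.connection.close();
  }
}

seedAll().catch(console.error);
```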
null | I accidentally deleted cluster0, please restore cluster0 | 125 | I accidentally deleted cluster0, please restore cluster0 | 2023-11-06T14:52:24.554Z | [
{
"code": "",
"text": "I accidentally deleted cluster0, please restore cluster0",
"username": "Api_Sport"
},
{
"code": "",
"text": "I am pretty sure that it cannot be done.You could create another one with the same name. Hopefully, you have a backup of your important data.",
"username": "steevej"
}
] | [] |
null | Unable to build Go app with cse tag | 121 | Unable to build Go app with cse tag | 2023-11-07T16:52:01.268Z | [
{
"code": "csebrew info libmongocrypt\n==> mongodb/brew/libmongocrypt: stable 1.8.2, HEAD\nC library for Client Side Encryption\nhttps://github.com/mongodb/libmongocrypt\n/opt/homebrew/Cellar/libmongocrypt/1.8.2 (44 files, 9.4MB) *\n Built from source on 2023-11-07 at 11:28:05\nFrom: https://github.com/mongodb/homebrew-brew/blob/HEAD/Formula/libmongocrypt.rb\nLicense: Apache-2.0\n==> Dependencies\nBuild: cmake ✔, mongo-c-driver ✔\n==> Options\n--HEAD\n\tInstall HEAD version\ngo build -ldflags \"-X 'main.version=f0205c2409aa47ce7ac836784f364468af074380'\" -tags cse -o /path/to/go/project/bin/apiserver cmd/apiserver/apiserver.go\n# command-line-arguments\n/usr/local/go/pkg/tool/darwin_arm64/link: running clang failed: exit status 1\nld: warning: search path '/opt/homebrew/Cellar/libmongocrypt/1.8.1/lib' not found\nld: library 'mongocrypt' not found\nclang: error: linker command failed with exit code 1 (use -v to see invocation)\nlibmongocrypt v1.11.7",
"text": "I have followed the steps here to install libmongocrypt and added the cse tag to my go build.Build results:This was working for me before but I believe i installed libmongocrypt before the release on Sept 5. I was helping a team member get set up so I uninstalled to walk through the installation with them and now it’s failing for both of us.Our go-mongodriver version is v1.11.7Any suggestions?",
"username": "Kevin_Rathgeber"
},
{
"code": "> pkg-config --debug --cflags --libs libmongocrypt\n\nError printing enabled by default due to use of output options besides --exists, --atleast/exact/max-version or --list-all. Value of --silence-errors: 0\nError printing enabled\nAdding virtual 'pkg-config' package to list of known packages\nLooking for package 'libmongocrypt'\nLooking for package 'libmongocrypt-uninstalled'\nReading 'libmongocrypt' from file '/opt/homebrew/lib/pkgconfig/libmongocrypt.pc'\nParsing package file '/opt/homebrew/lib/pkgconfig/libmongocrypt.pc'\n line>Name: mongocrypt\n line>Description: The libmongocrypt client-side field level encryption library.\n line>Version: 1.8.2\n line>Requires:\n line>Requires.private:\n line>prefix=/opt/homebrew/Cellar/libmongocrypt/1.8.2\n Variable declaration, 'prefix' has value '/opt/homebrew/Cellar/libmongocrypt/1.8.2'\n line>includedir=${prefix}/include/mongocrypt\n Variable declaration, 'includedir' has value '/opt/homebrew/Cellar/libmongocrypt/1.8.2/include/mongocrypt'\n line>libdir=${prefix}/lib\n Variable declaration, 'libdir' has value '/opt/homebrew/Cellar/libmongocrypt/1.8.2/lib'\n line>Libs: -L${libdir} -lmongocrypt\n line>Cflags: -I${includedir}\nPath position of 'libmongocrypt' is 1\nAdding 'libmongocrypt' to list of known packages\n post-recurse: libmongocrypt\nadding CFLAGS_OTHER string \"\"\n post-recurse: libmongocrypt\n original: libmongocrypt\n sorted: libmongocrypt\nadding CFLAGS_I string \"-I/opt/homebrew/Cellar/libmongocrypt/1.8.2/include/mongocrypt \"\n post-recurse: libmongocrypt\n original: libmongocrypt\n sorted: libmongocrypt\nadding LIBS_L string \"-L/opt/homebrew/Cellar/libmongocrypt/1.8.2/lib \"\n post-recurse: libmongocrypt\nadding LIBS_OTHER | LIBS_l string \"-lmongocrypt \"\nreturning flags string \"-I/opt/homebrew/Cellar/libmongocrypt/1.8.2/include/mongocrypt -L/opt/homebrew/Cellar/libmongocrypt/1.8.2/lib -lmongocrypt\"\n-I/opt/homebrew/Cellar/libmongocrypt/1.8.2/include/mongocrypt -L/opt/homebrew/Cellar/libmongocrypt/1.8.2/lib -lmongocrypt\n",
"text": "Also here is the pkg-config debug for libmongocrypt:",
"username": "Kevin_Rathgeber"
},
{
"code": "go clean -cache",
"text": "This was resolved by running go clean -cache as described in this Github issue",
"username": "Kevin_Rathgeber"
}
] | [
"field-encryption"
] |
null | Nested array operation for $all does not seem to work in mongodb | 742 | Nested array operation for $all does not seem to work in mongodb | 2023-06-08T17:33:08.529Z | [
{
"code": "$all[\n {\n \"words\": [ \"Hello\", \"World\", \"!\"],\n \"wordArrays\": [\n [\"Hello\", \"World\", \"!\"],\n [\"The\", \"Sun\", \"Shines\"]\n ]\n }\n]\n{words: {$all: [\"Hello\", \"World\"]}}{words: {$all: [\"Hello\", \"I do not exist\"]}}{\"wordArrays.0\": {$all: [\"Hello\", \"World\"]}}$elemMatch$elemMatchwordArrayswordArrays{wordArrays: {$elemMatch: {$all: [\"Hello\", \"World\"]}}}wordArrays",
"text": "I am trying to find documents where any or all nested array elements must contain at least the values provided. This can normally be achieved easily with the $all operator. For example here:The query {words: {$all: [\"Hello\", \"World\"]}} correctly matches because both are contained and {words: {$all: [\"Hello\", \"I do not exist\"]}} does not match. So far so good.If you attempt the same for the nested arrays this stops working altogether. {\"wordArrays.0\": {$all: [\"Hello\", \"World\"]}} does not match. I have found similar questions why on nested arrays all the operators start to fail but no one ever can explain why and they just use $elemMatch. But I do not know how I would translate my “all values must be in the array” to an $elemMatch. And then how I would sayI have prepared this little playground with the data in the hopes somebody knows how to achieve this.\nPlaygroundI would prefer if this could be solved all within the Find() stage, but if operations like this suddenly require the aggregation pipeline I am fine with that too.My approach for ANY match does not return any matches, and I do not understand why {wordArrays: {$elemMatch: {$all: [\"Hello\", \"World\"]}}}. I read it as wordArrays: does any element match: array contains “Hello” and “World”. And that should have matched the documentThank you!",
"username": "Arkensor"
},
{
"code": "{\"wordArrays.0\": {$all: [\"Hello\", \"World\"]}}Atlas atlas-cihc7e-shard-0 [primary] test> db.server.find()\n[\n {\n _id: ObjectId(\"652e5a208c360b950bb50eea\"),\n word: 'Hello',\n words: [ 'Hello', 'World', '!' ],\n wordArrays: [ [ 'Hello', 'World', '!' ], [ 'The', 'Sun', 'Shines' ] ]\n }\n]\n{wordArrays: {$elemMatch: {$all: [\"Hello\", \"World\"]}}}wordArrays$elematchAtlas atlas-cihc7e-shard-0 [primary] test> db.server.find({wordArrays: {$elemMatch: {$all: [[\"Hello\", \"World\", \"!\"]]}}})\n[\n {\n _id: ObjectId(\"652e5a208c360b950bb50eea\"),\n word: 'Hello',\n words: [ 'Hello', 'World', '!' ],\n wordArrays: [ [ 'Hello', 'World', '!' ], [ 'The', 'Sun', 'Shines' ] ]\n }\n]\nwordArrayswordArrays",
"text": "Hi @Arkensor and welcome to MongoDB community forums!!I apologise for getting back to you so late.If you attempt the same for the nested arrays this stops working altogether. {\"wordArrays.0\": {$all: [\"Hello\", \"World\"]}} does not match.The find query would not work in the above mentioned format. However, if your sample document looks like:the query:Atlas atlas-cihc7e-shard-0 [primary] test> db.server.find( {“wordArrays”: { “$all”: [[“Hello”,“World”,“!”]] } })\n[\n{\n_id: ObjectId(“652e5a208c360b950bb50eea”),\nword: ‘Hello’,\nwords: [ ‘Hello’, ‘World’, ‘!’ ],\nwordArrays: [ [ ‘Hello’, ‘World’, ‘!’ ], [ ‘The’, ‘Sun’, ‘Shines’ ] ]\n}\n]\nwould give the result as the ‘[“Hello”,“World”,“!”]’ matches the complete element at index 0.My approach for ANY match does not return any matches, and I do not understand why {wordArrays: {$elemMatch: {$all: [\"Hello\", \"World\"]}}}. I read it as wordArrays: does any element match: array contains “Hello” and “World”. And that should have matched the documentIf you wish to use $elematch to match for a specific element in the array, you can use the query asIf you wish to use aggregations, you can make use of $arrayElemAt to project element at a specific index value.Also, could you clarify with sample documents and expected response which explains the below statements.Please feel free to reach out in case of any further questions.Regards\nAasawari",
"username": "Aasawari"
},
{
"code": "{\"wordArrays.0\": {$all: [\"Hello\", \"World\"]}}\ndb.collection.find({ words: {\"$all\": [\"Hello\",\"World\"]} })\n[{\"wordArrays\": [[ \"Hello\",\"World\",\"!\"],[\"The\",\"Sun\",\"Shines\"]]}]\n",
"text": "Hello @Aasawari,thank you for getting back to me on this. To clarify a few things:The first example was intentionally about checking the 0th item. I expect this code to work as it reads as follows: On the document go into “wordArrays” property. It is an array and the 0th item shall be accessed. On data data present on the 0th item it shall be checked that: ALL the values [“Hello”, “World”] are present on the data. If so return true true. False otherwise.The reason why I say it should work like this is because on the example playground, I linked it works if I have a non-nested array.So my expectation as user is that if I manually navigate the field accessor for the nested array to the 0th item, the data he inspects is the same as if I ask the query to do it on a field that only has this one array as data.\nI would consider this a bug / lack of feature support. Which is why I opened https://jira.mongodb.org/browse/SERVER-77974the query: db.server.find( {“wordArrays”: { “$all”: [[“Hello”,“World”,“!”]] } }) …\nwould give the result as the ‘[“Hello”,“World”,“!”]’ matches the complete element at index 0.That is true but not what I was asking about here. That is an EXACT match on ANY of the items to find find the index. My first example was about already having the index and wanting to validate if I find the required data there or not.As for the second part of my question, this was about achieving my overall goal. The issue raised over “array.INDEX” query not working as expected was just a finding I had while trying to get there.My goal is exactly what I describe there. I have multiple documents. They each contain the “wordArrays” field which holds multiple arrays. I now want to check what I wrote on all the documents.So for the 1. one given the data below, I expect the query I am looking for to match since there was one nested array that contained both “Hello” and “World”. On the same data if I looked for “Not” and “Exists” it should not match because none of the arrays contained both these strings.For the 2. query I want to not find just one matching nested array, but I want to validate that ALL the nested arrays must contain both the values. Given the same data example as used above this should fail, because “Hello” and “World” are present in the 0th array, but the 1st contains neither of the words.Writing these queries is easy if you just inspect one array that is not nested. I however have data that I can not and do not want to change in any way and want to query the nested arrays to do these ANY and ALL operations on them. I am looking for those two examples because from those two working queries, I should be able to derive the other variants of any/all nested arrays inspected for any/all item matches.",
"username": "Arkensor"
},
{
"code": "$alldb.collection.find({\n \"wordArrays.0\":{\n \"$all\":[\n \"Hello\",\n \"World\"\n ]\n }\n})\ndb.collection.find({\n \"$and\":[\n {\n \"wordArrays.0\":\"Hello\"\n },\n {\n \"wordArrays.0\":\"World\"\n }\n ]\n})\n\"wordArray\"[ \"Hello\",\"World\",\"!\"]\"Hello\"\"World\"\"Hello\"\"World\"Atlas atlas-cihc7e-shard-0 [primary] test> db.testall.find( {$or: [{'wordsArray.0': ['hello', 'world']}, {'wordsArray.1': ['hello', 'world']}, {'wordsArray.2': ['hello', 'world']} ] })\n[\n {\n _id: ObjectId(\"654b212957b6dc078e3deea5\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [\n [ 'the', 'sum', 'flower' ],\n [ 'not', 'exists' ],\n [ 'hello', 'world' ]\n ]\n }\n]\nAtlas atlas-cihc7e-shard-0 [primary] test> db.testall.find()\n[\n {\n _id: ObjectId(\"654b206d57b6dc078e3deea4\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [ [ 'hello', 'world', '1' ], [ 'the', 'sum', 'flower' ] ]\n },\n {\n _id: ObjectId(\"654b212957b6dc078e3deea5\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [\n [ 'the', 'sum', 'flower' ],\n [ 'not', 'exists' ],\n [ 'hello', 'world' ]\n ]\n },\n {\n _id: ObjectId(\"654b26d457b6dc078e3deea6\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [ [ 'hello', 'world' ], [ 'hello', 'world' ] ]\n }\n]\nAtlas atlas-cihc7e-shard-0 [primary] test> db.testall.find( {$and: [{'wordsArray.0': ['hello', 'world']}, {'wordsArray.1': ['hello', 'world']} ] })\n[\n {\n _id: ObjectId(\"654b26d457b6dc078e3deea6\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [ [ 'hello', 'world' ], [ 'hello', 'world' ] ]\n }\n]\n",
"text": "Hi @Arkensor thank you for the detailsI expect this code to work as it reads as follows: On the document go into “wordArrays” property. It is an array and the 0th item shall be accessed.My understanding is that using the dot notation references the specific element at the array index as opposed to the contents of the specified element at the array index. If we combine this with the example noted in the behaviour portion of the $all operator documentation, we can say that the query you mentioned:Is equivalent to:I expect this code to work as it reads as follows: On the document go into “wordArrays” property. It is an array and the 0th item shall be accessed.In saying the above, we can see the element in the array \"wordArray\" at index 0 for the is [ \"Hello\",\"World\",\"!\"]. Neither the string \"Hello\" or \"World\" match this so nothing is returned. The use of dot notation here does not “access” the array as you have mentioned. I.e. in this example, use of the dot not notation will not look into the contents of this sub-array to determine if it contains the strings and instead will just check if the specified element is equal to the strings \"Hello\" and \"World\".I would consider this a bug / lack of feature support. Which is why I openedFrom the above example and the documentation, I believe this works as the expected. However, i understand this may not be the behavour you expect so this may be better suited as a feedback post as opposed to SERVER ticket. Please raise a feedback request in the MongoDB Feedback Engine or upvote for an existing/similar request if present.So for the 1. one given the data below, I expect the query I am looking for to match since there was one nested array that contained both “Hello” and “World”. On the same data if I looked for “Not” and “Exists” it should not match because none of the arrays contained both these strings.If I understand correctly, you are looking for a query which would give all the documents which contains [‘hello’,‘world’] irrespective of the index position.\nIf your wordsArray size is small, you can use a query like:where in my sample data, [‘hello’,‘world’] is at the 2nd index.I request your assistance in providing a sample dataset and outlining the anticipated response if the provided data does not align with your requirements.For the 2. query I want to not find just one matching nested array, but I want to validate that ALL the nested arrays must contain both the values. Given the same data example as used above this should fail, because “Hello” and “World” are present in the 0th array, but the 1st contains neither of the words.Consider the sample data contains:If you use the query with $and, the below query would return.Could you outline the particular reason or use case for this schema design?Regards\nAasawari",
"username": "Aasawari"
},
{
"code": "[\n {\n _id: ObjectId(\"654b212957b6dc078e3deea5\"),\n words: [ 'hello', 'world', '1' ],\n wordsArray: [\n [ 'the', 'sum', 'flower' ],\n [ 'not', 'exists' ],\n [ 'justoneword' ],\n [ 'many', 'many', 'many', 'many', 'wordsinonearray' ],\n <<<TRUNCATED - 500 MORE ARRAY ITEMS HERE >>>,\n [ 'hello', 'world' ]\n ]\n }\n]\n",
"text": "Hello again @Aasawari,that explanation is rather helpful. I can see now how the index accessor syntax together with $all was not what I expected. So that is fair enough.About the other query. Yes if it was only a few items it could be hard coded with indices, however, my data could have 0…N amount of wordArrays where each array can have 0…N number elements.I am not directly in control of the data. I am writing a translation layer of user-defined queries into MongoDB, hence I can not make any assumptions on the data. I just know it is an array of arrays of strings and to find matches where N user-provided strings are present. I have some ugly solution with using $map to flatten the nested arrays into booleans if they contain all search words or not, and then check if the outer array contained “true” anywhere, but I am not sure if there could maybe be a nicer option.The data I have given you is exactly my use case. There is no other data that I can give you to make this any more clear.Thank you very much",
"username": "Arkensor"
}
] | [
"aggregation",
"queries",
"dot-net"
] |
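For reference, a minimal sketch of the $map-based approach described in the last reply of this thread. It assumes documents shaped like the samples above (a wordArrays field holding arrays of strings); the collection name and search terms are illustrative, and $setIsSubset is used on the assumption that set semantics (duplicates ignored) are acceptable for the "contains all search words" check:

// ANY: match documents where at least one nested array contains every search word
db.collection.find({
  $expr: {
    $anyElementTrue: [{
      $map: {
        input: "$wordArrays",
        as: "inner",
        in: { $setIsSubset: [["Hello", "World"], "$$inner"] }
      }
    }]
  }
})

// ALL: match documents where every nested array contains every search word
db.collection.find({
  $expr: {
    $allElementsTrue: [{
      $map: {
        input: "$wordArrays",
        as: "inner",
        in: { $setIsSubset: [["Hello", "World"], "$$inner"] }
      }
    }]
  }
})

Note that $allElementsTrue is vacuously true for an empty wordArrays, so the ALL variant may need an extra size check depending on the semantics the translation layer requires.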
null | Heartbeat topic configuration for Kafka Source Connector | 3,403 | Heartbeat topic configuration for Kafka Source Connector | 2021-09-10T11:54:21.879Z | [
{
"code": "",
"text": "Hi.We are considering using heartbeat mechanism to prevent the loss of the resume token when reading changes from a collection.Our current scenario involves 6 collections being polled, each by one separate connector.So far, we haven’t found any documentation about the heartbeat topic parameters. Any advise on how to choose, topic retention, topic partition number, compaction… or any other relevant heartbeat topic configs?Also, given our scenario of several connectors, is it safe to share the heartbeat topic or should we create a separate topic per connector?Thanks",
"username": "Jorge_Lopez_Castro"
},
{
"code": "",
"text": "Would love to get some guidance on this as well. We also have the multiple connectors configuration and am not sure if we should create one topic per connector, or if it’s safe for them to share.",
"username": "Kevin_Languasco"
},
{
"code": "",
"text": "We are facing the very same issue too and can’t find any answer:It would be highly appreciated if anyone would be able to answer those questions.\nThanks a lot in advance!",
"username": "Jan_de_Wilde"
}
] | [
"kafka-connector"
] |
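For reference, a sketch of the heartbeat settings involved, using the source connector's heartbeat.interval.ms and heartbeat.topic.name properties. The connection details, database/collection names, and per-connector topic naming below are illustrative assumptions, not taken from the thread:

{
  "name": "mongo-source-orders",
  "config": {
    "connector.class": "com.mongodb.kafka.connect.MongoSourceConnector",
    "connection.uri": "mongodb://mongo1:27017",
    "database": "shop",
    "collection": "orders",
    "heartbeat.interval.ms": "10000",
    "heartbeat.topic.name": "__mongodb_heartbeats.orders"
  }
}

# Illustrative topic setup: a single-partition, compacted topic is one reasonable
# choice (not official guidance), since only the most recent heartbeat offset matters.
kafka-topics --bootstrap-server localhost:9092 --create \
  --topic __mongodb_heartbeats.orders --partitions 1 --replication-factor 3 \
  --config cleanup.policy=compact

On sharing: the documentation does not appear to address whether several connectors can safely share one heartbeat topic, so a separate topic per connector, as sketched above, is the conservative choice.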
This is a full view of the data from the MongoDB community (excluding all posts without any responses).