MongoDB Two-Node Cluster Operation Log

db.cursors.find()
db.cursors.find({a:3})
db.shards.find();
printShardingStatus();

db.printShardingStatus()

db.cloud0.find()

use test
db.createCollection("cloud1")

db.runCommand( { shardcollection : "test.cloud0", key : {"_id" : 1} } )
db.runCommand( { shardcollection : "test.cursors", key : {"_id" : 1} } )

db.createCollection("testtab3",{size:100000})
--db.testtab3.ensureIndex({"num1":1,"num2":2,"num3":3,"num4":4});
db.testtab3.ensureIndex({"num1":1,"num2":2,"num3":3,"num4":4},{unique:true});
db.testtab3.getIndexes()
db.system.indexes.find()


--db.runCommand( { shardcollection : "test.testtab3", key : {"_id":1} } )
--After a unique index has been created on the collection, sharding it on _id fails with an error:
-- "errmsg" : "can't shard collection with unique index on: { v: 1, key: { num1: 1.0, num2: 2.0, num3: 3.0, num4: 4.0 }, unique: true, ns: \"test.testtab3\", name: \"num1_1_num2_2_num3_3_num4_4\" }"
--The shard key has to be the unique index key itself
db.runCommand( { shardcollection : "test.testtab3", key: {num1: 1, num2: 1, num3: 1, num4: 1 } } )

db.testtab3.stats();
db.printShardingStatus()
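
--A hedged follow-up sketch (run through mongos; names are the ones used above): once test.testtab3
--has been sharded on its unique index, the shard key and chunk layout that got registered can be
--read back from the config database.
use config
db.collections.find({ _id : "test.testtab3" })
db.chunks.find({ ns : "test.testtab3" })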

use clusterdb;
create table testtab3(id INT NOT NULL AUTO_INCREMENT,num1 int, num2 int,num3 int,num4 int,PRIMARY KEY (id))engine=ndb;
ALTER TABLE testtab3 ADD INDEX idx_testtab3_md5 (num1,num2,num3,num4);
desc testtab3;

[mongodb@rac1 data]$ mongod --shardsvr --dbpath /data/db --fork --logpath /data/log/shard216.log --logappend
all output going to: /data/log/shard216.log
forked process: 6745
[mongodb@rac1 data]$

[mongodb@rac1 data]$ cat /data/log/shard216.log

***** SERVER RESTARTED *****


Wed Aug 22 23:11:35 [initandlisten] MongoDB starting : pid=6745 port=27018 dbpath=/data/db 64-bit host=rac1
Wed Aug 22 23:11:35 [initandlisten] db version v2.0.7, pdfile version 4.5
Wed Aug 22 23:11:35 [initandlisten] git version: 875033920e8869d284f32119413543fa475227bf
Wed Aug 22 23:11:35 [initandlisten] build info: Linux ip-10-2-29-40 2.6.21.7-2.ec2.v1.2.fc8xen #1 SMP Fri Nov 20 17:48:28 EST 2009 x86_64 BOOST_LIB_VERSION=1_41
Wed Aug 22 23:11:35 [initandlisten] options: { dbpath: "/data/db", fork: true, logappend: true, logpath: "/data/log/shard216.log", shardsvr: true }
Wed Aug 22 23:11:35 [initandlisten] journal dir=/data/db/journal
Wed Aug 22 23:11:35 [initandlisten] recover : no journal files present, no recovery needed
Wed Aug 22 23:11:35 [initandlisten] waiting for connections on port 27018
Wed Aug 22 23:11:35 [websvr] admin web console waiting for connections on port 28018
Wed Aug 22 23:12:35 [clientcursormon] mem (MB) res:13 virt:116 mapped:0
Wed Aug 22 23:17:35 [clientcursormon] mem (MB) res:13 virt:116 mapped:0
Wed Aug 22 23:22:35 [clientcursormon] mem (MB) res:13 virt:116 mapped:0
[mongodb@rac1 data]$

[mongodb@rac2 data]$ mongod --shardsvr --dbpath /data/db --fork --logpath /data/log/shard217.log --logappend
all output going to: /data/log/shard217.log
forked process: 527
[mongodb@rac2 data]$

[mongodb@rac2 data]$ cat /data/log/shard217.log


***** SERVER RESTARTED *****


Wed Aug 22 23:11:58 [initandlisten] MongoDB starting : pid=527 port=27018 dbpath=/data/db 64-bit host=rac2
Wed Aug 22 23:11:58 [initandlisten] db version v2.0.7, pdfile version 4.5
Wed Aug 22 23:11:58 [initandlisten] git version: 875033920e8869d284f32119413543fa475227bf
Wed Aug 22 23:11:58 [initandlisten] build info: Linux ip-10-2-29-40 2.6.21.7-2.ec2.v1.2.fc8xen #1 SMP Fri Nov 20 17:48:28 EST 2009 x86_64 BOOST_LIB_VERSION=1_41
Wed Aug 22 23:11:58 [initandlisten] options: { dbpath: "/data/db", fork: true, logappend: true, logpath: "/data/log/shard217.log", shardsvr: true }
Wed Aug 22 23:11:58 [initandlisten] journal dir=/data/db/journal
Wed Aug 22 23:11:58 [initandlisten] recover : no journal files present, no recovery needed
Wed Aug 22 23:11:58 [initandlisten] waiting for connections on port 27018
Wed Aug 22 23:11:58 [websvr] admin web console waiting for connections on port 28018
Wed Aug 22 23:12:58 [clientcursormon] mem (MB) res:13 virt:116 mapped:0
Wed Aug 22 23:17:58 [clientcursormon] mem (MB) res:13 virt:116 mapped:0
Wed Aug 22 23:22:58 [clientcursormon] mem (MB) res:13 virt:116 mapped:0
[mongodb@rac2 data]$
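
--Both shard mongods are now listening on port 27018. Before wiring them into mongos, a quick
--reachability check can be done from any mongo shell; a minimal sketch, assuming rac1/rac2 are
--192.168.16.216/192.168.16.217 as used later in this log:
new Mongo("192.168.16.216:27018").getDB("admin").runCommand({ ping : 1 })
new Mongo("192.168.16.217:27018").getDB("admin").runCommand({ ping : 1 })
--Each call should return { "ok" : 1 } if the shard server is up.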

[mongodb@rac2 data]$ mongod --configsvr --dbpath /data/config --fork --logpath /data/log/config217.log --logappend
forked process: 582
[mongodb@rac2 data]$ all output going to: /data/log/config217.log


***** SERVER RESTARTED *****


Wed Aug 22 23:22:07 [initandlisten] MongoDB starting : pid=582 port=27019 dbpath=/data/config 64-bit host=rac2
Wed Aug 22 23:22:07 [initandlisten] db version v2.0.7, pdfile version 4.5
Wed Aug 22 23:22:07 [initandlisten] git version: 875033920e8869d284f32119413543fa475227bf
Wed Aug 22 23:22:07 [initandlisten] build info: Linux ip-10-2-29-40 2.6.21.7-2.ec2.v1.2.fc8xen #1 SMP Fri Nov 20 17:48:28 EST 2009 x86_64 BOOST_LIB_VERSION=1_41
Wed Aug 22 23:22:07 [initandlisten] options: { configsvr: true, dbpath: "/data/config", fork: true, logappend: true, logpath: "/data/log/config217.log" }
Wed Aug 22 23:22:07 [initandlisten] journal dir=/data/config/journal
Wed Aug 22 23:22:07 [initandlisten] recover : no journal files present, no recovery needed
Wed Aug 22 23:22:07 [initandlisten] waiting for connections on port 27019
Wed Aug 22 23:22:07 [websvr] admin web console waiting for connections on port 28019
[mongodb@rac2 data]$


[mongodb@rac1 data]$ mongos --configdb 192.168.16.217 --fork --logpath /data/log/configdb217.log --logappend
all output going to: /data/log/configdb217.log
forked process: 6824
[mongodb@rac1 data]$ cat /data/log/configdb217.log




***** SERVER RESTARTED *****




Wed Aug 22 23:22:44 mongos db version v2.0.7, pdfile version 4.5 starting (--help for usage)
Wed Aug 22 23:22:44 git version: 875033920e8869d284f32119413543fa475227bf
Wed Aug 22 23:22:44 build info: Linux ip-10-2-29-40 2.6.21.7-2.ec2.v1.2.fc8xen #1 SMP Fri Nov 20 17:48:28 EST 2009 x86_64 BOOST_LIB_VERSION=1_41
Wed Aug 22 23:22:46 [mongosMain] waiting for connections on port 27017
Wed Aug 22 23:22:46 [websvr] admin web console waiting for connections on port 28017
Wed Aug 22 23:22:46 [Balancer] about to contact config servers and shards
Wed Aug 22 23:22:46 [Balancer] config servers and shards contacted successfully
Wed Aug 22 23:22:46 [Balancer] balancer id: rac1:27017 started at Aug 22 23:22:46
Wed Aug 22 23:22:46 [Balancer] creating WriteBackListener for: 192.168.16.217:27019 serverID: 5034f94646b461fa75f9f24e
Wed Aug 22 23:22:46 [LockPinger] creating distributed lock ping thread for 192.168.16.217:27019 and process rac1:27017:1345648966:1804289383 (sleeping for 30000ms)
Wed Aug 22 23:22:47 [Balancer] distributed lock 'balancer/rac1:27017:1345648966:1804289383' acquired, ts : 5034f94646b461fa75f9f24f
Wed Aug 22 23:22:47 [Balancer] distributed lock 'balancer/rac1:27017:1345648966:1804289383' unlocked.
[mongodb@rac1 data]$
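
--mongos is up against the single config server passed via --configdb (a production cluster would
--normally list three config servers; this two-node test uses one). A hedged sanity check from a
--shell connected to mongos on port 27017 is to read the sharding metadata directly:
db.getSisterDB("config").shards.find()
db.getSisterDB("config").settings.find()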


mongos> db.runCommand({addshard:"192.168.16.216:27019",name:"shard1"})
{
"ok" : 0,
"errmsg" : "couldn't connect to new shard mongos connectionpool: connect failed 192.168.16.216:27019 : couldn't connect to server 192.168.16.216:27019"
}


mongos> db.addUser("root","abcd");
{
"singleShard" : "192.168.16.217:27019",
"n" : 0,
"connectionId" : 6,
"err" : null,
"ok" : 1
}
{
"user" : "root",
"readOnly" : false,
"pwd" : "1a0f1c3c3aa1d592f490a2addc559383",
"_id" : ObjectId("5034faee5322b075a8dc2a76")
}
mongos>




mongos> db.runCommand({addshard:"192.168.16.217:27019",name:"shard2"})
{ "shardAdded" : "shard2", "ok" : 1 }
mongos> db.runCommand( { listshards : 1 } );
{
"shards" : [
{
"_id" : "shard2",
"host" : "192.168.16.217:27019"
}
],
"ok" : 1
}
mongos>




mongos> db.runCommand( { listshards : 1 } );
{
"shards" : [
{
"_id" : "shard2",
"draining" : true,
"host" : "192.168.16.217:27019"
}
],
"ok" : 1
}
mongos> db.runCommand({ removeshard : "shard2" })
{
"msg" : "removeshard completed successfully",
"state" : "completed",
"shard" : "shard2",
"ok" : 1
}
mongos> db.runCommand( { listshards : 1 } );
{ "shards" : [ ], "ok" : 1 }




mongos> db.runCommand({addshard:"192.168.16.216:27018",name:"shard1"})
{ "shardAdded" : "shard1", "ok" : 1 }
mongos> db.runCommand({addshard:"192.168.16.217:27018",name:"shard2"})
{ "shardAdded" : "shard2", "ok" : 1 }
mongos>


mongos> db.runCommand( { enablesharding : "test" } )
{ "ok" : 1 }
mongos> db.runCommand( { shardcollection : "test.people", key : {name : 1} } )
{ "collectionsharded" : "test.people", "ok" : 1 }
mongos>
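
--At this point test.people consists of a single chunk on shard1 (visible in the sharding status
--further down); nothing migrates to shard2 until enough data is inserted for chunks to split. A
--hedged sketch of forcing a split and a migration by hand, with "m" as an arbitrary example split
--point:
use admin
db.runCommand({ split : "test.people", middle : { name : "m" } })
db.runCommand({ moveChunk : "test.people", find : { name : "m" }, to : "shard2" })
db.printShardingStatus()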


mongos> db.runCommand({"serverStatus" : 1 })
{
"host" : "rac1",
"version" : "2.0.7",
"process" : "mongos",
"uptime" : 1169,
"localTime" : ISODate("2012-08-22T15:42:13.097Z"),
"mem" : {
"resident" : 2,
"virtual" : 120,
"supported" : true
},
"connections" : {
"current" : 2,
"available" : 817
},
"extra_info" : {
"note" : "fields vary by platform",
"heap_usage_bytes" : 269712,
"page_faults" : 1
},
"opcounters" : {
"insert" : 1,
"query" : 5,
"update" : 0,
"delete" : 0,
"getmore" : 0,
"command" : 46
},
"ops" : {
"sharded" : {
"insert" : 0,
"query" : 0,
"update" : 0,
"delete" : 0,
"getmore" : 0,
"command" : 0
},
"notSharded" : {
"insert" : 1,
"query" : 5,
"update" : 0,
"delete" : 0,
"getmore" : 0,
"command" : 46
}
},
"shardCursorType" : {


},
"asserts" : {
"regular" : 0,
"warning" : 0,
"msg" : 0,
"user" : 2,
"rollovers" : 0
},
"network" : {
"bytesIn" : 3974,
"bytesOut" : 5604,
"numRequests" : 52
},
"ok" : 1
}
mongos>


mongos> config = db.getSisterDB("config")
config
mongos> config.databases.find()
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : true, "primary" : "shard1" }


mongos> printShardingStatus();
--- Sharding Status ---
  sharding version: { "_id" : 1, "version" : 3 }
  shards:
      { "_id" : "shard1", "host" : "192.168.16.216:27018" }
      { "_id" : "shard2", "host" : "192.168.16.217:27018" }
  databases:
      { "_id" : "admin", "partitioned" : false, "primary" : "config" }
      { "_id" : "test", "partitioned" : true, "primary" : "shard1" }
          test.people chunks:
              shard1  1
          { "name" : { $minKey : 1 } } -->> { "name" : { $maxKey : 1 } } on : shard1 { "t" : 1000, "i" : 0 }


mongos>


mongos> db.addUser("root","abcd");
{
"singleShard" : "192.168.16.217:27019",
"updatedExisting" : true,
"n" : 1,
"connectionId" : 6,
"err" : null,
"ok" : 1
}
{
"_id" : ObjectId("5034faee5322b075a8dc2a76"),
"user" : "root",
"readOnly" : false,
"pwd" : "1a0f1c3c3aa1d592f490a2addc559383"
}
mongos>


mongos> db.users.find();
{ "_id" : ObjectId("5035012f5322b075a8dc2a77"), "a" : 3, "b" : 5 }


mongos> db.users.find({},{a:1,b:1})
{ "_id" : ObjectId("5035012f5322b075a8dc2a77"), "a" : 3, "b" : 5 }




mongos> show collections
mycol1
people
records
system.indexes
users
mongos> db.mycol1.stats();
{
"sharded" : false,
"primary" : "shard1",
"ns" : "test.mycol1",
"count" : 0,
"size" : 0,
"storageSize" : 8192,
"numExtents" : 1,
"nindexes" : 1,
"lastExtentSize" : 8192,
"paddingFactor" : 1,
"flags" : 1,
"totalIndexSize" : 8176,
"indexSizes" : {
"_id_" : 8176
},
"ok" : 1
}
mongos> db.runCommand({isdbgrid:1});
{ "isdbgrid" : 1, "hostname" : "rac1", "ok" : 1 }
mongos> db.runCommand({ismaster:1});
{
"ismaster" : true,
"msg" : "isdbgrid",
"maxBsonObjectSize" : 16777216,
"ok" : 1
}
mongos>
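
--isdbgrid only succeeds on a mongos, so it is a handy way to tell whether a connection goes through
--the router or straight to a shard mongod. A hedged sketch using the shell's Mongo() helper against
--one of the shards started above; it should come back with an error rather than { "isdbgrid" : 1 }:
new Mongo("192.168.16.216:27018").getDB("admin").runCommand({ isdbgrid : 1 })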


mongos> db.records.stats();
{
"sharded" : false,
"primary" : "shard1",
"ns" : "test.records",
"count" : 2,
"size" : 112,
"avgObjSize" : 56,
"storageSize" : 4096,
"numExtents" : 1,
"nindexes" : 1,
"lastExtentSize" : 4096,
"paddingFactor" : 1,
"flags" : 1,
"totalIndexSize" : 8176,
"indexSizes" : {
"_id_" : 8176
},
"ok" : 1
}
mongos> db.records.find();
{ "_id" : ObjectId("50349847d050000000000000"), "user_id" : ObjectId("50349847d050000000000001"), "items" : [ ] }
{ "_id" : ObjectId("50349853f750000000000000"), "user_id" : ObjectId("50349853f750000000000001"), "items" : [ ] }
mongos>




mongos> db.getCollectionNames()
[
"cloud0",
"cursors",
"mycol1",
"people",
"records",
"system.indexes",
"users",
"users1"
]
mongos> db.cloud0.find();


--========================================
--Shard a specific collection


mongos> db.getCollectionNames()
[
"cloud0",
"cursors",
"mycol1",
"people",
"records",
"system.indexes",
"users",
"users1"
]
mongos> db.printShardingStatus()
--- Sharding Status ---
  sharding version: { "_id" : 1, "version" : 3 }
  shards:
      { "_id" : "shard1", "host" : "192.168.16.216:27018" }
      { "_id" : "shard2", "host" : "192.168.16.217:27018" }
  databases:
      { "_id" : "admin", "partitioned" : false, "primary" : "config" }
      { "_id" : "test", "partitioned" : true, "primary" : "shard1" }
          test.people chunks:
              shard1  1
          { "name" : { $minKey : 1 } } -->> { "name" : { $maxKey : 1 } } on : shard1 { "t" : 1000, "i" : 0 }
      { "_id" : "mydb", "partitioned" : false, "primary" : "shard2" }


mongos> db.runCommand( { shardcollection : "test.cloud0", key : {"_id" : 1} } )
{ "ok" : 0, "errmsg" : "access denied - use admin db" }
mongos> use admin
switched to db admin
mongos> db.runCommand( { shardcollection : "test.cloud0", key : {"_id" : 1} } )
{ "collectionsharded" : "test.cloud0", "ok" : 1 }
mongos>
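
--Sharding commands such as shardcollection must be issued against the admin database, which is why
--the first attempt above was rejected. Switching with "use admin", as done here, works; a hedged
--equivalent that avoids switching databases is to go through getSisterDB:
db.getSisterDB("admin").runCommand( { shardcollection : "test.cloud0", key : {"_id" : 1} } )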


mongos> db.printShardingStatus()
--- Sharding Status ---
  sharding version: { "_id" : 1, "version" : 3 }
  shards:
      { "_id" : "shard1", "host" : "192.168.16.216:27018" }
      { "_id" : "shard2", "host" : "192.168.16.217:27018" }
  databases:
      { "_id" : "admin", "partitioned" : false, "primary" : "config" }
      { "_id" : "test", "partitioned" : true, "primary" : "shard1" }
          test.cloud0 chunks:
              shard1  1
          { "_id" : { $minKey : 1 } } -->> { "_id" : { $maxKey : 1 } } on : shard1 { "t" : 1000, "i" : 0 }
          test.people chunks:
              shard1  1
          { "name" : { $minKey : 1 } } -->> { "name" : { $maxKey : 1 } } on : shard1 { "t" : 1000, "i" : 0 }
      { "_id" : "mydb", "partitioned" : false, "primary" : "shard2" }

--=========================================
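
--The status above still shows a single chunk each for test.cloud0 and test.people, both on shard1;
--chunks only split and migrate to shard2 after enough data is written and the balancer runs. A
--hedged sketch for watching the distribution over time:
use config
db.chunks.find({ ns : "test.cloud0" })
use test
db.cloud0.stats()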
