MongoDB High-Availability Cluster Setup
1. Environment Preparation
# mongod should be started as a non-root user, so create a mongo user:
useradd mongo
# Set a password for the mongo user:
echo 123456 | passwd --stdin mongo
# Add mongo to sudoers
echo "mongo ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/mongo
chmod 0440 /etc/sudoers.d/mongo
# Fix "sudo: sorry, you must have a tty to run sudo" by disabling requiretty for the mongo user in /etc/sudoers
sudo sed -i 's/Defaults requiretty/Defaults:mongo !requiretty/' /etc/sudoers
# Create a mongo directory
mkdir /mongo
# Give the directory to the mongo user
chown -R mongo:mongo /mongo
# Configure the MongoDB yum repository
cat > /etc/yum.repos.d/mongodb-org-4.0.repo << EOF
[mongodb-org-4.0]
name=MongoDB Repository
baseurl=http://repo.mongodb.org/yum/redhat/\$releasever/mongodb-org/4.0/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-4.0.asc
EOF
# Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
# Disable the firewall
systemctl disable firewalld
systemctl stop firewalld
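Before installing anything, it is worth confirming the preparation took effect on every node. A minimal verification sketch (exact repo listing output depends on your CentOS/RHEL release):

# SELinux should report Permissive or Disabled, firewalld should be inactive
getenforce
systemctl is-active firewalld
# The MongoDB repository should be visible to yum
yum repolist enabled | grep mongodb-org-4.0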
2. Host Planning
192.168.33.14  node-1
192.168.33.15  node-2
192.168.33.16  node-3

node-1             node-2             node-3
mongos             mongos             mongos             Router: routes client requests to the right shard
config             config             config             Config server: stores the cluster metadata
shard1 primary     shard2 primary     shard3 primary     Shards: store the data
shard2 secondary   shard3 secondary   shard1 secondary   Replica sets: replicate the data; read/write splitting can be configured (the primary handles writes, secondaries sync data and serve reads)
shard3 secondary   shard1 secondary   shard2 secondary
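If you prefer to address the nodes by hostname instead of raw IP, the mapping above can be added to /etc/hosts on every machine. This is an optional sketch; the rest of this guide keeps using the IP addresses directly:

cat >> /etc/hosts << EOF
192.168.33.14 node-1
192.168.33.15 node-2
192.168.33.16 node-3
EOF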
3. Installation and Deployment
# Log in to each machine as the mongo user and install MongoDB
sudo yum install -y mongodb-org
# On every machine, create the directories for the mongo config server
mkdir -p /mongo/config/{log,data,run}
# On every machine, create the config server configuration file
cat > /mongo/config/mongod.conf << EOF
systemLog:
  destination: file
  logAppend: true
  path: /mongo/config/log/mongod.log
storage:
  dbPath: /mongo/config/data
  journal:
    enabled: true
processManagement:
  fork: true
  pidFilePath: /mongo/config/run/mongod.pid
net:
  port: 27100
  bindIp: 0.0.0.0
replication:
  replSetName: config
sharding:
  clusterRole: configsvr
EOF
# Start the config server service on every machine
mongod --config /mongo/config/mongod.conf
# Log in to any one of the config servers and initialize the config server replica set
mongo --port 27100
# Define the replica set configuration
# The _id must match replSetName
config = { _id : "config", members : [
  {_id : 0, host : "192.168.33.14:27100" },
  {_id : 1, host : "192.168.33.15:27100" },
  {_id : 2, host : "192.168.33.16:27100" }
] }
# Initialize the replica set
rs.initiate(config)
# Check the replica set status
rs.status()
# Note: "_id" : "config" must match replication.replSetName in the configuration file,
# and "host" in "members" lists the ip:port of the three nodes
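After rs.initiate() it can take a few seconds for a PRIMARY to be elected. A quick non-interactive check, sketched with the mongo shell's --eval (member name and stateStr are fields returned by rs.status()):

mongo --port 27100 --quiet --eval '
  rs.status().members.forEach(function(m) {
    print(m.name + " -> " + m.stateStr);
  })'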
1. Configure the first shard and its replica set
# Create the directories for the mongo shard1 server
mkdir -p /mongo/shard1/{log,data,run}
# On every machine, create the shard1 server configuration file
cat > /mongo/shard1/mongod.conf << EOF
systemLog:
  destination: file
  logAppend: true
  path: /mongo/shard1/log/mongod.log
storage:
  dbPath: /mongo/shard1/data
  journal:
    enabled: true
processManagement:
  fork: true
  pidFilePath: /mongo/shard1/run/mongod.pid
net:
  port: 27001
  bindIp: 0.0.0.0
replication:
  replSetName: shard1
sharding:
  clusterRole: shardsvr
EOF
# Start the shard1 server on every machine
mongod --config /mongo/shard1/mongod.conf
# Log in to any shard1 server and initialize the replica set
mongo --port 27001
# Switch to the admin database
use admin
# Define the replica set configuration
config = { _id : "shard1", members : [
  {_id : 0, host : "192.168.33.14:27001" },
  {_id : 1, host : "192.168.33.15:27001" },
  {_id : 2, host : "192.168.33.16:27001" }
] }
# Initialize the replica set
rs.initiate(config);
# Check the replica set status
rs.status()
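To double-check that the shard1 member list matches what was passed to rs.initiate(), the current configuration can be printed non-interactively; a sketch using rs.conf():

mongo --port 27001 --quiet --eval '
  rs.conf().members.forEach(function(m) {
    print(m._id + "  " + m.host);
  })'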
2. Configure the second shard and its replica set
# Create the directories for the mongo shard2 server
mkdir -p /mongo/shard2/{log,data,run}
# On every machine, create the shard2 server configuration file
cat > /mongo/shard2/mongod.conf << EOF
systemLog:
  destination: file
  logAppend: true
  path: /mongo/shard2/log/mongod.log
storage:
  dbPath: /mongo/shard2/data
  journal:
    enabled: true
processManagement:
  fork: true
  pidFilePath: /mongo/shard2/run/mongod.pid
net:
  port: 27002
  bindIp: 0.0.0.0
replication:
  replSetName: shard2
sharding:
  clusterRole: shardsvr
EOF
# Start the shard2 server on every machine
mongod --config /mongo/shard2/mongod.conf
# Log in to any shard2 server and initialize the replica set
mongo --port 27002
# Switch to the admin database
use admin
# Define the replica set configuration
config = { _id : "shard2", members : [
  {_id : 0, host : "192.168.33.14:27002" },
  {_id : 1, host : "192.168.33.15:27002" },
  {_id : 2, host : "192.168.33.16:27002" }
] }
# Initialize the replica set
rs.initiate(config)
# Check the replica set status
rs.status()
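A shorter way to see just the elected primary for shard2 is the isMaster helper, which reports the primary's host:port (a sketch; db.isMaster() is available in the 4.0 shell):

mongo --port 27002 --quiet --eval 'print(db.isMaster().primary)'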
3. Configure the third shard and its replica set
# Create the directories for the mongo shard3 server
mkdir -p /mongo/shard3/{log,data,run}
# On every machine, create the shard3 server configuration file
cat > /mongo/shard3/mongod.conf << EOF
systemLog:
  destination: file
  logAppend: true
  path: /mongo/shard3/log/mongod.log
storage:
  dbPath: /mongo/shard3/data
  journal:
    enabled: true
processManagement:
  fork: true
  pidFilePath: /mongo/shard3/run/mongod.pid
net:
  port: 27003
  bindIp: 0.0.0.0
replication:
  replSetName: shard3
sharding:
  clusterRole: shardsvr
EOF
# Start the shard3 server on every machine
mongod --config /mongo/shard3/mongod.conf
# Log in to any shard3 server and initialize the replica set
mongo --port 27003
# Switch to the admin database
use admin
# Define the replica set configuration
config = { _id : "shard3", members : [
  {_id : 0, host : "192.168.33.14:27003" },
  {_id : 1, host : "192.168.33.15:27003" },
  {_id : 2, host : "192.168.33.16:27003" }
] }
# Initialize the replica set
rs.initiate(config)
# Check the replica set status
rs.status()
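Since the three shard configurations differ only in replica set name and port, the health of all of them can be checked from any node in one pass. A bash sketch using the ports planned above:

for port in 27001 27002 27003; do
  echo "== port $port =="
  mongo --port $port --quiet --eval 'print(rs.status().set + ": state " + rs.status().myState)'
done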
4. Configure the mongos router
##### Note: if mongos does not start as a daemon, it is because fork: true is missing from /mongo/mongos/mongod.conf #####
mkdir -p /mongo/mongos/{log,data,run}
# Create the mongos configuration file
cat > /mongo/mongos/mongod.conf << EOF
systemLog:
  destination: file
  logAppend: true
  path: /mongo/mongos/log/mongod.log
processManagement:
  fork: true
  pidFilePath: /mongo/mongos/run/mongod.pid
net:
  port: 27200
  bindIp: 0.0.0.0
sharding:
  configDB: config/192.168.33.14:27100,192.168.33.15:27100,192.168.33.16:27100
EOF
# Note: the "config" prefix in configDB must match the _id of the config server replica set
# Start the router service
mongos --config /mongo/mongos/mongod.conf
# Log in to one of the mongos routers and add the shards manually
mongo --port 27200
# Add the shards to mongos
sh.addShard("shard1/192.168.33.14:27001,192.168.33.15:27001,192.168.33.16:27001")
sh.addShard("shard2/192.168.33.15:27002,192.168.33.16:27002,192.168.33.14:27002")
sh.addShard("shard3/192.168.33.16:27003,192.168.33.14:27003,192.168.33.15:27003")
# Allow reads from secondaries in this shell session
rs.slaveOk()
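Once the shards have been added, listing them back from the mongos confirms the addShard calls succeeded. A sketch using the listShards admin command and sh.status():

mongo --port 27200 --quiet --eval 'printjson(db.adminCommand({ listShards: 1 }))'
mongo --port 27200 --quiet --eval 'sh.status()'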
5. Common Operations
# Data is not distributed across shards until sharding is enabled for the database and collection
# Enable sharding for the bike database
use admin
db.runCommand({"enablesharding":"bike"})
# Shard the users collection in the bike database by a hashed _id
db.runCommand({"shardcollection":"bike.users","key":{_id:'hashed'}})
# Start all config servers
mongod --config /mongo/config/mongod.conf
# Start all shard1 servers
mongod --config /mongo/shard1/mongod.conf
# Start all shard2 servers
mongod --config /mongo/shard2/mongod.conf
# Start all shard3 servers
mongod --config /mongo/shard3/mongod.conf
# Start all mongos routers
mongos --config /mongo/mongos/mongod.conf
# Shut down the services
mongod --shutdown --dbpath /mongo/shard3/data
mongod --shutdown --dbpath /mongo/shard2/data
mongod --shutdown --dbpath /mongo/shard1/data
mongod --shutdown --dbpath /mongo/config/data
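To see the hashed sharding in action, a quick smoke test can be run through the mongos. This is only a sketch: bike.users matches the collection sharded above, and the document shape is an arbitrary example.

mongo --port 27200 --quiet --eval '
  db = db.getSiblingDB("bike");
  // insert some test documents; with a hashed _id key they spread across shards
  for (var i = 0; i < 1000; i++) { db.users.insert({ name: "user" + i }); }
  // print how the documents are distributed over shard1/shard2/shard3
  db.users.getShardDistribution();
'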