[root@lab8106 ~]# ceph fs new ceph metadata data
new fs with metadata pool 1 and data pool 2
[root@lab8106 ~]# systemctl start ceph-mds@lab8106
[root@lab8106 ~]# mount -t ceph 192.168.8.106:/ /mnt
[root@lab8106 ~]# ll /mnt
total 0
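The rollback below relies on a pool snapshot (snap1) and a saved object list (metalist) captured earlier in this walkthrough, before anything went wrong. As a sketch, they would have been created along these lines (the names snap1 and metalist are just the ones this walkthrough uses):

# take a pool-level snapshot of the metadata pool
rados -p metadata mksnap snap1
# record the names of all metadata objects so each one can be rolled back later
rados -p metadata ls > metalist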
[root@lab8106 mds]# systemctl stop ceph-mds@lab8106
[root@lab8106 mds]# for a in `cat metalist`;do rados -p metadata rollback $a snap1;done;
rolled back pool metadata to snapshot snap1
rolled back pool metadata to snapshot snap1
rolled back pool metadata to snapshot snap1
rolled back pool metadata to snapshot snap1
···
[root@lab8106 mds]# mount -t ceph 192.168.8.106:/ /mnt
[root@lab8106 mds]# ll /mnt
total 0
drwxr-xr-x 1 root root   3577 Dec 30  2015 celt051-0.5.1.3
drwxr-xr-x 1 root root   1787 Mar  7  2016 centos-logos-70.0.6
drwxr-xr-x 1 root root  20192 Mar  7  2016 centos-release
drwxr-xr-x 1 root root  19768 Dec 21 15:04 ceph
drwxr-xr-x 1 root root  13572 Sep  9 17:21 ceph-deploy-1.5.34
drwxr-xr-x 1 root root 147227 Mar  7  2016 certmonger-0.78.4
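Before fully trusting the remounted tree, a quick sanity check is to compare the restored object set against the saved list; a sketch, assuming metalist still holds the pre-incident object names:

# every object recorded before the incident should be back after the rollback
diff <(rados -p metadata ls | sort) <(sort metalist) && echo "all metadata objects restored"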
What if the data is accidentally wiped

The recovery above assumed the fs had been rebuilt. Now for a more extreme case: every object in the metadata pool has been deleted.
[root@lab8106 mds]# for a in `rados -p metadata ls`;do rados -p metadata rm $a ;done;
[root@lab8106 mds]# rados -p metadata ls
[root@lab8106 mds]# systemctl restart ceph-mds@lab8106
If you check the ceph -s status now, the MDS cannot start at all. Let's do the recovery.
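A minimal way to confirm the damage before recovering (output omitted here):

ceph -s         # overall cluster health; expect the filesystem to be unhealthy
ceph mds stat   # MDS map state; expect the rank to never become active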
[root@lab8106 mds]# systemctl stop ceph-mds@lab8106
[root@lab8106 mds]# ceph mds fail 0
[root@lab8106 mds]# ceph fs rm ceph --yes-i-really-mean-it
[root@lab8106 mds]# ceph fs new ceph metadata data
[root@lab8106 mds]# for a in `cat metalist`;do rados -p metadata rollback $a snap1;done;
rolled back pool metadata to snapshot snap1
rolled back pool metadata to snapshot snap1
rolled back pool metadata to snapshot snap1
rolled back pool metadata to snapshot snap1
···
[root@lab8106 mds]# rados -p metadata ls|wc -l
20
[root@lab8106 mds]# systemctl start ceph-mds@lab8106
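To verify the recovery end to end, remount and list as in the earlier steps (same monitor address and mount point as above):

mount -t ceph 192.168.8.106:/ /mnt
ls -l /mnt   # the directory tree from before the deletion should be visible again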