GlusterFS File System

Gluster is a distributed file system. It aggregates many different storage servers, interconnected over Ethernet or Infiniband with RDMA (Remote Direct Memory Access), into one large parallel network file system. It has many applications, including cloud computing, biomedical sciences, and document storage. Gluster is free software hosted by GNU and licensed under the AGPL. Gluster Inc. is the primary commercial sponsor of Gluster and offers commercial products and Gluster-based solutions.

1. Environment:

hostname                                      ip
YQD-intranet-salt-master                      192.168.1.63
YQD-Intranet-DB-NO1                           192.168.1.66
YQD-Intranet-DB-NO2                           192.168.1.64
YQD-Intranet-DB-NO3                           192.168.1.244

2. Installation:
GlusterFS can be installed either from source or from RPM packages; downloads are available at http://www.gluster.org/download/ . Here we use yum installation as the example. Install on all four machines as follows:

# wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/CentOS/glusterfs-epel.repo
# yum install glusterfs
# yum install glusterfs-server
# /etc/init.d/glusterd start
Starting glusterd:                                         [  OK  ]
 
# chkconfig glusterd on
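If iptables is enabled on these hosts, the Gluster ports must also be reachable between the four machines. A minimal sketch, assuming default CentOS 6 iptables: 24007-24008 are the glusterd management ports, and the brick processes listen on ports from 49152 upward (the upper bound used below is an assumption; compare the ports shown by gluster volume status later):

# iptables -I INPUT -p tcp --dport 24007:24008 -j ACCEPT
# iptables -I INPUT -p tcp --dport 49152:49200 -j ACCEPT
# service iptables save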

3. Configure the whole GlusterFS cluster from 192.168.1.63:

# gluster peer probe 192.168.1.63
peer probe: success. Probe on localhost not needed
# gluster peer probe 192.168.1.64
peer probe: success.
# gluster peer probe 192.168.1.66
peer probe: success.
# gluster peer probe 192.168.1.244
peer probe: success.

Check the status:

 
# gluster peer status
Number of Peers: 3
Hostname: 192.168.1.66
Uuid: 7ab89229-fce2-4f86-a61f-d7b17a3c7308
State: Peer in Cluster (Connected)
Hostname: 192.168.1.64
Uuid: 33febb2d-08a2-4676-a86e-5312aa00f934
State: Peer in Cluster (Connected)
Hostname: 192.168.1.244
Uuid: a64a1ffb-c2e6-4171-a178-02e06cd6ad0e
State: Peer in Cluster (Connected)
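
If a host was probed by mistake, it can be removed from the trusted pool again, provided none of its bricks belong to a volume yet; a quick sketch:

# gluster peer detach 192.168.1.244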

Create the data directory (the same on all four machines):

mkdir /data/v3_upload
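
In this test the bricks simply sit on the existing /data filesystem. In production a brick usually gets its own partition; a rough sketch, assuming a spare disk /dev/sdb1 (hypothetical device name) formatted as XFS with a 512-byte inode size to leave room for Gluster's extended attributes:

# mkfs.xfs -i size=512 /dev/sdb1
# mount /dev/sdb1 /data
# mkdir -p /data/v3_upload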

Create the GlusterFS volume on 192.168.1.63:

# gluster volume create v3_upload replica 4 192.168.1.63:/data/v3_upload/ 192.168.1.66:/data/v3_upload/ 192.168.1.64:/data/v3_upload/ 192.168.1.244:/data/v3_upload/
volume create: v3_upload: success: please start the volume to access data
# gluster volume start v3_upload
volume start: v3_upload: success
# gluster volume info
 
Volume Name: v3_upload
Type: Replicate
Volume ID: 2b1c361c-e711-4ad0-96c4-ca51110bc84a
Status: Started
Number of Bricks: 1 x 4 = 4
Transport-type: tcp
Bricks:
Brick1: 192.168.1.63:/data/v3_upload
Brick2: 192.168.1.66:/data/v3_upload
Brick3: 192.168.1.64:/data/v3_upload
Brick4: 192.168.1.244:/data/v3_upload

This is a small replicate example. Next, from a client (a web application server that is not part of the environment above), mount the volume read-only for testing:

# wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/CentOS/glusterfs-epel.repo
# yum install glusterfs
# yum install glusterfs-fuse
 
# mkdir /data/v3_upload
# mount -t glusterfs -o ro 192.168.1.63:v3_upload /data/v3_upload/
# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/sda2              20G  1.5G   17G   9% /
tmpfs                 1.9G     0  1.9G   0% /dev/shm
/dev/sda1             194M   27M  158M  15% /boot
/dev/mapper/vg_web-LogVol00
                      251G  1.3G  237G   1% /data
/dev/sda3              20G  217M   19G   2% /home
192.168.1.63:v3_upload
                      251G  802M  238G   1% /data/v3_upload

Test writing data through the mount point (note: this time the volume is mounted via 192.168.1.66):

# umount /data/v3_upload/
# mount -t glusterfs 192.168.1.66:v3_upload /data/v3_upload/
# echo "this is 192.168.1.66" > /data/v3_upload/test.txt
# mkdir /data/v3_upload/testdir

Check on any one of the server nodes:

# ll -h
total 12K
drwxr-xr-x 2 root root 4.0K Jul  1 14:20 testdir
-rw-r--r-- 2 root root   21 Jul  1 14:21 test.txt

All four machines show the same content, so the write succeeded. Now, on 192.168.1.63, modify test.txt directly in the brick data directory and also create a testdir2 directory there:

# cat test.txt 
this is 192.168.1.66 create
this is 192.168.1.63 modified
# ll -h
total 16K
drwxr-xr-x 2 root root 4.0K Jul  1 14:20 testdir
drwxr-xr-x 2 root root 4.0K Jul  1 14:23 testdir2
-rw-r--r-- 2 root root   58 Jul  1 14:23 test.txt
 
Check from the client:
# cat test.txt 
cat: test.txt: Input/output error
 
# ll -h
total 4.5K
drwxr-xr-x 2 root root 4.0K Jul  1 14:20 testdir
-rw-r--r-- 1 root root   21 Jul  1 14:21 test.txt
The data write failed to propagate.

Create a new test2.txt file and a testdir3 directory on 192.168.1.66, again directly in the brick directory:

# ll -h
total 20K
-rw-r--r-- 2 root root   24 Jul  1 14:33 test2.txt
drwxr-xr-x 2 root root 4.0K Jul  1 14:20 testdir
drwxr-xr-x 2 root root 4.0K Jul  1 14:33 testdir3
Check from the client:
# ll -h
total 8.5K
-rw-r--r-- 1 root root   24 Jul  1  2014 test2.txt
drwxr-xr-x 2 root root 4.0K Jul  1 14:20 testdir
drwxr-xr-x 2 root root 4.0K Jul  1  2014 testdir3

The contents match. When a Gluster server's volume is mounted on a client, writes made through that mount are synchronized to the other Gluster servers; the other machines in the cluster have no direct write access to the brick directories, and writing there leads to errors.
Final conclusion:
Writing data directly into the brick data directories leaves the other nodes unnotified, so data synchronization fails. The correct practice is to perform all read and write operations through the mount point.
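
When replicas do get out of sync, as with the Input/output error above, the pending-heal state can be inspected from any server. A hedged sketch (the heal subcommand exists from GlusterFS 3.3 onwards and only applies to replicated volumes):

# gluster volume heal v3_upload info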

GlusterFS 3.2.4/5 supports five volume types: Distribute, Stripe, Replica, Distribute-Stripe and Distribute-Replica. These five types cover different applications' needs for high performance and high availability; the create syntax for each is sketched after this list.
(1) Distribute volume: files are spread across the brick servers by a hash algorithm; this is the foundation of GlusterFS and its defining feature.
(2) Stripe volume: similar to RAID 0; the stripe count equals the number of brick servers, files are split into blocks and distributed round-robin across the brick servers, concurrency is at the block level, and large-file performance is high.
(3) Replica volume: similar to RAID 1; the replica count equals the number of brick servers, so every brick server holds the same file data, forming an n-way mirror with high availability.
(4) Distribute-Stripe volume: the number of brick servers is a multiple of the stripe count, combining the characteristics of Distribute and Stripe volumes.
(5) Distribute-Replica volume: the number of brick servers is a multiple of the replica count, combining the characteristics of Distribute and Replica volumes.
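
For reference, the create commands for the five types differ only in the stripe/replica keywords; a sketch with hypothetical host names and brick paths:

# gluster volume create dist_vol transport tcp host1:/brick1 host2:/brick1
# gluster volume create stripe_vol stripe 2 host1:/brick1 host2:/brick1
# gluster volume create rep_vol replica 2 host1:/brick1 host2:/brick1
# gluster volume create dist_stripe_vol stripe 2 host1:/brick1 host2:/brick1 host3:/brick1 host4:/brick1
# gluster volume create dist_rep_vol replica 2 host1:/brick1 host2:/brick1 host3:/brick1 host4:/brick1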

Now we delete the GlusterFS volume and test the other volume types. We consider the Distributed-Replicate volume the safest, so it gets the most attention here; the others are only touched on briefly and will be tested in detail when there is an opportunity:

# gluster volume stop v3_upload
Stopping volume will make its data inaccessible. Do you want to continue? (y/n) y
volume stop: v3_upload: success
 
# gluster volume delete v3_upload
Deleting volume will erase all information about the volume. Do you want to continue? (y/n) y
volume delete: v3_upload: success
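
The same brick directories are reused for the next volume. If volume create then complains that a brick "or a prefix of it is already part of a volume", the leftover Gluster metadata on each brick has to be cleared first; a hedged sketch, to be run on every node and only on bricks you really intend to recycle:

# setfattr -x trusted.glusterfs.volume-id /data/v3_upload
# setfattr -x trusted.gfid /data/v3_upload
# rm -rf /data/v3_upload/.glusterfs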

Stripe volume:

# gluster volume create v3_upload stripe 4 192.168.1.63:/data/v3_upload/ 192.168.1.244:/data/v3_upload/ 192.168.1.64:/data/v3_upload/ 192.168.1.66:/data/v3_upload/
# gluster volume start v3_upload
# gluster volume info
 
Volume Name: v3_upload
Type: Stripe
Volume ID: 70457b69-348c-40e7-8831-1d283d2a5c25
Status: Created
Number of Bricks: 1 x 4 = 4
Transport-type: tcp
Bricks:
Brick1: 192.168.1.63:/data/v3_upload
Brick2: 192.168.1.244:/data/v3_upload
Brick3: 192.168.1.64:/data/v3_upload
Brick4: 192.168.1.66:/data/v3_upload
# mount -t glusterfs -o ro 192.168.1.63:v3_upload /data/v3_upload/
# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/sda2              20G  1.5G   17G   9% /
tmpfs                 1.9G     0  1.9G   0% /dev/shm
/dev/sda1             194M   27M  158M  15% /boot
/dev/mapper/vg_web-LogVol00
                      251G  1.3G  237G   1% /data
/dev/sda3              20G  217M   19G   2% /home
192.168.1.63:v3_upload
                      2.4T  9.0G  2.3T   1% /data/v3_upload
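
A quick way to observe the striping is to write one large file through the mount (after remounting it read-write) and then check how much space it actually occupies on each brick. A sketch with an arbitrary file name and size; striped files show up on every stripe brick as sparse files of the full apparent size, so du rather than ls reflects the real usage, roughly a quarter of the data per brick here:

# umount /data/v3_upload
# mount -t glusterfs 192.168.1.63:v3_upload /data/v3_upload/
# dd if=/dev/zero of=/data/v3_upload/bigfile bs=1M count=400

Then on each brick server:

# du -sh /data/v3_upload/bigfile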

Distributed Striped volume:

# gluster volume create raid10 stripe 2 transport tcp 192.168.1.63:/data/v3_upload/ 192.168.1.64:/data/v3_upload/ 192.168.1.66:/data/v3_upload/ 192.168.1.244:/data/v3_upload/
# gluster volume start raid10
# gluster volume info
 
Volume Name: raid10
Type: Distributed-Stripe
Volume ID: 80bcc349-92c3-4839-8287-16462a9657f5
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 192.168.1.63:/data/v3_upload
Brick2: 192.168.1.64:/data/v3_upload
Brick3: 192.168.1.66:/data/v3_upload
Brick4: 192.168.1.244:/data/v3_upload
 
[root@YQD-intranet-salt-master v3_upload]# gluster volume status
Status of volume: raid10
Gluster process						Port	Online	Pid
------------------------------------------------------------------------------
Brick 192.168.1.63:/data/v3_upload			49152	Y	2594
Brick 192.168.1.64:/data/v3_upload			49165	Y	12001
Brick 192.168.1.66:/data/v3_upload			49165	Y	2272
Brick 192.168.1.244:/data/v3_upload			49157	Y	6882
NFS Server on localhost					2049	Y	2606
NFS Server on 192.168.1.244				2049	Y	6894
NFS Server on 192.168.1.66				2049	Y	2284
NFS Server on 192.168.1.64				2049	Y	12013
 
Task Status of Volume raid10
------------------------------------------------------------------------------
There are no active volume tasks

Distributed Replicated volume:

# gluster volume create  dr replica 2 transport tcp 192.168.1.63:/data/dr/ 192.168.1.64:/data/dr/ 192.168.1.66:/data/dr/ 192.168.1.244:/data/dr/
volume create: dr: success: please start the volume to access data
# gluster volume start dr
volume start: dr: success
# gluster volume info
 
Volume Name: dr
Type: Distributed-Replicate
Volume ID: c1aade6d-d2b9-4ff1-854a-89f97cf63c8f
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 192.168.1.63:/data/dr
Brick2: 192.168.1.64:/data/dr
Brick3: 192.168.1.66:/data/dr
Brick4: 192.168.1.244:/data/dr
# gluster volume status
Status of volume: dr
Gluster process						Port	Online	Pid
------------------------------------------------------------------------------
Brick 192.168.1.63:/data/dr				49153	Y	14432
Brick 192.168.1.64:/data/dr				49166	Y	12723
Brick 192.168.1.66:/data/dr				49166	Y	2458
Brick 192.168.1.244:/data/dr				49158	Y	7609
NFS Server on localhost					2049	Y	14446
Self-heal Daemon on localhost				N/A	Y	14451
NFS Server on 192.168.1.244				2049	Y	7623
Self-heal Daemon on 192.168.1.244			N/A	Y	7628
NFS Server on 192.168.1.64				2049	Y	12737
Self-heal Daemon on 192.168.1.64			N/A	Y	12743
NFS Server on 192.168.1.66				2049	Y	2472
Self-heal Daemon on 192.168.1.66			N/A	Y	2477
 
Task Status of Volume dr
------------------------------------------------------------------------------
There are no active volume tasks

Mount on the client:

# mount -t glusterfs 192.168.1.63:dr /data/dr/
# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/sda2              20G  1.5G   17G   9% /
tmpfs                 1.9G     0  1.9G   0% /dev/shm
/dev/sda1             194M   27M  158M  15% /boot
/dev/mapper/vg_web-LogVol00
                      251G  1.2G  237G   1% /data
/dev/sda3              20G  217M   19G   2% /home
192.168.1.63:dr       683G  3.5G  645G   1% /data/dr
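
To make the client mount survive a reboot, a line can be added to /etc/fstab; a minimal sketch using the same mount point as above (_netdev delays mounting until the network is up):

192.168.1.63:dr    /data/dr    glusterfs    defaults,_netdev    0 0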

Configuration file (the generated client volfile for the dr volume):

volume dr-client-0
    type protocol/client
    option send-gids true
    option transport-type tcp
    option remote-subvolume /data/dr
    option remote-host 192.168.1.63
end-volume
 
volume dr-client-1
    type protocol/client
    option send-gids true
    option transport-type tcp
    option remote-subvolume /data/dr
    option remote-host 192.168.1.64
end-volume
 
volume dr-client-2
    type protocol/client
    option send-gids true
    option transport-type tcp
    option remote-subvolume /data/dr
    option remote-host 192.168.1.66
end-volume
 
volume dr-client-3
    type protocol/client
    option send-gids true
    option transport-type tcp
    option remote-subvolume /data/dr
    option remote-host 192.168.1.244
end-volume
 
volume dr-replicate-0
    type cluster/replicate
    subvolumes dr-client-0 dr-client-1
end-volume
 
volume dr-replicate-1
    type cluster/replicate
    subvolumes dr-client-2 dr-client-3
end-volume
 
volume dr-dht
    type cluster/distribute
    subvolumes dr-replicate-0 dr-replicate-1
end-volume

From this volfile you can see the grouping: 192.168.1.63 and 192.168.1.64 form one replica pair, so salt-master and DB-NO2 hold the same content, while DB-NO1 and DB-NO3 hold the other copy. In a real environment you are unlikely to lose an entire replica pair at once; if you did, it would be a true disaster. Upload a few files and test as follows (a way to confirm which bricks hold a particular file is sketched after the listing):

[root@YQD-intranet-salt-master dr]# salt 'YQD-Intranet-DB-NO1' cmd.run 'ls /data/dr'
YQD-Intranet-DB-NO1:
    aaa.txt
    glusterfs-3.5.1.tar.gz
    lmnp.tar.gz
[root@YQD-intranet-salt-master dr]# salt 'YQD-Intranet-DB-NO2' cmd.run 'ls /data/dr'
YQD-Intranet-DB-NO2:
    mysql-proxy-0.8.4.tar.gz
[root@YQD-intranet-salt-master dr]# salt 'YQD-Intranet-DB-NO3' cmd.run 'ls /data/dr'
YQD-Intranet-DB-NO3:
    aaa.txt
    glusterfs-3.5.1.tar.gz
    lmnp.tar.gz
[root@YQD-intranet-salt-master dr]# ls /data/dr/
mysql-proxy-0.8.4.tar.gz
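
To confirm from the client which bricks actually hold a given file, the pathinfo virtual extended attribute can be queried on the mounted file; a hedged sketch (requires the attr package on the client):

# getfattr -n trusted.glusterfs.pathinfo /data/dr/mysql-proxy-0.8.4.tar.gz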
