Here we compare GFS2/DLM-backed (lala/) vs. thin-LVM/DRBD-backed (lili/) virtual disks.
flags="-u root -x 5 -r 1024"
mkdir -p /data/guests/slack1/lala/
mkdir -p /data/guests/slack1/lili/
rm -f /var/tmp/bonnie-results
drbdadm status res-data             # dual primary
cd /data/guests/slack1/
dd if=/dev/zero of=slack1.ext4 bs=1G count=0 seek=5
mkfs.ext4 slack1.ext4
mount slack1.ext4 lala/
bonnie++ $flags -m gfs2-ext4 -d lala/ | tee -a /var/tmp/bonnie-results
umount lala/
rmdir lala/
rm -f slack1.ext4
lvcreate --thin -V 5G vdisks/thinpool -n slack1
ls -lF /dev/vdisks/slack*
vi /etc/drbd.conf                   # add a res-slack1 resource
drbdadm create-md res-slack1        # metadata must exist before the device can attach
drbdadm up res-slack1               # or: drbdadm adjust res-slack1
drbdadm status
WAIT UNTIL IT IS IN SYNC, and only then continue.
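If you'd rather script the wait, a rough sketch polling drbdadm status (this assumes the drbd9-style output with peer-disk: fields; on drbd8 grep /proc/drbd for ds:UpToDate/UpToDate instead):

until drbdadm status res-slack1 | grep -q 'peer-disk:UpToDate'; do
    sleep 10    # still syncing, check again in 10s
done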
drbdadm primary res-slack1
drbdadm status res-slack1           # single primary
mkfs.ext4 /dev/drbd1
mount /dev/drbd1 lili/
bonnie++ $flags -m thindrbd-ext4 -d lili/ | tee -a /var/tmp/bonnie-results
bonnie++ $flags -m thindrbd-protoB-ext4 -d lili/ | tee -a /var/tmp/bonnie-results   # after switching to protocol B, see below
bonnie++ $flags -m thindrbd-protoA-ext4 -d lili/ | tee -a /var/tmp/bonnie-results   # after switching to protocol A
ls -alF lili/
rm -f lili/Bonnie.*
umount lili/
rmdir lili/
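The -m labels above imply the replication protocol was switched between runs; a sketch of the edit-and-reload cycle (adjust should reconnect with the new protocol, but verify with status before launching the next run):

vi /etc/drbd.conf                   # protocol C; -> protocol B; -> protocol A; inside res-slack1
drbdadm adjust res-slack1
drbdadm status res-slack1           # confirm before the next bonnie++ run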
Now repeat the comparison with nine virtual disks in parallel.
drbdadm status res-data             # dual primary
cd /data/guests/
for n in `seq 1 9`; do
    mkdir -p /data/guests/slack$n/lala/
    mkdir -p /data/guests/slack$n/lili/
    cd slack$n/
    dd if=/dev/zero of=slack$n.ext4 bs=1G count=0 seek=5
    mkfs.ext4 slack$n.ext4
    mount /data/guests/slack$n/slack$n.ext4 /data/guests/slack$n/lala/
    cd ../
done; unset n
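A quick sanity check that all nine loopback-mounted images came up:

mount | grep -c 'slack./lala'       # expect 9
df -h /data/guests/slack*/lala/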
for n in `seq 2 9`; do
    lvcreate --thin -V 5G vdisks/thinpool -n slack$n
done; unset n
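lvs confirms the thin volumes landed in the pool:

lvs -o lv_name,lv_size,pool_lv vdisks   # expect slack1..slack9 backed by thinpool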
for n in `seq 2 9`; do
cat <<EOF >> /etc/drbd.conf
resource res-slack$n {
        protocol A;
        device          /dev/drbd$n;
        meta-disk       internal;
        on pro5s1 {
                disk    /dev/vdisks/slack$n;
                address x.x.x.x:770$n;
        }
        on pro5s2 {
                disk    /dev/vdisks/slack$n;
                address x.x.x.x:770$n;
        }
}
EOF
done; unset n
Change the IPs accordingly and replicate the configuration across the cluster:
scp /etc/drbd.conf ...
On both nodes:
drbdadm adjust all
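drbdadm dump re-parses the whole config, so it doubles as a syntax check after the scp:

drbdadm dump all > /dev/null && echo drbd.conf parses OK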
for n in `seq 2 9`; do
    drbdadm create-md res-slack$n
    drbdadm attach res-slack$n
    drbdadm connect res-slack$n
done; unset n
for n in `seq 5 9`; do
    drbdadm primary --force res-slack$n    # --force marks the fresh local data UpToDate; this node becomes the initial sync source
done; unset n
for n in `seq 5 9`; do
    mkfs.ext4 /dev/drbd$n
    mount /dev/drbd$n /data/guests/slack$n/lili/
done; unset n
for n in `seq 5 9`; do
    echo starting job for mount-$n
    bonnie++ -u root -x 5 -r 1024 -m mount-$n-gfs2-ext4 -d /data/guests/slack$n/lala/ \
        > /var/tmp/bonnie-mount-$n 2> /var/tmp/bonnie-mount-$n.error &
    echo starting job for thindrbd-$n, then waiting 3 seconds
    bonnie++ -u root -x 5 -r 1024 -m thindrbd-$n-ext4 -d /data/guests/slack$n/lili/ \
        > /var/tmp/bonnie-thindrbd-$n 2> /var/tmp/bonnie-thindrbd-$n.error &
    sleep 3
done; unset n
for n in `seq 1 4`; do
    drbdadm primary --force res-slack$n
done; unset n
for n in `seq 1 4`; do
    mkfs.ext4 /dev/drbd$n
    mount /dev/drbd$n /data/guests/slack$n/lili/
done; unset n
for n in `seq 1 4`; do
    echo starting job for mount-$n
    bonnie++ -u root -x 5 -r 1024 -m mount-$n-gfs2-ext4 -d /data/guests/slack$n/lala/ \
        > /var/tmp/bonnie-mount-$n 2> /var/tmp/bonnie-mount-$n.error &
    echo starting job for thindrbd-$n, then waiting 3 seconds
    bonnie++ -u root -x 5 -r 1024 -m thindrbd-$n-ext4 -d /data/guests/slack$n/lili/ \
        > /var/tmp/bonnie-thindrbd-$n 2> /var/tmp/bonnie-thindrbd-$n.error &
    sleep 3
done; unset n
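Once every background job has exited, the per-disk results can be merged into one report; a rough sketch using bon_csv2html from the bonnie++ package (the machine-readable CSV rows are the comma-separated lines at the end of each output file):

wait                                # block until every background bonnie++ exits
cat /var/tmp/bonnie-mount-? /var/tmp/bonnie-thindrbd-? \
    | grep ',' | bon_csv2html > /var/tmp/bonnie-parallel.html

bon_csv2txt works the same way if you want the summary on the terminal instead.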
# cleanup:
#rm -f lala/Bonnie.*
#umount lala/
#rmdir lala/
#rm -f slack1.ext4
#rm -f /var/tmp/bonnie-mount-*