替换坏硬盘重做raid0并格式化挂载的脚本

#!/bin/bash

# Rebuild a failed HP logical drive as RAID0 with hpssacli.
# Globals written: FAILED_LD (failed logicaldrive number), FAILED_PD (failed
# physicaldrive id, e.g. "1I:1:2"), ANS (operator confirmation).
# Exits 1 when no failure is found, the drive id looks wrong, the operator
# declines, or a hpssacli step fails.
hp_raid0() {
	echo -e "\033[32m========================> I am hp\033[0m"
	# Show the controller config lines around any "Failed" entry for the operator.
	hpssacli ctrl all show config|grep Failed -A2|grep -vE '^$'
	# Number of the failed logical drive (2nd column of the "logicaldrive" line).
	FAILED_LD=`hpssacli ctrl all show config|grep Failed -A2|grep -vE '^$'|grep logicaldrive|awk '{print $2}'`
	# NOTE(review): assumes exactly one failed LD — a multi-word value would make
	# the arithmetic test below error out. Confirm before using on multi-failure boxes.
        if [[ $FAILED_LD -gt 0 ]];then
                echo -e "\033[32m========================> Logicaldrive Failed is $FAILED_LD\033[0m"
        else
                echo -e "\033[31m========================> Logicaldrive Failed $FAILED_LD not found...\033[0m";exit 1
        fi
	# Physical drive id of the failed disk; expected to contain "I:" (port:box:bay form).
	FAILED_PD=`hpssacli ctrl all show config|grep Failed -A2|grep -vE '^$'|grep physicaldrive|awk '{print $2}'`
	if [[ $FAILED_PD =~ I: ]];then
		echo -e "\033[32m========================> Physicaldrive $FAILED_PD will be configured as raid0...\033[0m"
                read -p "========================> Confirm? [y/n]:" ANS
		if [ $ANS = y ];then
			# Drop the failed logical drive first; "forced" skips the interactive prompt.
			hpssacli ctrl slot=0 ld $FAILED_LD delete forced
			if [[ $? -eq 0 ]];then
				echo -e "\033[32m========================> Logicaldrive [$FAILED_LD] delete successfully...\033[0m"
			else
				echo -e "\033[31m========================> Logicaldrive [$FAILED_LD] delete failed...\033[0m";exit 1
			fi
			# Recreate a RAID0 logical drive on the replaced physical disk.
			hpssacli ctrl slot=0 create type=ld drives=$FAILED_PD raid=0
                        if [[ $? -eq 0 ]];then
                                echo -e "\033[32m========================> Physicaldrive [$FAILED_PD] config successfully...\033[0m"
                        else
                                echo -e "\033[31m========================> Physicaldrive [$FAILED_PD] config failed...\033[0m";exit 1
                        fi
		else
			exit 1
		fi
	else
		echo -e "\033[31m========================> Physicaldrive $FAILED_PD seems wrong...\033[0m";exit 1
	fi

}

# Rebuild a replaced Dell disk as RAID0 with MegaCli64.
# Globals written: FAILED_PD ("enclosure:slot", e.g. "32:5"), ANS.
# Exits 1 when the drive id looks wrong, the operator declines, or MegaCli fails.
dell_raid0() {
	echo -e "\033[32m========================> I am dell\033[0m"
        # The 18 lines before "Firmware state: Unconfigured" hold the drive's
        # "Enclosure Device ID:" and "Slot Number:" lines; take their values
        # (first two lines) and join them with ':' -> "enclosure:slot".
        FAILED_PD=`/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll -NoLog|grep -B 18 "Firmware state: Unconfigured"|sed -n '1,2p'|awk -F ': ' '{print $2}'|paste -s -d :`
	# NOTE(review): assumes the enclosure device id is 32 on this chassis model —
	# confirm before reusing on other Dell hardware.
	if [[ $FAILED_PD =~ ^32: ]];then
	        echo -e "\033[32m========================> Physicaldrive $FAILED_PD will be configured as raid0...\033[0m"
                read -p "========================> Confirm? [y/n]:" ANS
		if [ $ANS = y ];then
			# Create a RAID0 array on the unconfigured drive (write-back, direct I/O).
			/opt/MegaRAID/MegaCli/MegaCli64 -CfgLdAdd -r0[$FAILED_PD] WB Direct -a0
			if [[ $? -eq 0 ]];then
				echo -e "\033[32m========================> Physicaldrive [$FAILED_PD] config successfully...\033[0m"
			else
				echo -e "\033[31m========================> Physicaldrive [$FAILED_PD] config failed...\033[0m";exit 1
			fi
		else
			exit 1
		fi
	else
		echo -e "\033[31m========================> Physicaldrive $FAILED_PD seems wrong...\033[0m";exit 1
	fi
}

# Find the freshly created, still-unpartitioned disk, format it as ext4 and
# mount it at the mount point that belonged to the dead disk, fixing the UUID
# in /etc/fstab. Assumes fstab mounts by UUID and exactly one disk was replaced.
# Exits 1 at every step that cannot be verified.
format() {
        # Start from clean scratch files: stale /tmp/old_id or /tmp/new_id from a
        # previous run would silently yield a wrong UUID below.
        rm -f /tmp/sd.list /tmp/old_id /tmp/new_id

        # Record the base name (sdX) of every /dev/sd* entry. A disk that already
        # carries partitions shows up more than once (sdb, sdb1, ...).
        for i in `ls /dev/|grep -E '^sd'|sort`
               do
                       echo ${i:0:3} >> /tmp/sd.list
               done
        # The new disk is the name occurring exactly once (no partitions yet).
        # awk '$1==1' instead of 'grep 1': grep would also match counts 10, 11, 21...
        DISK=`cat /tmp/sd.list|uniq -c|awk '$1==1{print $2}'`

	if [[ "$DISK" =~ ^sd ]];then
	        echo -e "\033[32m========================> Start format $DISK as ext4 file system...\033[0m"
                read -p "========================> Confirm? [y/n]:" ANS
		# Quote $ANS: an empty answer must not crash the test.
		if [ "$ANS" = y ];then
		        parted -s /dev/$DISK mklabel gpt
		        parted -s /dev/$DISK mkpart primary 1 100%
	        	mkfs.ext4 /dev/$DISK'1'
		else
			exit 1
		fi
	else
		echo -e "\033[31m========================> New disk name "$DISK" seems wrong...\033[0m";exit 1
	fi

        # A UUID that is listed in fstab but no longer reported by blkid belongs
        # to the vanished (replaced) disk.
        for i in `cat /etc/fstab |grep UUID|cut -d'=' -f2|cut -d ' ' -f1`
                do
                        blkid |grep -q $i||echo $i > /tmp/old_id;
                done

        OLD_ID=`cat /tmp/old_id`
	if [[ $OLD_ID ]];then
		echo -e "\033[32m========================> Old uuid is $OLD_ID\033[0m"
	else
		echo -e "\033[31m========================> Old uuid not found...\033[0m";exit 1
	fi

        # Recover the mount point from the dead disk's fstab entry.
        MNT_POINT=`cat /etc/fstab |grep $OLD_ID|awk '{print $2}'`
	if [[ $MNT_POINT ]];then
		echo -e "\033[32m========================> Mount point is $MNT_POINT\033[0m"
	else
		echo -e "\033[31m========================> Mount point not found...\033[0m";exit 1
	fi

	# Conversely, a UUID reported by blkid but absent from fstab is the one
	# mkfs.ext4 just generated for the new partition.
	# NOTE(review): cut -d'"' -f2 takes the first quoted field of each blkid
	# line, which is only the UUID when no LABEL precedes it — confirm.
	for i in `blkid|cut -d '"' -f2`
		do
			grep -q $i /etc/fstab||echo $i > /tmp/new_id;
		done
	NEW_ID=`cat /tmp/new_id`
	if [[ $NEW_ID ]];then
		echo -e "\033[32m========================> New uuid is $NEW_ID\033[0m"
	else
		echo -e "\033[31m========================> New uuid not found...\033[0m";exit 1
	fi

        # Swap the dead mount for the new partition and persist it in fstab.
        umount -f $MNT_POINT
        mount /dev/$DISK'1' $MNT_POINT/
	sed -i "s/$OLD_ID/$NEW_ID/" /etc/fstab
	NEW_LINE=`cat /etc/fstab|grep $NEW_ID`
	echo -e "\033[32m========================> /etc/fstab updated...\033[0m\n$NEW_LINE"
}


# --- main --------------------------------------------------------------------
# Clean the scratch file, detect the server vendor from DMI data, back up
# /etc/fstab, then run the vendor-specific RAID rebuild followed by the
# common format/remount step. Anything that is not Dell is treated as HP.
rm -f /tmp/sd.list

if dmidecode | grep Vendor | grep -q Dell; then
	BRAND=Dell
else
	BRAND=HP
fi

STAMP=$(date "+%Y%m%d")
# Keep a dated copy of fstab before format() rewrites it.
cp /etc/fstab /etc/fstab_$STAMP

case $BRAND in
	HP)
		hp_raid0
		format
		;;
	Dell)
		dell_raid0
		format
		;;
	*)
		echo -e "\033[31m========================> BRAND is wrong...\033[0m"
		exit 1
		;;
esac

 

生成rundeck的resources文件的shell脚本

这里选择json格式的文件,首先是一个模板文件叫tl.json

"_ALLIP": {
    "username": "ffsa",
    "hostname": "_IP",
    "nodename": "_ALLIP",
    "adminip": "_ADMINIP",
    "idracmac": "_IDRACMAC",
    "mod": "_MOD",
    "sn": "_SN",
    "appname": "_APPNAME",
    "master": "_MASTER"
},

脚本如下,cmdb_pserver.list文件是从CMDB数据库中查询出来的服务器信息列表

#!/bin/bash
# Generate a Rundeck resources file (JSON) from the CMDB physical-server list.
# Input : cmdb_pserver.list - one server per line:
#           allip adminip idracmac sn mod appname master
#         tl.json           - per-node JSON template containing _PLACEHOLDER tokens
# Output: resources_pserver.json

echo "{" > resources_pserver.json
cat cmdb_pserver.list |\
while read line;do
    ALLIP=`echo $line |awk '{print $1}'`
    # The first field may be "ip|otherip"; hostname wants only the part before '|'.
    IP=`echo $line |awk '{print $1}'|awk -F '|' '{print $1}'`
    ADMINIP=`echo $line |awk '{print $2}'`
    IDRACMAC=`echo $line |awk '{print $3}'`
    MOD=`echo $line |awk '{print $5}'`
    SN=`echo $line |awk '{print $4}'`
    APPNAME=`echo $line |awk '{print $6}'`
    MASTER=`echo $line |awk '{print $7}'`

    # Fill the template for this node. _ALLIP must be substituted before _IP
    # (otherwise "s/_IP/" would corrupt the _ALLIP token), so keep this order.
    # The whole sed expression stays on one line: the original had "_APPNAME"
    # broken across two lines, which made the s-command invalid.
    cat tl.json |sed -e "s/_ALLIP/$ALLIP/g" -e "s/_IP/$IP/g" -e "s/_ADMINIP/$ADMINIP/g" -e "s/_IDRACMAC/$IDRACMAC/g" -e "s/_MOD/$MOD/g" -e "s/_SN/$SN/g" -e "s/_APPNAME/$APPNAME/g" -e "s/_MASTER/$MASTER/g" >> resources_pserver.json

done

# Drop the trailing comma left by the last node entry, then close the object.
sed -i '$s/,$//' resources_pserver.json
echo "}" >> resources_pserver.json

 

分配虚拟机并更新CMDB的shell脚本一例

用于提取jira描述段信息,根据这些信息去查CMDB库然后分配机器并更新CMDB库,做个备忘:)

shell中调用的desc.py

#!/usr/bin/env python

import json,sys
reload(sys)
sys.setdefaultencoding( "utf-8" )

f=file('json')
s=json.load(f)

print s["fields"]["description"]

shell

#!/bin/bash
# Allocate VMs for a Jira ticket: fetch the issue, extract app name / owner /
# per-environment specs from its description, pick free servers from the CMDB
# and mark them assigned. Usage: $0 <JiraID>

JID=$1

if [ -z "${JID}" ]; then
        echo "ERROR: need Jira ID, it's a number usually"
        echo "Usage: $0 JiraID"
        exit 1
fi

# A marker file under jid/ means this ticket was already processed (the main
# loop touches it at the very end) — refuse to run twice.
if [ -f jid/${JID} ];then
    echo "Notice: JIRA-${JID} had been executed,exit..."
    exit 1
fi

# Save the issue JSON to the file "json" (read by desc.py), extract the
# description into "desc", and normalize Windows line endings.
curl -u user:passwd http://jira.yourdomain.cn/issue/JIRA-${JID} > json 2>/dev/null
python desc.py > desc
dos2unix desc > /dev/null 2>&1

# "应用名称" = application name, "应用负责人" = application owner; both are
# "key: value" lines in the description (spaces stripped).
APPNAME=`cat desc|grep '应用名称'|cut -d: -f2|sed "s/ //g"`
MASTER=`cat desc|grep '应用负责人'|cut -d: -f2|sed "s/ //g"`
#echo "Appname: ${APPNAME}"
#echo "Master: ${MASTER}"

if [ -z "${APPNAME}" ] || [ -z "${MASTER}" ]; then
    echo "Error: Appname or Master can't Found,PLS check description in jira,exit..."
    exit 1
fi

# Parse one environment's spec line ("<env>: N台XcYgZZZg") from the file
# "desc" into the globals NUM / CORES / MEMORY / DISK.
# tr translates each of the 3 UTF-8 bytes of 台 into 'T', producing "TTT" —
# that is why the pattern expects the literal TTT between count and cores.
# Exits 1 when the spec does not match the expected layout.
function get_conf() {
    ENVIR=$1
    # Normalize: drop spaces, lower-case ASCII, map 台 -> TTT.
    CONF=$(grep "${ENVIR}" desc | cut -d: -f2 | sed "s/ //g" | tr '[A-Z]' '[a-z]' | tr '台' 'T')
    PATTERN="^[0-9]{1,2}TTT[0-9]{1,2}c[0-9]{1,2}g[0-9]{3}g"

    # Guard clause: bail out early on anything that does not look like a spec.
    if ! [[ "$CONF" =~ $PATTERN ]]; then
        echo "Error: Invalid Config,PLS check description in jira,exit..."
        exit 1
    fi

    NUM=$(echo ${CONF} | cut -d'T' -f1)
    CORES=$(echo ${CONF} | cut -d'c' -f1 | cut -d'T' -f4)
    MEMORY=$(echo ${CONF} | cut -dc -f2 | cut -dg -f1 | sed "s/ //g")
    TMP=$(echo ${CONF} | cut -dg -f2 | sed "s/ //g")
    # Disk size is always three digits (enforced by the pattern above).
    DISK=${TMP:0:3}
}

#######################################
# Pick NUM free servers matching the requested spec from the CMDB and write
# their IPs to ip.list (also echoed via tee).
# Arguments: $1 env id, $2 cores, $3 memory (GB), $4 disk (GB), $5 count
# Globals written: ENVIR CORES MEMORY DISK NUM SQL RNUM
# Exits 1 when fewer than NUM free servers are available.
#######################################
function select_srv() {
        ENVIR=$1;CORES=$2;MEMORY=$3;DISK=$4;NUM=$5
        # FIX: use single quotes for the SQL string values. The original nested
        # double quotes were consumed by the shell, so MySQL received bare,
        # unquoted literals (assign=1 and env=... etc.).
        SQL="select ip from table where assign='1' and env='${ENVIR}' and cores='${CORES}' and memory='${MEMORY}' and disk='${DISK}' limit ${NUM};"
        #echo Query: "${SQL}"
        # grep -v strips the header row (and any stray column names) from the output.
        mysql -uuser -ppasswd -h1.1.1.1 -Ddbname -e "${SQL}"|grep -v -E 'ip|hostname|cores|memory|disk|assign' |tee ip.list
        RNUM=`cat ip.list |wc -l`
        if [ ${NUM} -gt ${RNUM} ];then
                echo "Notice: Free server is not enough,exit..."
                exit 1
        fi
}

# Dump the CMDB table to a timestamped file under backup/ before any rows are
# touched; abort the whole run when the dump fails so no update happens
# without a restorable snapshot.
function db_backup() {
        local stamp
        stamp=$(date "+%Y%m%d%H%M%S")
        echo "Notice: Table backup...."
        if mysqldump -uuser -ppasswd -h1.1.1.1 dbname table > "backup/dbname_table_backup_${stamp}.sql"; then
                echo "Notice: Table already backup successfully"
        else
                echo "Notice: Table already backup failed,exit..."
                exit 1
        fi
}

# Mark every IP listed in ip.list as assigned in the CMDB: set appname, owner,
# environment id and a link back to the Jira ticket.
# Arguments: $1 - environment id to record
# Reads globals: APPNAME MASTER JID; reads file ip.list (one IP per line).
function update_rows() {
    ENVIR=$1
    while read -r IP; do
        UPDATE="update table set assign='0',appname='${APPNAME}',master='${MASTER}',env='${ENVIR}',jira='http://jira.yourdomain.cn/browse/JIRA-${JID}' where ip='${IP}';"
        # Keep an audit trail of every statement issued for this ticket.
        echo Update: "${UPDATE}" >> logs/JIRA-${JID}.log
        mysql -uuser -ppasswd -h1.1.1.1 -Ddbname -e "${UPDATE}"
    done < ip.list
}

# Main flow: snapshot the table, then for each known environment present in
# the ticket description, parse its spec and allocate/record servers.
db_backup

for ENVIR in A环境 B环境 C环境
    do
        echo "${ENVIR} #################################"
        if cat desc | grep -q ${ENVIR};then
                # Fills NUM/CORES/MEMORY/DISK globals from the spec line.
                get_conf ${ENVIR}
                # Map environment name -> numeric env ids for query/update.
                # NOTE(review): B环境 queries env=3 but records env=2, while
                # C环境 uses 3/3 — this looks inconsistent; confirm the
                # intended id mapping before relying on it.
                if echo ${ENVIR} | grep -q A环境;then
                            select_srv 1 ${CORES} ${MEMORY} ${DISK} ${NUM}
        	        		update_rows 1
                elif echo ${ENVIR} | grep -q B环境;then
                                        select_srv 3 ${CORES} ${MEMORY} ${DISK} ${NUM}
                                        update_rows 2
                else
                    select_srv 3 ${CORES} ${MEMORY} ${DISK} ${NUM}
                                        update_rows 3
                fi
        else
            echo "Notice: ${ENVIR} not found,ignore..."
        fi
    done
# Marker file: prevents the script from processing the same ticket twice.
touch jid/${JID}

 

 

让进程在后台运行

通过ssh连上服务器之后,如果要运行一个耗时较长的任务,就会担心因为网络断开导致执行失败。这是因为通过ssh登陆系统之后,bash成为了sshd程序的一个子进程,所执行的任务则是bash的子进程。当网络断开后终端会收到一个HUP(hangup)信号,然后关闭终端的所有子进程,所以执行的任务也会被停止。

那么,要让任务不受网络断开的影响,便有两个思路:

1,让进程忽略HUP信号。

2,让进程不属于终端的子进程。

第一种思路可以使用nohup命令实现

# nohup ping baidu.com > /dev/null &
[1] 29876
# nohup: ignoring input and redirecting stderr to stdout
# ps -ef|grep ping|grep -v grep
root     29876 25203  0 16:53 pts/0    00:00:00 ping baidu.com

关闭当前终端,再重新打开一个终端,还会看到这个进程。

如果是已经开始执行的任务,那么就需要使用disown命令来忽略HUP信号

对于已经开始在前台运行的程序,需要先挂起,再使用bg让他在后台运行

# ping baidu.com > /dev/null
^Z
[1]+  Stopped                 ping baidu.com > /dev/null
# jobs
[1]+  Stopped                 ping baidu.com > /dev/null
# bg 1
[1]+ ping baidu.com > /dev/null &
# jobs
[1]+  Running                 ping baidu.com > /dev/null &
# disown -h %1
# ps -ef|grep ping|grep -v grep
root      3139  2892  0 17:17 pts/2    00:00:00 ping baidu.com
# logout
重新登陆之后进程还会在
# ps -ef|grep ping|grep -v grep
root      3139     1  0 17:17 ?        00:00:00 ping baidu.com

如果已经使用了&符号让任务在后台运行了

# ping baidu.com > /dev/null &
[1] 4339
# jobs
[1]+  Running                 ping baidu.com > /dev/null &
# disown -h %1
# logout
重新登陆之后
# ps -ef|grep ping|grep -v grep
root      4339     1  0 17:22 ?        00:00:00 ping baidu.com

如果有比较多的任务都需要忽略HUP信号,那就需要使用screen命令了,这个命令略强大,回头再说:)

 

第二种思路可以有两种实现

1,setsid,使用setsid命令可以让进程的ppid为1,即init进程

# setsid ping baidu.com > /dev/null 
# ps -ef|grep ping|grep -v grep
root     31225     1  0 16:59 ?        00:00:00 ping baidu.com

2,使用子shell也可以让进程的ppid为1

# (ping baidu.com > /dev/null &)
# ps -ef|grep ping |grep -v grep
root     32655     1  0 17:05 pts/0    00:00:00 ping baidu.com

 

参考:http://www.ibm.com/developerworks/cn/linux/l-cn-nohup/index.html

用AWK及Python求和及平均值

文件如下

# cat cesc 
a,1
a,2
b,3
b,4
c,2
d,5

需要获取abcd出现的次数,逗号后面数字的和及平均值。

With shell:

# grep -E ^a cesc |awk -F ',' '{sum+=$2} END {print "a, Count:" NR " Sum: " sum " Average: " sum/NR}'
a, Count:2 Sum: 3 Average: 1.5
# grep -E ^b cesc |awk -F ',' '{sum+=$2} END {print "b, Count:" NR " Sum: " sum " Average: " sum/NR}'
b, Count:2 Sum: 7 Average: 3.5
# grep -E ^c cesc |awk -F ',' '{sum+=$2} END {print "c, Count:" NR " Sum: " sum " Average: " sum/NR}'
c, Count:1 Sum: 2 Average: 2
# grep -E ^d cesc |awk -F ',' '{sum+=$2} END {print "d, Count:" NR " Sum: " sum " Average: " sum/NR}'
d, Count:1 Sum: 5 Average: 5

或者写成一个for循环,这样可移植性更好,另外,在awk中引用shell的变量有两种办法,一个是用双引号和单引号包含变量,如:”‘var'”,还有就是使用awk的-v参数提前声明,如:awk -v var=”$var”

# for i in `cat cesc |cut -d, -f1|sort|uniq`;do grep -E ^$i cesc |awk -F ',' '{sum+=$2} END {print "'$i'" " Count: " NR ", Sum: " sum ", Average: " sum/NR}';done
a Count: 2, Sum: 3, Average: 1.5
b Count: 2, Sum: 7, Average: 3.5
c Count: 1, Sum: 2, Average: 2
d Count: 1, Sum: 5, Average: 5

或者:

# for i in `cat cesc |cut -d, -f1|sort|uniq`;do grep -E ^$i cesc |awk -v i="$i" -F ',' '{sum+=$2} END {print i " Count: " NR ", Sum: " sum ", Average: " sum/NR}';done
a Count: 2, Sum: 3, Average: 1.5
b Count: 2, Sum: 7, Average: 3.5
c Count: 1, Sum: 2, Average: 2
d Count: 1, Sum: 5, Average: 5

 

 

With python:(python的整形除法默认地板除,只返回一个整形,可以使用from __future__ import division来实现真正的除法)

# Count, sum and average the numbers per key (a-d) in the file "cesc", whose
# lines look like "a,1". The __future__ import makes "/" true division under
# Python 2 so averages keep their fractional part (e.g. 3/2 == 1.5), matching
# the awk output shown above.
from __future__ import division

alist = []
blist = []
clist = []
dlist = []
for i in open('cesc'):
    ss = i.split(',')
    # int() tolerates the trailing newline left on ss[1].
    if ss[0] == 'a':
        alist.append(int(ss[1]))
    elif ss[0] == 'b':
        blist.append(int(ss[1]))
    elif ss[0] == 'c':
        clist.append(int(ss[1]))
    elif ss[0] == 'd':
        dlist.append(int(ss[1]))

# FIX: the original used "//", which stays FLOOR division even with the
# __future__ import, so it printed 1 instead of 1.5 and disagreed with the
# awk results. "/" is the true division the import was added for.
# (print(...) with a single string argument works on Python 2 and 3.)
print('a, Count: ' + str(len(alist)) + ', Sum: ' + str(sum(alist)) + '. Average: ' + str(sum(alist)/len(alist)))
print('b, Count: ' + str(len(blist)) + ', Sum: ' + str(sum(blist)) + '. Average: ' + str(sum(blist)/len(blist)))
print('c, Count: ' + str(len(clist)) + ', Sum: ' + str(sum(clist)) + '. Average: ' + str(sum(clist)/len(clist)))
print('d, Count: ' + str(len(dlist)) + ', Sum: ' + str(sum(dlist)) + '. Average: ' + str(sum(dlist)/len(dlist)))

 

Shell内置的字符串操作符

使用shell内置的字符串操作符可以精简代码,并提高脚本运行效率,唯一的缺点可能是比较难记,所以在这里做个记录。

${var}	变量var的值, 与$var相同
${var-DEFAULT}	如果var没有被声明, 那么就以$DEFAULT作为其值 *
${var:-DEFAULT}	如果var没有被声明, 或者其值为空, 那么就以$DEFAULT作为其值 *
${var=DEFAULT}	如果var没有被声明, 那么就以$DEFAULT作为其值 *
${var:=DEFAULT}	如果var没有被声明, 或者其值为空, 那么就以$DEFAULT作为其值 *
${var+OTHER}	如果var声明了, 那么其值就是$OTHER, 否则就为null字符串
${var:+OTHER}	如果var被设置了, 那么其值就是$OTHER, 否则就为null字符串
${var?ERR_MSG}	如果var没被声明, 那么就打印$ERR_MSG *
${var:?ERR_MSG}	如果var没被设置, 那么就打印$ERR_MSG *
${!varprefix*}	匹配之前所有以varprefix开头进行声明的变量
${!varprefix@}	匹配之前所有以varprefix开头进行声明的变量
${#string}	$string的长度 	 
${string:position}	在$string中, 从位置$position开始提取子串
${string:position:length}	在$string中, 从位置$position开始提取长度为$length的子串
${string#substring}	从变量$string的开头, 删除最短匹配$substring的子串
${string##substring}	从变量$string的开头, 删除最长匹配$substring的子串
${string%substring}	从变量$string的结尾, 删除最短匹配$substring的子串
${string%%substring}	从变量$string的结尾, 删除最长匹配$substring的子串
${string/substring/replacement}	使用$replacement, 来代替第一个匹配的$substring
${string//substring/replacement}	使用$replacement, 代替所有匹配的$substring
${string/#substring/replacement}	如果$string的前缀匹配$substring, 那么就用$replacement来代替匹配到的$substring
${string/%substring/replacement}	如果$string的后缀匹配$substring, 那么就用$replacement来代替匹配到的$substring
以上内容来自http://www.cnblogs.com/chengmo/archive/2010/10/02/1841355.html

一个简单的shadowsocks启动脚本

#!/bin/bash
#processname: shadowsocks
# Minimal start/stop/restart wrapper around ssserver.
# Config: /usr/local/shadowsocks/config.json  Log: /usr/local/shadowsocks/log

case $1 in

    start)
        echo -n "Starting shadowsocks"
        nohup /usr/local/bin/ssserver -c /usr/local/shadowsocks/config.json > /usr/local/shadowsocks/log &
        echo " done"
    ;;

    stop)
        echo -n "Stopping shadowsocks"
        # pgrep -f matches the full command line (the daemon runs as
        # "/usr/bin/python /usr/local/bin/ssserver ..."). Guard against an
        # empty result so kill is never called without arguments when the
        # service is not running.
        PIDS=$(pgrep -f '/usr/local/bin/ssserver')
        if [ -n "$PIDS" ];then
            kill -9 $PIDS
        fi
        echo " done"
    ;;

    restart)
        echo -n "Restarting shadowsocks"
        $0 stop
        sleep 1
        $0 start
        echo " done"
    ;;

    *)
        # Plain echo (not -n): the usage line must end with a newline.
        echo "Usage: $0 {start|stop|restart}"
    ;;

esac
 

awk命令学习笔记

语法:

awk [ -F re] [parameter...] ['prog'] [-f progfile][in_file...]
  参数说明:
-F re:允许awk更改其字段分隔符。

parameter: 该参数帮助为不同的变量赋值。

'prog': awk的程序语句段。这个语句段必须用单引号:'和'括起,以防被shell解释。'prog'语句段的标准形式为:'pattern {action}'

pattern参数可以是egrep正则表达式中的任何一个,它可以使用语法/re/再加上一些样式匹配技巧构成。与sed类似,你也可以使用","分开两样式以选择某个范围。 

action参数总是被大括号包围,由awk语句组成,各语句之间用";"分隔。awk解释它们,并在pattern给定的样式匹配的记录上执行操作。你可以省略pattern和 action之一,但不能两者同时省略,当省略pattern时没有样式匹配,表示对所有行(记录)均执行操作,省略action时执行缺省的操作——print,在标准输出上显示。 

-f progfile:允许awk调用并执行progfile指定有程序文件。progfile是一个文本文件,他必须符合awk的语法,适合awk程序较大时使用。 

in_file:awk的输入文件,awk允许对多个输入文件进行处理。值得注意的是awk不修改输入文件。如果未指定输入文件,awk将接受标准输入,并将结果显示在标准输出上。awk支持输入输出重定向。

  记录、字段与内置变量: awk默认将一行视为一个记录,而字段指一行中的某一部分(由分隔符决定,默认为空格)。 $0用来表示一行记录,$1,$2…$n用来表示一行记录中的不同字段。   常用的内置变量有: NF (Number of Fields): 为一整数, 其值表$0上所存在的字段数目. NR (Number of Records): 为一整数, 其值表awk已读入的数据行数目. FILENAME: 正在处理的数据文件文件名.  

运算符:

赋值运算符
= += -= *= /= %= ^= **=	赋值语句

逻辑运算符
||	逻辑或
&&	逻辑与

正则运算符
~ !~	匹配正则表达式和不匹配正则表达式

关系运算符
< <= > >= != ==	关系运算符

算术运算符
+ -	加,减
* / %	乘,除与求余
+ - !	一元加,减和逻辑非
^ **	求幂
++ --	增加或减少,作为前缀或后缀

 


  测试文件内容如下,第一列为名称,第二列为时薪,第三列为工时。

Theo	5.10	12
Beth	4.00	0
Dan	3.75	11
Aaron	6.10	30
kathy	4.00	10
Mark	5.00	20
Mary	5.50	22
Susie	4.25	18
  获取每个人的总薪水(printf格式化输出,%6s表示长度为6的字符串,%3d表示长度为3的整数)
# awk '{ printf("%6s Work hours: %3d Pay: %5d\n", $1,$3, $2* $3) }' emp.data 
  Theo Work hours:  12 Pay:    61
  Beth Work hours:   0 Pay:     0
   Dan Work hours:  11 Pay:    41
 Aaron Work hours:  30 Pay:   183
 kathy Work hours:  10 Pay:    40
  Mark Work hours:  20 Pay:   100
  Mary Work hours:  22 Pay:   121
 Susie Work hours:  18 Pay:    76

  获取M开头的人的总薪水(~表示匹配,/^M.*/为正则表达式)

# awk '$1 ~ /^M.*/ { printf("%6s Work hours: %3d Pay: %5d\n", $1,$3, $2* $3) }' emp.data 
  Mark Work hours:  20 Pay:   100
  Mary Work hours:  22 Pay:   121

  获取工作时间大于等于20的人的名称(>=为操作符)

# awk '$3 >= 20 {print $1}' emp.data 
Aaron
Mark
Mary

  获取工作时间大于等于20且时薪小于等于5.5的记录(两个pattern,一个action,&&表示两个pattern的逻辑与操作)

# awk '$3 >= 20&&$2 <= 5.5 {print $0}' emp.data 
Mark	5.00	20
Mary	5.50	22

  获取工作时间大于等于20或时薪小于5的记录(两个pattern,一个action,||表示两个pattern的逻辑或操作)

# awk '$3 >= 20||$2 <= 5 {print $0}' emp.data 
Beth	4.00	0
Dan	3.75	11
Aaron	6.10	30
kathy	4.00	10
Mark	5.00	20
Mary	5.50	22
Susie	4.25	18

  给时薪小于等于4的人加薪10%并输出加薪后的记录(一个pattern,两个action)

# awk '$2 <= 4 {$2*=1.1} { printf("%6s %4s %5s\n",$1,$2,$3) }' emp.data 
  Theo 5.10    12
  Beth  4.4     0
   Dan 4.125    11
 Aaron 6.10    30
 kathy  4.4    10
  Mark 5.00    20
  Mary 5.50    22
 Susie 4.25    18

  给时薪小于等于4且工时大于等于10的人加薪10%并输出加薪后的记录(两个pattern,两个action)

# awk '$2 <= 4&&$3 >=10 {$2*=1.1} { printf("%6s %4s %5s\n",$1,$2,$3) }' emp.data 
  Theo 5.10    12
  Beth 4.00     0
   Dan 4.125    11
 Aaron 6.10    30
 kathy  4.4    10
  Mark 5.00    20
  Mary 5.50    22
 Susie 4.25    18

            参考: http://www.aslibra.com/doc/awk.htm http://www.cnblogs.com/chengmo/archive/2010/10/11/1847515.html http://net.pku.edu.cn/~yhf/tutorial/awk_manual.html http://awk.readthedocs.org/en/latest/chapter-one.html

初识Rundeck

  • 背景

知道rundeck是在saltstack的QQ群里面,Google了一下觉得应该适合我们现在的需求。 官方网站:http://rundeck.org

  • 安装

从安装开始:http://rundeck.org/docs/administration/installation.html 最简单的方式是下载rundeck的jar包,然后直接启动,当然首先需要java环境,貌似需要1.6及以上版本支持。

#mkdir /usr/local/rundeck
#cd /usr/local/rundeck
#wget http://dl.bintray.com/rundeck/rundeck-maven/rundeck-launcher-2.4.0.jar
#java -XX:MaxPermSize=256m -Xmx1024m -jar rundeck-launcher-2.4.0.jar
设置环境变量
#vi /etc/profile
添加export RDECK_BASE=/usr/local/rundeck
#source /etc/profile
启动之后rundeck会在安装目录下新建一些文件夹
#tree -L 1 rundeck/
rundeck/
|-- etc
|-- libext
|-- projects
|-- rundeck-launcher-2.4.0.jar
|-- server
|-- tools
`-- var

6 directories, 1 file

启动之后会发现rundeck使用主机名作为URL地址,需要自己修改一下两个配置文件,将主机名修改为IP地址,也可再次修改服务端口,改好之后应该是像下面这样。

#more /usr/local/rundeck/etc/framework.properties
# framework.properties -
#

# ----------------------------------------------------------------
# Server connection information
# ----------------------------------------------------------------

framework.server.name = 192.168.100.100
framework.server.hostname = 192.168.100.100
framework.server.port = 4440
framework.server.url = http://192.168.100.100:4440
# Username/password used by CLI tools.
framework.server.username = admin
framework.server.password = admin

# ----------------------------------------------------------------
# Installation locations
# ----------------------------------------------------------------

rdeck.base=/usr/local/rundeck

framework.projects.dir=/usr/local/rundeck/projects
framework.etc.dir=/usr/local/rundeck/etc
framework.var.dir=/usr/local/rundeck/var
framework.tmp.dir=/usr/local/rundeck/var/tmp
framework.logs.dir=/usr/local/rundeck/var/logs
framework.libext.dir=/usr/local/rundeck/libext

# ----------------------------------------------------------------
# SSH defaults for node executor and file copier
# ----------------------------------------------------------------

framework.ssh.keypath = /root/.ssh/id_rsa
framework.ssh.user = root

# ssh connection timeout after a specified number of milliseconds.
# "0" value means wait forever.
framework.ssh.timeout = 0
#more /usr/local/rundeck/server/config/rundeck-config.properties
#loglevel.default is the default log level for jobs: ERROR,WARN,INFO,VERBOSE,DEBUG
loglevel.default=INFO
rdeck.base=/usr/local/rundeck

#rss.enabled if set to true enables RSS feeds that are public (non-authenticated)
rss.enabled=false
grails.serverURL=http://192.168.100.100:4440
dataSource.dbCreate = update
dataSource.url = jdbc:h2:file:/usr/local/rundeck/server/data/grailsdb;MVCC=true
将rundeck链接为服务
#ln -s /usr/local/rundeck/server/sbin/rundeckd /etc/init.d/
OK,现在可以启动了
#/etc/init.d/rundeck start
查看日志
root@RUNDECK:/usr/local/rundeck# tail -f /usr/local/rundeck/var/log/service.log 
2015-01-23 10:24:43.224:INFO:oejs.Server:jetty-7.6.0.v20120127
2015-01-23 10:24:45.509:INFO:oejw.StandardDescriptorProcessor:NO JSP Support for /, did not find org.apache.jasper.servlet.JspServlet
2015-01-23 10:24:46.390:INFO:/:Initializing Spring root WebApplicationContext
2015-01-23 10:24:58,708 INFO  BootStrap - Starting Rundeck 2.4.0-1...
2015-01-23 10:24:58,712 INFO  BootStrap - using rdeck.base config property: /usr/local/rundeck
2015-01-23 10:24:58,722 INFO  BootStrap - loaded configuration: /usr/local/rundeck/etc/framework.properties
2015-01-23 10:24:58,797 INFO  BootStrap - RSS feeds disabled
2015-01-23 10:24:59.841:INFO:oejsh.ContextHandler:started o.e.j.w.WebAppContext{/,file:/usr/local/rundeck/server/exp/webapp/},/usr/local/rundeck/server/exp/webapp
2015-01-23 10:24:59.947:INFO:/:Initializing Spring FrameworkServlet 'grails'
2015-01-23 10:25:00.002:INFO:oejs.AbstractConnector:Started SelectChannelConnector@0.0.0.0:4440
说明服务已启动,现在可以从前台访问了,http://192.168.100.100:4440
  • 配置NODE

第一次访问rundeck会提示你新建一个Project,按照提示新建,我只输入了project name,其余默认,然后create。 到这里,该添加nodes了,我使用ssh key免密码登陆,首先需要设置好rundeck服务器到其他node服务器的ssh认证,步骤省略,在设置好之后,按照http://rundeck.org/2.4.0/man5/resource-xml.html这里说的,编辑下面这个文件

#more /usr/local/rundeck/projects/Project_A/etc/resources.xml
<?xml version="1.0" encoding="UTF-8"?>

<project>
  <node name="192.168.100.100" description="Rundeck server node" tags="" hostname="192.168.100.100" osArch="amd64" osFamily="unix" osName="Linux" osVersion="3.11.0-15-generic" username="root"/>
  <node name="192.168.100.101" type="Node" tags="web" role="web" description="web" hostname="192.168.100.101:11223" osArch="amd64" osFamily="unix" osName="Linux" osVersion="3.11.0-15-generic" username="root"/>
  <node name="192.168.100.102" type="Node" tags="wap" role="wap" description="wap" hostname="192.168.100.102:11223" osArch="amd64" osFamily="unix" osName="Linux" osVersion="3.11.0-15-generic" username="root"/>
</project>

第一个node是rundeck服务器的信息,后面两个是添加进去的node信息,现在在前台的就可以看到了。

  • 配置用户

接下来需要添加用户,rundeck默认有两个用户角色,一个是admin,一个是user,所有用户(包括admin)都必须属于user,在/usr/local/rundeck/server/config/realm.properties文件里可以看到

# more realm.properties 
#
# This file defines users passwords and roles for a HashUserRealm
#
# The format is
#  <username>: <password>[,<rolename> ...]
#
# Passwords may be clear text, obfuscated or checksummed.  The class 
# org.mortbay.util.Password should be used to generate obfuscated
# passwords or password checksums
#
# This sets the temporary user accounts for the Rundeck app
#
admin:admin,user,admin
user:user,user

冒号前是用户名,后面是密码,再后面便是用户的角色,用逗号分隔。 我只想给用户read和run的权限,这块琢磨了很久,也不知道我的方法对不对。我没有找到新建用户组在哪里配置,只能一个一个添加用户,还好用户不多。参考了:http://rundeck.org/docs/administration/authenticating-users.html 和 http://rundeck.org/docs/administration/access-control-policy.html 新建用户

# cd /usr/local/rundeck
# java -cp server/lib/jetty-all-7.6.0.v20120127.jar org.eclipse.jetty.util.security.Password username passwd
passwd
OBF:1kwa1v251rwd1uvc1ljv1xfn1zen1xff1llv1uuu1rwh1v291svo1j611lxl1kqp1jyb
MD5:dd9162bbad929f64b0fa3
CRYPT:UMbF7fN.

给用户赋权限,可以新建多个下面的文件,每个用户对应一个文件

# cd /usr/local/rundeck/etc
# more username.aclpolicy 
description: username.
context:
  project: '.*'
for:
  resource:
    - allow: read
  job:
    - match:
        group: '.*'
        name: '.*'
      allow: [run,read]
by:
  username: username

---

description: username.
context:
  application: 'rundeck'
for:
  resource:
    - allow: read
  project:
    - allow: read
  storage:
    - allow: read
by:
  username: username

这样新用户就可以登陆了,现在系统中有三个用户,一个是admin,一个是user,一个是新建的username,记得在realm.properties文件中修改admin和user的密码。

  • 建立工作流

环境:nginx通过upstream代理两个glassfish节点 新建一个project名为wap_deploy,在project中建立如下几个job: 1,kick_out_192.168.0.1:7001 负责从nginx的upstream中踢出192.168.0.1:7001节点,workflow里的shell脚本如下

#!/bin/bash
# Rundeck job step: take a backend out of rotation by commenting out its
# "server <node>;" line in the nginx upstream, then reloading nginx.
# Idempotent — an already-commented line is only reported.
# Argument: $1 - upstream node, e.g. 192.168.0.1:7001

NODE=$1

if grep -q -E "#[[:blank:]]*server $NODE" /usr/local/nginx/conf/nginx.conf; then
    echo ''$NODE' had been Kicked already'
else
    # Prefix the matching "server <node>;" line with '#'.
    /bin/sed -i '/server '$NODE';/s/^/#/' /usr/local/nginx/conf/nginx.conf

    if /etc/init.d/nginx reload; then
        echo ''$NODE' had been Kicked from nginx configuration successfully'
    fi
fi

Arguments里写upstream中定义的节点即可,这里是192.168.0.1:7001 2,kick_out_192.168.0.2:7001 内容同上,shell脚本可复用,改变Arguments为192.168.0.2:7001即可。 3,add_in_192.168.0.1:7001 负责把部署好的节点重新加入upstream,workflow里的shell脚本如下

#!/bin/bash
# Rundeck job step: put a backend back into rotation by removing the leading
# '#' from its "server <node>;" line in the nginx upstream, then reloading.
# Argument: $1 - upstream node, e.g. 192.168.0.1:7001
NODE=$1

# Strip the comment marker from the matching "server <node>;" line.
if /bin/sed -i '/server '$NODE';/s/^#//' /usr/local/nginx/conf/nginx.conf; then
    echo ''$NODE' had been Added to nginx configuration successfully'
fi

/etc/init.d/nginx reload

Arguments里写upstream中定义的节点即可,这里是192.168.0.1:7001 4, add_in_192.168.0.2:7001 内容同上,shell脚本可复用,改变Arguments为192.168.0.2:7001即可。 5,deploy_wap_192.168.0.1 负责从hudson上取出打包好的war程序,复制到部署目录并执行部署,共三个过程。 workflow中先添加“kick_out_192.168.0.1:7001”job,以便从upstream中踢出节点。 然后第二步定义部署过程,shell脚本如下

#!/bin/bash
# FIX: the shebang was "#/bin/bash" (missing '!'), i.e. just a comment.
#
# Rundeck deploy step: fetch the war built by hudson, keep a timestamped copy
# of the previous war for rollback, then (re)deploy it to glassfish.
# Arguments:
#   $1 SVN_SERVER  - hudson host (ssh key auth required)
#   $2 HUDSON_JOB  - hudson job name
#   $3 WAR_NAME    - war file name
#   $4 DOMAIN      - target glassfish domain
#   $5 CSL_PORT    - admin console port of that domain
#   $6 GNAME       - asadmin --name value
#   $7 CONTEXTROOT - context root (empty deploys at /)

source /etc/profile

SVN_SERVER=$1
HUDSON_JOB=$2
WAR_NAME=$3
DOMAIN=$4
CSL_PORT=$5
GNAME=$6
CONTEXTROOT=$7

DEPLOY_DIR=/usr/local/glassfish3/glassfish/domains/$DOMAIN/deployment

# Back up the currently deployed war (if any) with a timestamp suffix so a
# rollback is always possible, then fetch the fresh build. The scp was
# duplicated in both branches of the original if/else; it is hoisted here.
if [ -e $DEPLOY_DIR/$WAR_NAME ]; then
    cd $DEPLOY_DIR
    /bin/cp $WAR_NAME $WAR_NAME'_'`date +%Y%m%d%H%M`
fi

scp $SVN_SERVER:/usr/local/hudson/jobs/$HUDSON_JOB/workspace/target/$WAR_NAME $DEPLOY_DIR/

if [ $? -eq 0 ];then
    echo $WAR_NAME ' had been copied successfully'
fi

# --force redeploys over an existing application of the same --name.
/usr/local/glassfish3/glassfish/bin/asadmin --user admin --passwordfile /home/abc/p.txt --port $CSL_PORT deploy --force --name=$GNAME --contextroot /$CONTEXTROOT $DEPLOY_DIR/$WAR_NAME

Arguments略多,第一个为hudson服务器的IP(需配置免密登陆),第二个是hudson的工程名,第三个是war包名称,第四个是部署到glassfish的哪个domain,第五个是所在domain的控制台端口,第六个是asadmin的–name参数,第七个是部署路径(为空则部署在根目录)。如:192.168.0.3 wap wap.war domain1 4800 wap wap 第三步,添加“add_in_192.168.0.1:7001”job,以便把节点加入upstream中。 6, deploy_wap_192.168.0.2 步骤同deploy_wap_192.168.0.1,修改Arguments即可。 7, deploy_wap 这是最后一个job,在workflow里面添加deploy_wap_192.168.0.1和deploy_wap_192.168.0.2两个job即可。 在需要部署的时候,开发人员需要做的只是在hudson上打好包,然后登陆rundeck,进入wap_deploy运行deploy_wap这个job即可,在Log output中也可以查看部署过程中生成的日志。 这样做节省了从服务器上下载及上传war包的时间,也避免了在运维人员在服务器上的误操作,旧的war包更规范的保存在服务器上,方便回退时使用,在rundeck的project日志中可以看到部署的频率及人员,为之后的优化提供参考。

cut命令

cut主要是用来分割文件中的字符串,并且根据要求显示。 测试文件为/etc/passwd___

# cat /etc/passwd___ 
root:x:0:0:root:/root:/bin/bash
daemon:x:1:1:daemon:/usr/sbin:/bin/sh
bin:x:2:2:bin:/bin:/bin/sh
sys:x:3:3:sys:/dev:/bin/sh
sync:x:4:65534:sync:/bin:/bin/sync
games:x:5:60:games:/usr/games:/bin/sh

-b选项可以按照字节截取,-c和-b的区别在于,-c按照字符截取,在处理中文时方便

# cut -b 1 /etc/passwd___
r
d
b
s
s
g
# cut -b 1-4 /etc/passwd___
root
daem
bin:
sys:
sync
game
# cut -b 1-4,10-15 /etc/passwd___
root0:root
daem1:1:da
bin::bin:/
sys::sys:/
sync65534:
game:60:ga
# cut -b -4 /etc/passwd___
root
daem
bin:
sys:
sync
game
# cut -b 4- /etc/passwd___
t:x:0:0:root:/root:/bin/bash
mon:x:1:1:daemon:/usr/sbin:/bin/sh
:x:2:2:bin:/bin:/bin/sh
:x:3:3:sys:/dev:/bin/sh
c:x:4:65534:sync:/bin:/bin/sync
es:x:5:60:games:/usr/games:/bin/sh
-d用来确定分隔符,-f用来取出字段
# cut -f 1 /etc/passwd___
root:x:0:0:root:/root:/bin/bash
daemon:x:1:1:daemon:/usr/sbin:/bin/sh
bin:x:2:2:bin:/bin:/bin/sh
sys:x:3:3:sys:/dev:/bin/sh
sync:x:4:65534:sync:/bin:/bin/sync
games:x:5:60:games:/usr/games:/bin/sh
# cut -d : -f 1 /etc/passwd___
root
daemon
bin
sys
sync
games
# cut -d : -f 1,3 /etc/passwd___
root:0
daemon:1
bin:2
sys:3
sync:4
games:5

-s用来控制输出

# cut -d : -f 1-4 -s --output-delimiter="|" /etc/passwd___
root|x|0|0
daemon|x|1|1
bin|x|2|2
sys|x|3|3
sync|x|4|65534
games|x|5|60

    参考:http://roclinux.cn/?p=1328 和 http://blog.51yip.com/linux/1077.html