Nginx configuration for pointing one website at different document roots

upstream fastcgi_backend {
    # Two PHP-FPM peers (TCP and unix socket) balanced round-robin;
    # keep up to 10 idle connections to FPM alive per worker.
    server 127.0.0.1:9000;
    server unix:/var/run/php5-fpm.sock;
    keepalive 10;
}
...
server {
  ...
  ...
  location ~ \.php {
    set $php_root /var/www/website/abc/public;
    include /etc/nginx/fastcgi_params;
 
    # Requests under /api/user/(photos|posts|links) are served from the REST app
    # instead of the default document root.
    if ($request_uri ~ /api/user/(photos|posts|links) ) {
      set $php_root /var/www/website/rest/public;
    }
 
    fastcgi_split_path_info ^(.+\.php)(/.+)$;
    fastcgi_param PATH_INFO $fastcgi_path_info;
    fastcgi_param PATH_TRANSLATED $php_root$fastcgi_path_info;
    fastcgi_param SCRIPT_NAME $fastcgi_script_name;
    fastcgi_param SCRIPT_FILENAME $php_root$fastcgi_script_name;
    fastcgi_pass fastcgi_backend;
    fastcgi_index index.php;
  }
  ...
}
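
A quick smoke test for the routing; example.com is a placeholder, and both apps are assumed to use /index.php as their front controller:

curl -s -o /dev/null -w 'default root -> %{http_code}\n' 'http://example.com/index.php'
curl -s -o /dev/null -w 'rest root    -> %{http_code}\n' 'http://example.com/index.php/api/user/photos'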

Add basic HTTP access auth via HAProxy

userlist UsersForES
  user your_username insecure-password your_password
 
frontend elasticsearch_pwd
    bind *:9201
    mode http
    default_backend es-nodes_pwd
 
backend es-nodes_pwd
    acl AuthOkay_ES http_auth(UsersForES)
    http-request auth realm ES if !AuthOkay_ES
    mode http
    balance roundrobin
    option forwardfor
    server es-node1.yemaosheng.com 10.0.0.2:9200 check
    server es-node2.yemaosheng.com 10.0.0.3:9200 check
    server es-node3.yemaosheng.com 10.0.0.4:9200 check
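
Quick check that auth is enforced (credentials from the userlist above):

curl -s -o /dev/null -w '%{http_code}\n' http://localhost:9201/        # expect 401
curl -s -u your_username:your_password 'http://localhost:9201/_cluster/health?pretty'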

HAProxy config for Redis Sentinel

frontend redis-cluster
        mode tcp
        option tcplog
        bind *:6379
        # If at least 3 sentinels agree with the redis host that it is master, use it.
        use_backend redis-node1 if { srv_is_up(redis-node1/redis-1:10.25.0.2:6379) } { nbsrv(check_master_redis-1) ge 3 }            
        use_backend redis-node2 if { srv_is_up(redis-node2/redis-2:10.25.0.3:6379) } { nbsrv(check_master_redis-2) ge 3 }
        # If sentinel can't tell us, fall back to our own master detection
        default_backend redis-cluster
 
backend redis-node1
        mode tcp
        balance first
        option tcp-check
        tcp-check send AUTH\ password\r\n
        tcp-check expect string +OK
        tcp-check send info\ replication\r\n
        tcp-check expect string role:master
        server redis-1:10.25.0.2:6379 10.25.0.2:6379 maxconn 5000 check inter 1s
 
backend redis-node2
        mode tcp
        balance first
        option tcp-check
        tcp-check send AUTH\ password\r\n
        tcp-check expect string +OK
        tcp-check send info\ replication\r\n
        tcp-check expect string role:master
        server redis-2:10.25.0.3:6379 10.25.0.3:6379 maxconn 5000 check inter 1s
 
backend redis-cluster
        mode tcp
        balance first
        option tcp-check
        tcp-check send AUTH\ password\r\n
        tcp-check expect string +OK
        tcp-check send info\ replication\r\n
        tcp-check expect string role:master
        tcp-check send info\ persistence\r\n           
        tcp-check expect string loading:0
        server redis-1:10.25.0.2:6379 10.25.0.2:6379 maxconn 5000 check inter 1s
        server redis-2:10.25.0.3:6379 10.25.0.3:6379 maxconn 5000 check inter 1s
 
## Check 4 sentinels to see if they think redis-1 (10.25.0.2) is master
backend check_master_redis-1
        mode tcp
        option tcp-check
        tcp-check send PING\r\n
        tcp-check expect string +PONG
        tcp-check send SENTINEL\ master\ redis\r\n
        tcp-check expect string 10.25.0.2
        tcp-check send QUIT\r\n
        tcp-check expect string +OK
 
        server redis-1:10.25.0.2:26379 10.25.0.2:26379 check inter 2s
        server redis-2:10.25.0.3:26379 10.25.0.3:26379 check inter 2s
        server redis-sentinel1:10.25.0.4:26379 10.25.0.4:26379 check inter 2s
        server redis-sentinel2:10.25.0.5:26379 10.25.0.5:26379 check inter 2s
 
## Check 4 sentinels to see if they think redis-2 (10.25.0.3) is master
backend check_master_redis-2
        mode tcp
        option tcp-check
        tcp-check send PING\r\n
        tcp-check expect string +PONG
        tcp-check send SENTINEL\ master\ redis\r\n
        tcp-check expect string 10.25.0.3
        tcp-check send QUIT\r\n
        tcp-check expect string +OK
 
        server redis-1:10.25.0.2:26379 10.25.0.2:26379 check inter 2s
        server redis-2:10.25.0.3:26379 10.25.0.3:26379 check inter 2s
        server redis-sentinel1:10.25.0.4:26379 10.25.0.4:26379 check inter 2s
        server redis-sentinel2:10.25.0.5:26379 10.25.0.5:26379 check inter 2s
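
To confirm that clients going through HAProxy always land on the current master (10.25.0.1 is a hypothetical front-end address; 'password' matches the AUTH string in the tcp-checks above):

redis-cli -h 10.25.0.1 -p 6379 -a password INFO replication | grep '^role'
# expected: role:master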

AWS CloudWatch query script for Zabbix

#!/usr/bin/python
import boto.ec2.cloudwatch
import sys
import datetime
 
try:
    metName = sys.argv[1]
    funcName = sys.argv[2]
    dimSpace = sys.argv[3]
    region = sys.argv[4]
    accessKey = sys.argv[5]
    secretKey = sys.argv[6]
 
except IndexError:
    print "Usage: GetFromCloudwatch.py MetricName Function Dimension Region AWS_ACCESS_KEY AWS_SECRET_ACCESS_KEY"
    print "Example: GetFromCloudwatch.py FreeableMemory Average \"CacheClusterId=ElsticCacheName\" us-east-1 ACCESS_KEY SECRET_ACCESS_KEY"
    print "         GetFromCloudwatch.py CPUUtilization Average \"DBInstanceIdentifier=RDSName\" us-east-1 ACCESS_KEY SECRET_ACCESS_KEY"
    print "         GetFromCloudwatch.py ApproximateNumberOfMessagesVisible Average \"QueueName=SQSName\" us-east-1 ACCESS_KEY SECRET_ACCESS_KEY"
    sys.exit(1)
 
dim = {}
firstSplit = dimSpace.split(',')
for word in firstSplit:
    secondSplit = word.split('=')
    dim[secondSplit[0]] = secondSplit[1]
 
regions = boto.ec2.cloudwatch.regions()
 
reg = ''
for r in regions:
    if region == r.name:
        reg = r
 
c = boto.ec2.cloudwatch.CloudWatchConnection(aws_access_key_id=accessKey, aws_secret_access_key=secretKey, region=reg)
metrics = c.list_metrics(dimensions=dim)
 
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(minutes=15)
 
dataPoints = []
for met in metrics:
    if met.name == metName:
        dataPoints = met.query(start, end, funcName)
 
if len(dataPoints) > 0:
    # pick the most recent datapoint returned in the 15-minute window
    latest = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
    index = 0
    for i in range(0, len(dataPoints)):
        if latest < dataPoints[i][u'Timestamp']:
            latest = dataPoints[i][u'Timestamp']
            index = i
    value = None
    for key in dataPoints[index].keys():
        if funcName in key:
            value = dataPoints[index][key]
    print value
else:
    print 'Error! No response from Amazon.'
    sys.exit(2)
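
To hand the value to Zabbix, run the script by hand first, then wire it in as an agent UserParameter (install path and key name are assumptions):

./GetFromCloudwatch.py CPUUtilization Average "DBInstanceIdentifier=RDSName" us-east-1 ACCESS_KEY SECRET_ACCESS_KEY

# /etc/zabbix/zabbix_agentd.conf
UserParameter=aws.cloudwatch[*],/usr/local/bin/GetFromCloudwatch.py $1 $2 "$3" $4 $5 $6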

Connecting AWS VPCs in different regions via IPsec


AWS China <-> AWS Sydney

AWS China VPC: (route table screenshot omitted; see the route note at the end of this section)

AWS China EC2:

root@ip-10-33-30-103:~# apt-get install openswan
root@ip-10-33-30-103:~# cat /etc/ipsec.conf 
config setup
    protostack=netkey
    interfaces=%defaultroute
    nat_traversal=yes
    force_keepalive=yes
    keep_alive=60
    oe=no
    nhelpers=0
conn ToAWSSydneyVPC
    left=10.33.30.103
    leftsubnets=10.33.0.0/16
    leftid=@AwsChinaGW
    right=52.63.189.251
    rightsubnets=172.33.0.0/16
    rightid=@AwsSydneyGW
    forceencaps=yes
    authby=secret
    auto=ignore
 
root@ip-10-33-30-103:~# cat /etc/ipsec.secrets 
@AwsChinaGW  @AwsSydneyGW: PSK "123321112233"
 
root@ip-10-33-30-103:~# cat /etc/sysctl.conf | grep -v '^#' | grep -v '^$'
net.ipv4.ip_forward = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.send_redirects = 0
root@ip-10-33-30-103:~# sysctl -p
 
root@ip-10-33-30-103:~# ipsec verify
root@ip-10-33-30-103:~# service ipsec start
root@ip-10-33-30-103:~# ipsec auto --add ToAWSSydneyVPC
root@ip-10-33-30-103:~# ipsec auto --up ToAWSSydneyVPC
root@ip-10-33-30-103:~# service ipsec status

AWS Sydney VPC: (route table screenshot omitted; see the route note at the end of this section)

AWS Sydney EC2:

root@ip-172-33-1-190:~# apt-get install openswan
root@ip-172-33-1-190:~# cat /etc/ipsec.conf 
config setup
    protostack=netkey
    interfaces=%defaultroute
    nat_traversal=yes
    force_keepalive=yes
    keep_alive=60
    oe=no
    nhelpers=0
conn ToAWSCnVPC
    left=172.33.1.190
    leftsubnets=172.33.0.0/16
    leftid=@AwsSydneyGW
    right=54.222.193.171
    rightsubnets=10.33.0.0/16
    rightid=@AwsChinaGW
    forceencaps=yes
    authby=secret
    auto=ignore
 
root@ip-172-33-1-190:~# cat /etc/ipsec.secrets 
@AwsSydneyGW  @AwsChinaGW: PSK "123321112233"
 
root@ip-172-33-1-190:~# cat /etc/sysctl.conf | grep -v '^#' | grep -v '^$'
net.ipv4.ip_forward = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.send_redirects = 0
root@ip-172-33-1-190:~# sysctl -p
 
root@ip-172-33-1-190:~# ipsec verify
root@ip-172-33-1-190:~# service ipsec start
root@ip-172-33-1-190:~# ipsec auto --add ToAWSCnVPC
root@ip-172-33-1-190:~# ipsec auto --up ToAWSCnVPC
root@ip-172-33-1-190:~# service ipsec status

Make sure the security groups on both sides allow UDP 500, UDP 4500, and IP protocols 50 (ESP) and 51 (AH); note these are IP protocol numbers, not TCP ports.
Disable 'Source/Dest checking' on both EC2 instances.
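
Other instances in each VPC can only reach the remote side if the VPC route table sends the remote CIDR to the tunnel instance. A sketch with the AWS CLI; the route table and instance IDs are placeholders:

# China side: route Sydney's CIDR through the tunnel EC2 instance
aws ec2 create-route --route-table-id rtb-xxxxxxxx --destination-cidr-block 172.33.0.0/16 --instance-id i-xxxxxxxx

# then test end to end from a China-side host
ping -c 3 172.33.1.190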

Elasticsearch cluster on Azure

#3 VM on Azure
#node1 10.0.0.3
#node2 10.0.0.4
#node3 10.0.0.5
 
apt-get update;
apt-get install default-jdk;
wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -;
echo "deb http://packages.elastic.co/elasticsearch/2.x/debian stable main" | sudo tee -a /etc/apt/sources.list.d/elasticsearch-2.x.list;
apt-get update && sudo apt-get install elasticsearch;
update-rc.d elasticsearch defaults 95 10;
 
apt-get install lvm2
fdisk /dev/sdc
pvcreate /dev/sdc1
pvdisplay 
vgcreate VolGroup00 /dev/sdc1
vgdisplay 
lvcreate -L 500GB -n lvData VolGroup00
lvdisplay 
mkfs -t ext4 /dev/VolGroup00/lvData
mkdir /data
mount /dev/VolGroup00/lvData /data/
blkid
  /dev/mapper/VolGroup00-lvData: UUID="b65c5a78-e078-4ca8-8119-2de94a414002" TYPE="ext4" 
cat /etc/fstab
  UUID=b65c5a78-e078-4ca8-8119-2de94a414002  /data   auto    defaults,nobootwait,nosuid,noexec,noatime,nodiratime    0 0
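
Quick sanity check that the volume is mounted and the fstab line is valid:

df -h /data     # the logical volume should be mounted here
mount -a        # re-reads fstab; errors here mean a bad entry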
 
cat /etc/elasticsearch/elasticsearch.yml 
network.host: 0.0.0.0
cluster.name: es-cluster
node.name: node?
discovery.zen.minimum_master_nodes: 2   # (3 / 2) + 1 with 3 master-eligible nodes, avoids split brain
discovery.zen.ping.timeout: 10s
discovery.zen.ping.multicast.enabled: false
discovery.zen.ping.unicast.hosts: ["10.0.0.3"]
index.number_of_shards: 3
index.number_of_replicas: 1 
path.data: /data
 
mkdir /data/es-cluster
chown elasticsearch.elasticsearch /data/es-cluster
 
cat /usr/share/elasticsearch/bin/elasticsearch.in.sh
...
if [ "x$ES_MIN_MEM" = "x" ]; then
    ES_MIN_MEM=?g
fi
if [ "x$ES_MAX_MEM" = "x" ]; then
    ES_MAX_MEM=??g
fi
...
 
/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head
/usr/share/elasticsearch/bin/plugin install analysis-smartcn
/usr/share/elasticsearch/bin/plugin install analysis-kuromoji
/usr/share/elasticsearch/bin/plugin list
 
/etc/init.d/elasticsearch start
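
Once all three nodes are up, verify the cluster from any node:

curl -s 'localhost:9200/_cluster/health?pretty'   # expect "number_of_nodes" : 3
curl -s 'localhost:9200/_cat/nodes?v'             # the elected master is marked with *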
move
Moves a shard from one node to another; takes an index name and shard number.

cancel
Cancels allocation of a shard; takes an index name and shard number.
The node parameter specifies the node on which to cancel the in-progress allocation.
The allow_primary parameter allows cancelling allocation of a primary shard.

allocate
Allocates an unassigned shard to a given node; takes an index name and shard number.
The node parameter specifies which node to allocate to.
The allow_primary parameter can force allocation of a primary shard, which may cause data loss.
 
curl -XPOST 'localhost:9200/_cluster/reroute' -d '{
    "commands" : [
        {
          "move" : {
              "index" : "index_name", "shard" : shard_number,
              "from_node" : "node_name_A", "to_node" : "node_name_B"
          }
        },
        {
          "cancel" : {
              "index" : "index_name", "shard" : shard_number, "node" : "node_name"
          }
        },
        {
          "allocate" : {
              "index" : "index_name", "shard" : shard_number, "node" : "node_name"
          }
        }
    ]
}'
 
curl -XPOST localhost:9200/_aliases -d '
{
    "actions": [
        { "remove": {
            "alias": "别名",
            "index": "索引名A"
        }},
        { "add": {
            "alias": "别名",
            "index": "索引名B"
        }}
    ]
}
'
 
curl localhost:9200/_nodes/node_name/plugins?pretty=true
 
curl -s localhost:9200/_cat/shards
 
 
Elasticsearch rolling version upgrade
https://www.elastic.co/guide/en/elasticsearch/reference/current/rolling-upgrades.html
1.
curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{
  "transient": {
    "cluster.routing.allocation.enable": "none"
  }
}'
 
2.
curl -XPOST http://localhost:9200/_flush/synced
 
3.
apt-get update;
apt-get --only-upgrade install elasticsearch
 
4.
/usr/share/elasticsearch/bin/plugin remove analysis-kuromoji;
/usr/share/elasticsearch/bin/plugin remove analysis-smartcn;
/usr/share/elasticsearch/bin/plugin remove analysis-icu;
/usr/share/elasticsearch/bin/plugin remove mobz/elasticsearch-head;
 
/usr/share/elasticsearch/bin/plugin install analysis-kuromoji;
/usr/share/elasticsearch/bin/plugin install analysis-smartcn;
/usr/share/elasticsearch/bin/plugin install analysis-icu;
/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head;
 
5.
curl -XGET http://localhost:9200/_cat/nodes
 
6.
curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{
  "transient": {
    "cluster.routing.allocation.enable": "all"
  }
}'
 
7.
curl -XGET http://localhost:9200/_cat/health
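
When rolling through the nodes, wait for the cluster to go green again before upgrading the next one; a minimal guard between steps 6 and 7:

until curl -s localhost:9200/_cat/health | grep -q green; do echo 'waiting for green...'; sleep 10; done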

Jenkins installation and configuration


For details see: http://jenkins-php.org

apt-get install default-jdk
wget -q -O - https://jenkins-ci.org/debian/jenkins-ci.org.key | apt-key add -
sh -c 'echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list'
apt-get update
apt-get install jenkins
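
The jenkins-cli.jar used below can be fetched from the running master itself:

wget http://localhost:8080/jnlpJars/jenkins-cli.jar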
 
java -jar jenkins-cli.jar -s http://localhost:8080 install-plugin git phing checkstyle cloverphp crap4j dry htmlpublisher jdepend plot pmd violations warnings xunit
java -jar jenkins-cli.jar -s http://localhost:8080 safe-restart
 
apt-get install phpunit phpcodesniffer phploc pdepend phpmd phpcpd phpdox php5-xdebug libxml2-utils
1.
curl -L https://raw.githubusercontent.com/sebastianbergmann/php-jenkins-template/master/config.xml | java -jar jenkins-cli.jar -s http://localhost:8080 create-job php-template
 
Or add the template manually:
 
cd $JENKINS_HOME/jobs
mkdir php-template
cd php-template
wget https://raw.github.com/sebastianbergmann/php-jenkins-template/master/config.xml
cd ..
chown -R jenkins:jenkins php-template/
Reload Jenkins configuration, for instance using the Jenkins CLI:
java -jar jenkins-cli.jar -s http://localhost:8080 reload-configuration
 
2. Click on "New Job".
3. Enter a "Job name".
4. Select "Copy existing job" and enter "php-template" into the "Copy from" field.
5. Click "OK".
6. Uncheck the "Disable Build" option.
7. Fill in your "Source Code Management" information.
8. Configure a "Build Trigger", for instance "Poll SCM".
9. Click "Save".

Hive installation and configuration


Prerequisite: Hadoop cluster installation and configuration

wget http://mirrors.gigenet.com/apache/hive/stable-2/apache-hive-2.0.0-bin.tar.gz
 
tar zxf apache-hive-2.0.0-bin.tar.gz
mv apache-hive-2.0.0-bin /usr/local/
cd /usr/local
mv apache-hive-2.0.0-bin hive
 
cat /etc/profile
...
HIVE_HOME=/usr/local/hive
PATH=$PATH:$HIVE_HOME/bin
export HIVE_HOME PATH
...
 
cd /usr/local/hive/conf
cp hive-default.xml.template hive-default.xml
vi /usr/local/hive/conf/hive-site.xml
<configuration>
        <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://Master:3306/hive?createDatabaseIfNotExist=true</value>
        <description>JDBC connect string for a JDBC metastore</description>
        </property>
        <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.jdbc.Driver</value>
        <description>Driver class name for a JDBC metastore</description>
        </property>
        <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>hive</value>
        <description>username to use against metastore database</description>
        </property>
        <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>hive</value>
        <description>password to use against metastore database</description>
        </property>
</configuration>
mysql>CREATE USER 'hive' IDENTIFIED BY 'hive';
mysql>GRANT ALL PRIVILEGES ON *.* TO 'hive'@'hadoop-master' WITH GRANT OPTION;
mysql>FLUSH privileges;
mysql>CREATE DATABASE hive;
cd ~
wget http://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-5.1.38.tar.gz
tar zxf mysql-connector-java-5.1.38.tar.gz
cd mysql-connector-java-5.1.38
cp mysql-connector-java-5.1.38-bin.jar /usr/local/hive/lib/
 
scp -r /usr/local/hive Slave1:/usr/local/
scp -r /usr/local/hive Slave2:/usr/local/
#create the schema
schematool -initSchema -dbType mysql
 
#for client on Slave1,2
hive --service metastore &
 
hive
hive> show databases;

#Slave1,2

cat /usr/local/hive/conf/hive-site.xml
<configuration>
    <property>  
        <name>hive.metastore.uris</name>  
        <value>thrift://Master:9083</value>
    </property>
</configuration>
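
With the metastore service running on Master, a quick check from a client node (the database name is just an example):

hive -e 'show databases; create database if not exists smoke_test; show databases;'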