Connecting AWS VPCs in different Regions with IPsec


AWS China<->AWS Sydney

AWS China VPC:

AWS China EC2:

root@ip-10-33-30-103:~# apt-get install openswan
root@ip-10-33-30-103:~# cat /etc/ipsec.conf 
config setup
    protostack=netkey
    interfaces=%defaultroute
    nat_traversal=yes
    force_keepalive=yes
    keep_alive=60
    oe=no
    nhelpers=0
conn ToAWSSydneyVPC
    # left = this gateway's private IP (the China EC2 instance sits behind 1:1 NAT)
    left=10.33.30.103
    leftsubnets=10.33.0.0/16
    leftid=@AwsChinaGW
    # right = public IP of the Sydney gateway instance
    right=52.63.189.251
    rightsubnets=172.33.0.0/16
    rightid=@AwsSydneyGW
    # both gateways are NATed, so force UDP encapsulation
    forceencaps=yes
    authby=secret
    auto=ignore
 
root@ip-10-33-30-103:~# cat /etc/ipsec.secrets 
@AwsChinaGW  @AwsSydneyGW: PSK "123321112233"
 
root@ip-10-33-30-103:~# cat /etc/sysctl.conf | grep -v '^#' | grep -v '^$'
net.ipv4.ip_forward = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.send_redirects = 0
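These sysctl changes only take effect after reloading the kernel parameters (or a reboot); run this on both gateways before bringing the tunnel up:

root@ip-10-33-30-103:~# sysctl -p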
 
root@ip-10-33-30-103:~# ipsec verify
root@ip-10-33-30-103:~# service ipsec start
root@ip-10-33-30-103:~# ipsec auto --add ToAWSSydneyVPC
root@ip-10-33-30-103:~# ipsec auto --up ToAWSSydneyVPC
root@ip-10-33-30-103:~# service ipsec status

AWS Sydney VPC:

AWS Sydney EC2:

root@ip-172-33-1-190:~# apt-get install openswan
root@ip-172-33-1-190:~# cat /etc/ipsec.conf 
config setup
    protostack=netkey
    interfaces=%defaultroute
    nat_traversal=yes
    force_keepalive=yes
    keep_alive=60
    oe=no
    nhelpers=0
conn ToAWSCnVPC
    left=172.33.1.190
    leftsubnets=172.33.0.0/16
    leftid=@AwsSydneyGW
    right=54.222.193.171
    rightsubnets=10.33.0.0/16
    rightid=@AwsChinaGW
    forceencaps=yes
    authby=secret
    auto=ignore
 
root@ip-172-33-1-190:~# cat /etc/ipsec.secrets 
@AwsSydneyGW  @AwsChinaGW: PSK "123321112233"
 
root@ip-172-33-1-190:~# cat /etc/sysctl.conf | grep -v '^#' | grep -v '^$'
net.ipv4.ip_forward = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.send_redirects = 0
 
root@ip-172-33-1-190:~# ipsec verify
root@ip-172-33-1-190:~# service ipsec start
root@ip-172-33-1-190:~# ipsec auto --add ToAWSCnVPC
root@ip-172-33-1-190:~# ipsec auto --up ToAWSCnVPC
root@ip-172-33-1-190:~# service ipsec status

Make sure the security groups of both EC2 instances allow UDP 500, UDP 4500, and IP protocols 50 (ESP) and 51 (AH).
Disable 'Source/Dest checking' on both EC2 instances.
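Both prerequisites can also be applied with the AWS CLI; a rough sketch, where the instance ID, security group ID and peer gateway IP are placeholders for your own values:

# disable source/dest check on the gateway instance
aws ec2 modify-instance-attribute --instance-id i-xxxxxxxx --no-source-dest-check
# allow IKE/NAT-T and the ESP/AH protocols from the remote gateway
aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol udp --port 500 --cidr <peer-gateway-ip>/32
aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol udp --port 4500 --cidr <peer-gateway-ip>/32
aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol 50 --cidr <peer-gateway-ip>/32
aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol 51 --cidr <peer-gateway-ip>/32

Once both sides report the connection up, a ping from one gateway to the other side's private address (e.g. 172.33.1.190 from the China instance) confirms traffic is flowing through the tunnel.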

Elasticsearch cluster on Azure

#3 VM on Azure
#node1 10.0.0.3
#node2 10.0.0.4
#node3 10.0.0.5
 
apt-get update;
apt-get install default-jdk;
wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -;
echo "deb http://packages.elastic.co/elasticsearch/2.x/debian stable main" | sudo tee -a /etc/apt/sources.list.d/elasticsearch-2.x.list;
apt-get update && sudo apt-get install elasticsearch;
update-rc.d elasticsearch defaults 95 10;
 
apt-get install lvm2
fdisk /dev/sdc
pvcreate /dev/sdc1
pvdisplay 
vgcreate VolGroup00 /dev/sdc1
vgdisplay 
lvcreate -L 500G -n lvData VolGroup00
lvdisplay 
mkfs -t ext4 /dev/VolGroup00/lvData
mkdir /data
mount /dev/VolGroup00/lvData /data/
blkid
  /dev/mapper/VolGroup00-lvData: UUID="b65c5a78-e078-4ca8-8119-2de94a414002" TYPE="ext4" 
cat /etc/fstab
  UUID=b65c5a78-e078-4ca8-8119-2de94a414002  /data   auto    defaults,nobootwait,nosuid,noexec,noatime,nodiratime    0 0
 
cat /etc/elasticsearch/elasticsearch.yml 
network.host: 0.0.0.0
cluster.name: es-cluster
node.name: node?   # node1 / node2 / node3, one per host
discovery.zen.minimum_master_nodes: 2   # (3 master-eligible nodes / 2) + 1, to avoid split brain
discovery.zen.ping.timeout: 10s
discovery.zen.ping.multicast.enabled: false
discovery.zen.ping.unicast.hosts: ["10.0.0.3", "10.0.0.4", "10.0.0.5"]
index.number_of_shards: 3
index.number_of_replicas: 1
path.data: /data/es-cluster
 
mkdir /data/es-cluster
chown elasticsearch.elasticsearch /data/es-cluster
 
cat /usr/share/elasticsearch/bin/elasticsearch.in.sh
...
if [ "x$ES_MIN_MEM" = "x" ]; then
    ES_MIN_MEM=?g
fi
if [ "x$ES_MAX_MEM" = "x" ]; then
    ES_MAX_MEM=??g
fi
...
 
/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head
/usr/share/elasticsearch/bin/plugin install analysis-smartcn
/usr/share/elasticsearch/bin/plugin install analysis-kuromoji
/usr/share/elasticsearch/bin/plugin list
 
/etc/init.d/elasticsearch start
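Once Elasticsearch has been started on all three VMs, cluster formation can be verified from any node (a quick sanity check, not part of the original install steps):

curl -s localhost:9200/_cat/nodes
curl -s 'localhost:9200/_cluster/health?pretty'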
move
Moves a shard from one node to another; takes an index name and shard number.
 
cancel
Cancels allocation of a shard; takes an index name and shard number.
The node parameter specifies the node on which to cancel the in-progress allocation.
The allow_primary parameter allows cancelling allocation of a primary shard.
 
allocate
Assigns an unassigned shard to a given node; takes an index name and shard number.
The node parameter specifies which node to allocate to.
The allow_primary parameter can force allocation of a primary shard, which may cause data loss.
 
curl -XPOST 'localhost:9200/_cluster/reroute' -d '{
    "commands" : [
        {
          "move" : {
              "index" : "index_name", "shard" : shard_number,
              "from_node" : "node_name_A", "to_node" : "node_name_B"
          }
        },
        {
          "cancel" : {
              "index" : "index_name", "shard" : shard_number, "node" : "node_name"
          }
        },
        {
          "allocate" : {
              "index" : "index_name", "shard" : shard_number, "node" : "node_name"
          }
        }
    ]
}'
 
curl -XPOST localhost:9200/_aliases -d '
{
    "actions": [
        { "remove": {
            "alias": "alias_name",
            "index": "index_name_A"
        }},
        { "add": {
            "alias": "alias_name",
            "index": "index_name_B"
        }}
    ]
}
'
 
curl localhost:9200/_nodes/node_name/plugins?pretty=true
 
curl -s localhost:9200/_cat/shards
 
 
Elasticsearch version upgrade (rolling upgrade)
https://www.elastic.co/guide/en/elasticsearch/reference/current/rolling-upgrades.html
1. Disable shard allocation:
curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{
  "transient": {
    "cluster.routing.allocation.enable": "none"
  }
}'
 
2. Perform a synced flush:
curl -XPOST http://localhost:9200/_flush/synced
 
3. Stop the node and upgrade the Elasticsearch package:
/etc/init.d/elasticsearch stop;
apt-get update;
apt-get --only-upgrade install elasticsearch
 
4. Reinstall the plugins to match the new version:
/usr/share/elasticsearch/bin/plugin remove analysis-kuromoji;
/usr/share/elasticsearch/bin/plugin remove analysis-smartcn;
/usr/share/elasticsearch/bin/plugin remove analysis-icu;
/usr/share/elasticsearch/bin/plugin remove mobz/elasticsearch-head;
 
/usr/share/elasticsearch/bin/plugin install analysis-kuromoji;
/usr/share/elasticsearch/bin/plugin install analysis-smartcn;
/usr/share/elasticsearch/bin/plugin install analysis-icu;
/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head;
 
5. Restart the node and confirm it has rejoined the cluster:
/etc/init.d/elasticsearch start;
curl -XGET http://localhost:9200/_cat/nodes
 
6. Re-enable shard allocation:
curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{
  "transient": {
    "cluster.routing.allocation.enable": "all"
  }
}'
 
7. Wait for the cluster to recover, then repeat the steps on the next node:
curl -XGET http://localhost:9200/_cat/health

Jenkins installation and configuration


For full details see: http://jenkins-php.org

apt-get install default-jdk
wget -q -O - https://jenkins-ci.org/debian/jenkins-ci.org.key | apt-key add -
sh -c 'echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list'
apt-get update
apt-get install jenkins
 
java -jar jenkins-cli.jar -s http://localhost:8080 install-plugin git phing checkstyle cloverphp crap4j dry htmlpublisher jdepend plot pmd violations warnings xunit
java -jar jenkins-cli.jar -s http://localhost:8080 safe-restart
 
apt-get install phpunit phpcodesniffer phploc pdepend phpmd phpcpd phpdox php5-xdebug libxml2-utils
1. Create the php-template job with the Jenkins CLI:
curl -L https://raw.githubusercontent.com/sebastianbergmann/php-jenkins-template/master/config.xml | java -jar jenkins-cli.jar -s http://localhost:8080 create-job php-template
 
Or add the template manually:
 
cd $JENKINS_HOME/jobs
mkdir php-template
cd php-template
wget https://raw.github.com/sebastianbergmann/php-jenkins-template/master/config.xml
cd ..
chown -R jenkins:jenkins php-template/
Reload Jenkins configuration, for instance using the Jenkins CLI:
java -jar jenkins-cli.jar -s http://localhost:8080 reload-configuration
 
2.Click on "New Job".
3.Enter a "Job name".
4.Select "Copy existing job" and enter "php-template" into the "Copy from" field.
5.Click "OK".
6.Uncheck the "Disable Build" option.
7.Fill in your "Source Code Management" information.
8.Configure a "Build Trigger", for instance "Poll SCM".
9.Click "Save".
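Once saved, the new job can also be triggered from the same CLI jar used above; the job name is whatever was entered in step 3:

java -jar jenkins-cli.jar -s http://localhost:8080 build YourJobName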

Hive installation and configuration


Prerequisite: a working Hadoop cluster (see the Hadoop cluster installation section below).

wget http://mirrors.gigenet.com/apache/hive/stable-2/apache-hive-2.0.0-bin.tar.gz
 
tar zxf apache-hive-2.0.0-bin.tar.gz
mv apache-hive-2.0.0-bin /usr/local/
cd /usr/local
mv apache-hive-2.0.0-bin hive
 
cat /etc/profile
...
HIVE_HOME=/usr/local/hive
PATH=$PATH:$HIVE_HOME/bin
export HIVE_HOME PATH
...
 
cd /usr/local/hive/conf
cp hive-default.xml.template hive-default.xml
vi /usr/local/hive/conf/hive-site.xml
<configuration>
        <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://Master:3306/hive?createDatabaseIfNotExist=true</value>
        <description>JDBC connect string for a JDBC metastore</description>
        </property>
        <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.jdbc.Driver</value>
        <description>Driver class name for a JDBC metastore</description>
        </property>
        <property>
        <name>javax.jdo.option.ConnectionUserName</name>
<value>hive</value>
        <description>username to use against metastore database</description>
        </property>
        <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>hive</value>
        <description>password to use against metastore database</description>
        </property>
</configuration>
mysql>CREATE USER 'hive' IDENTIFIED BY 'hive';
mysql>GRANT ALL PRIVILEGES ON *.* TO 'hive'@'%' WITH GRANT OPTION;
mysql>FLUSH privileges;
mysql>CREATE DATABASE hive;
cd ~
wget http://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-5.1.38.tar.gz
tar zxf mysql-connector-java-5.1.38.tar.gz
cd mysql-connector-java-5.1.38
cp mysql-connector-java-5.1.38-bin.jar /usr/local/hive/lib/
 
scp -r /usr/local/hive Slave1:/usr/local/
scp -r /usr/local/hive Slave2:/usr/local/
#create the schema
schematool -initSchema -dbType mysql
 
#start the metastore service so the clients on Slave1 and Slave2 can connect
hive --service metastore &
 
hive
hive> show databases;

#Slave1,2

cat /usr/local/hive/conf/hive-site.xml
<configuration>
    <property>  
        <name>hive.metastore.uris</name>  
        <value>thrift://Master:9083</value>
    </property>
</configuration>
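With the metastore service running on Master, the client setup on the slaves can be smoke-tested with a one-off query, for example:

hive -e "show databases;"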

Hadoop cluster installation and configuration

#===Master,Slave1,2===
cat /etc/hosts
192.168.131.130 Master
192.168.131.131 Slave1
192.168.131.132 Slave2
 
#===Master===
cd ~/.ssh
rm ./id_rsa*
ssh-keygen -t rsa
cat ./id_rsa.pub >> ./authorized_keys
scp ~/.ssh/id_rsa.pub hadoop@Slave1:/home/hadoop/
scp ~/.ssh/id_rsa.pub hadoop@Slave2:/home/hadoop/
#then, on Slave1 and Slave2: mkdir -p ~/.ssh && cat ~/id_rsa.pub >> ~/.ssh/authorized_keys
 
apt-get install openjdk-8-jre openjdk-8-jdk
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
 
wget http://mirror.metrocast.net/apache/hadoop/common/hadoop-2.7.2/hadoop-2.7.2.tar.gz
tar zxvf hadoop-2.7.2.tar.gz -C /usr/local/
cd /usr/local/;
mv hadoop-2.7.2 hadoop;
cd ./hadoop;
./bin/hadoop version
export PATH=$PATH:/usr/local/hadoop/bin:/usr/local/hadoop/sbin
 
vi /usr/local/hadoop/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/jre

vi /usr/local/hadoop/etc/hadoop/slaves
#List the hostnames that will act as DataNodes in this file, one per line (the default is localhost).
#To use the Master node only as the NameNode, delete localhost and add just two lines, Slave1 and Slave2, as shown below.
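With the hosts defined in /etc/hosts above, the file then contains just:

Slave1
Slave2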

vi /usr/local/hadoop/etc/hadoop/core-site.xml

<configuration>
        <property>
                <name>fs.defaultFS</name>
                <value>hdfs://Master:9000</value>
        </property>
        <property>
                <name>hadoop.tmp.dir</name>
                <value>file:/usr/local/hadoop/tmp</value>
                <description>Abase for other temporary directories.</description>
        </property>
</configuration>

vi /usr/local/hadoop/etc/hadoop/hdfs-site.xml

<configuration>
        <property>
                <name>dfs.namenode.secondary.http-address</name>
                <value>Master:50090</value>
        </property>
        <property>
                <name>dfs.replication</name>
                <value>1</value>
        </property>
        <property>
                <name>dfs.namenode.name.dir</name>
                <value>file:/usr/local/hadoop/tmp/dfs/name</value>
        </property>
        <property>
                <name>dfs.datanode.data.dir</name>
                <value>file:/usr/local/hadoop/tmp/dfs/data</value>
        </property>
</configuration>

vi /usr/local/hadoop/etc/hadoop/mapred-site.xml

<configuration>
        <property>
                <name>mapreduce.framework.name</name>
                <value>yarn</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.address</name>
                <value>Master:10020</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.webapp.address</name>
                <value>Master:19888</value>
        </property>
</configuration>

vi /usr/local/hadoop/etc/hadoop/yarn-site.xml

<configuration>
        <property>
                <name>yarn.resourcemanager.hostname</name>
                <value>Master</value>
        </property>
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
</configuration>
#===Master===
cd /usr/local
rm -r ./hadoop/tmp
rm -r ./hadoop/logs/*
tar zcvf hadoop.master.tar.gz ./hadoop
scp ./hadoop.master.tar.gz Slave1:/home/hadoop
scp ./hadoop.master.tar.gz Slave2:/home/hadoop
 
#===Slave1,2===
tar -zxf ~/hadoop.master.tar.gz -C /usr/local
chown -R hadoop:hadoop /usr/local/hadoop
#===Master===
#NameNode格式化
hdfs namenode -format
 
#启动Hadoop
start-all.sh
#OR
start-dfs.sh
start-yarn.sh
mr-jobhistory-daemon.sh start historyserver
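A quick way to confirm the daemons came up is jps on each host; roughly, Master should list NameNode, SecondaryNameNode, ResourceManager and JobHistoryServer, while Slave1/2 list DataNode and NodeManager:

#===Master,Slave1,2===
jps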

#Check the status of the NameNode and DataNodes via the web UI
http://192.168.131.130:50070/

hdfs dfsadmin -report
 
#Create the HDFS user directory
hdfs dfs -mkdir -p /user/hadoop
 
#Copy the configuration files from /usr/local/hadoop/etc/hadoop into HDFS as the job input
hdfs dfs -mkdir input
hdfs dfs -put /usr/local/hadoop/etc/hadoop/*.xml input
 
hdfs dfs -ls
Found 2 items
drwxr-xr-x   - hadoop supergroup          0 2016-03-03 22:48 input
drwxr-xr-x   - hadoop supergroup          0 2016-03-03 22:54 output
 
hdfs dfs -ls /
Found 3 items
drwxr-xr-x   - hadoop supergroup          0 2016-03-04 00:38 /system
drwxrwx---   - hadoop supergroup          0 2016-03-03 22:45 /tmp
drwxr-xr-x   - hadoop supergroup          0 2016-03-03 22:47 /user
 
hdfs dfs -ls /user/hadoop
Found 2 items
drwxr-xr-x   - hadoop supergroup          0 2016-03-03 22:48 /user/hadoop/input
drwxr-xr-x   - hadoop supergroup          0 2016-03-03 22:54 /user/hadoop/output
 
hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar grep input output 'dfs[a-z.]+'
hdfs dfs -cat output/*
1       dfsadmin
1       dfs.replication
1       dfs.namenode.secondary.http
1       dfs.namenode.name.dir
1       dfs.datanode.data.dir
 
hdfs dfs -rmr output
hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar grep input output 'mast[a-z.]+'
hdfs dfs -cat output/*
1       masters
1       master.protocol.acl

#Check job progress via the YARN web UI
http://192.168.131.130:8088/cluster

#===Master===
stop-all.sh
#OR
stop-yarn.sh
stop-dfs.sh
mr-jobhistory-daemon.sh stop historyserver

#Troubleshooting notes

tail /usr/local/hadoop/logs/hadoop-hadoop-datanode-Slave1.log
  FATAL org.apache.hadoop.hdfs.server.datanode.DataNode: Initialization failed for Block pool <registering> (Datanode Uuid unassigned) service to Master/9000. Exiting.
http://stackoverflow.com/questions/30521474/hadoop-hdfs-formatting-gets-error-failed-for-block-pool
 
cat /usr/local/hadoop/logs/hadoop-hadoop-datanode-Slave1.log | grep CID
  2016-03-06 18:56:24,584 WARN org.apache.hadoop.hdfs.server.common.Storage: java.io.IOException: Incompatible clusterIDs in /usr/local/hadoop/tmp/dfs/data: namenode clusterID = CID-f134a9fa-041e-46f8-8e03-6cf78226a9cd; datanode clusterID = CID-4e691c0a-1cb2-46fd-9793-734a9b9047cf
 
Open the /usr/local/hadoop/tmp/dfs/data/current/VERSION file on the affected DataNode (the dfs.datanode.data.dir configured above) and change the clusterID to match the NameNode's:
  clusterID=CID-f134a9fa-041e-46f8-8e03-6cf78226a9cd
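After correcting the clusterID, restart the DataNode on the affected slave; something along these lines:

/usr/local/hadoop/sbin/hadoop-daemon.sh stop datanode
/usr/local/hadoop/sbin/hadoop-daemon.sh start datanode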

Deleting files in Azure Blob Storage with Azure CLI

azure login
azure storage account list
azure storage account set YourAccountName
export AZURE_STORAGE_CONNECTION_STRING='DefaultEndpointsProtocol=https;AccountName=YourAccountName;AccountKey=myDSCnRVxxxxxNCwcFkkxxxxxx4nbhV10unY2yFRHNGb8VHZLjg77F9WA=='
azure storage container show YourContainer
azure storage container set YourContainer
azure storage blob show YourContainer BlobName
azure storage blob delete YourContainer BlobName

You can address a blob in your storage account using the following URL format:
http://storage-account-name.blob.core.windows.net/container-name/blob-name

For example, here is a URL that addresses a blob named MOV1.AVI in a container named movies:
http://sally.blob.core.windows.net/movies/MOV1.AVI
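Mapping that URL format back onto the CLI commands above, the container name is 'movies' and the blob name is 'MOV1.AVI', so the same blob could be inspected or removed with:

azure storage blob show movies MOV1.AVI
azure storage blob delete movies MOV1.AVI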

Dynamic DynamoDB


http://dynamic-dynamodb.readthedocs.org/en/latest/cloudformation_template.html
https://aws.amazon.com/cn/blogs/aws/auto-scale-dynamodb-with-dynamic-dynamodb/

This example will configure Dynamic DynamoDB to:
- Scale up your DynamoDB table when consumed reads reach 90% of the total provisioned reads
- Scale up your DynamoDB table when consumed writes reach 90% of the total provisioned writes
- Scale up your reads by 50%
- Scale up your writes by 40%
- Scale down your DynamoDB table when consumed reads fall below 30% of the total provisioned reads
- Scale down your DynamoDB table when consumed writes fall below 40% of the total provisioned writes
- Scale down your reads by 40%
- Scale down your writes by 70%
- Check for changes every 5 minutes
 
Command:
dynamic-dynamodb --table-name my-table \
--reads-upper-threshold 90 \
--reads-lower-threshold 30 \
--increase-reads-with 50 \
--decrease-reads-with 40 \
--writes-upper-threshold 90 \
--writes-lower-threshold 40 \
--increase-writes-with 40 \
--decrease-writes-with 70 \
--check-interval 300

I don't get why AWS doesn't just build this into the console -_-

LNMP installation and configuration on Alpine Linux


I read that the official Docker images are going to start using it, so I installed it to take a look.
Download: www.alpinelinux.org

apk add nginx memcached mysql mysql-client php php-dev php-cli php-pear php-phar php-fpm php-gd php-memcache php-json php-mysql php-pdo php-pdo_mysql php-mysqli php-zip php-zlib php-bz2 php-ctype php-mcrypt bash git;
apk add gcc g++ make autoconf;
 
#First run `php -m` on the command line,
#then compare the output to `php -n -m` (php started without an ini file)
vi /usr/bin/pecl;  # 'exec $PHP -C -q $INCARG ... '
pecl install xdebug
 
vi /etc/php/php.ini;
...
zend_extension=/usr/lib/php/modules/xdebug.so
xdebug.remote_enable=1
xdebug.remote_port=9090
xdebug.remote_connect_back=1
xdebug.auto_trace=1
xdebug.collect_params=1
xdebug.collect_return=1
xdebug.profiler_enable=1
xdebug.var_display_max_data=10000
xdebug.var_display_max_depth=20
...
 
vi /etc/nginx/nginx.conf;
    ...
    location ~ \.php$ {                                                     
        root           html;                                                
        fastcgi_pass   127.0.0.1:9000;                                      
        fastcgi_index  index.php;                                           
        fastcgi_param  SCRIPT_FILENAME  $document_root$fastcgi_script_name; 
        include        fastcgi_params;                                      
    }    
    ...
 
vi /usr/share/nginx/html/index.php;
<?php
    phpinfo();
?>
 
apk add curl openssl php-openssl;
curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer;
 
/etc/init.d/php-fpm start;
/etc/init.d/nginx start;
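At this point the PHP handler can be sanity-checked through nginx (assuming the default server block serves /usr/share/nginx/html on port 80):

curl -I http://localhost/index.php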
 
/etc/init.d/mariadb setup;
/etc/init.d/mariadb start;
/etc/init.d/memcached start;
 
apk add phpmyadmin;
cd /usr/share/nginx/html/;
cp -rf /usr/share/webapps/phpmyadmin ./;
cp /etc/phpmyadmin/config.inc.php ./phpmyadmin;
apk del phpmyadmin;
 
cd /usr/share/nginx/html;
wget https://github.com/jokkedk/webgrind/archive/master.zip;
unzip master.zip;
mv webgrind-master webgrind;
apk add python graphviz font-misc-misc;
 
cd /etc/profile.d;
cat color_prompt;
  # Setup a red prompt for root and a green one for users.
  # rename this file to color_prompt.sh to actually enable it
  NORMAL="\[\e[0m\]"
  RED="\[\e[1;31m\]"
  GREEN="\[\e[1;32m\]"
  if [ "$USER" = root ]; then
        PS1="$RED\h [$NORMAL\w$RED]# $NORMAL"
  else
        PS1="$GREEN\h [$NORMAL\w$GREEN]\$ $NORMAL"
  fi
 
mv color_prompt color_prompt.sh;
vi alias.sh;
  alias ll="ls -al"
  alias rm="rm -i"
 
#yemaosheng.com

SMS and phone call from Zabbix using Twilio



vim /etc/zabbix/alert.d/zabbix-alert-sms-twilio.sh

#!/usr/bin/python
import sys
from twilio.rest import TwilioRestClient
 
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "AC0axxxxxxxxxxxxxxxxxxxx296ae"
auth_token  = "93e4xxxxxxxxxxxxxxxxxxx63e9"
client = TwilioRestClient(account_sid, auth_token)
 
message = client.messages.create(body=sys.argv[2],
    to=sys.argv[1],
    from_="+16572338xx8") # Replace with your Twilio number
print message.sid
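The script needs the twilio Python package and execute permission; it can then be tested from the shell before being wired into a Zabbix media type (the phone number and text below are just examples):

pip install twilio
chmod +x /etc/zabbix/alert.d/zabbix-alert-sms-twilio.sh
/etc/zabbix/alert.d/zabbix-alert-sms-twilio.sh "+8613800138000" "Zabbix test message"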

vim /etc/zabbix/alert.d/zabbix-alert-call-twilio.sh

#!/usr/bin/python
import sys
import urllib
from twilio.rest import TwilioRestClient
 
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "AC0axxxxxxxxxxxxxxxxxxxx296ae"
auth_token  = "93e4xxxxxxxxxxxxxxxxxxx63e9"
client = TwilioRestClient(account_sid, auth_token)
 
TwiML="http://yemaosheng.com/getTwiML.php?say="+urllib.quote(sys.argv[2])
 
call = client.calls.create(to=sys.argv[1],
    from_="+16572338xx8",
    url=TwiML)
print call.sid

vim /var/www/html/getTwiML.php

<?php header('Content-Type: text/xml'); echo '<?xml version="1.0" encoding="UTF-8"?>'; ?>
<Response>
    <Say voice="woman"><?php echo htmlspecialchars($_GET['say']); ?></Say>
    <Hangup/>
</Response>