Using SaltStack to deploy Auto-scaling EC2

Standard

SaltStack Master: 172.66.1.100

Create AMI by default VM:

root@ip-x.x.x.x:~# cat /etc/rc.local
/root/PkgInit.sh;
/root/SaltMinionInit.sh;
/root/SaltCall.sh;
 
root@ip-x.x.x.x:~# cat /root/PkgInit.sh 
#!/bin/sh
# PkgInit.sh - install the Salt minion and the AWS CLI on first boot.
# Invoked from /etc/rc.local, so everything must be non-interactive (-y).
set -e  # abort if the repo cannot be added or a package fails to install

# The SaltStack PPA carries a newer salt-minion than the stock Ubuntu archive.
add-apt-repository ppa:saltstack/salt -y
apt-get update
apt-get install -y salt-minion
apt-get install -y awscli
 
root@ip-x.x.x.x:~# cat /root/SaltMinionInit.sh
#!/bin/sh
# SaltMinionInit.sh - generate /etc/salt/minion from EC2 instance metadata
# so that freshly auto-scaled instances register with the master and carry
# a 'roles' grain derived from their first EC2 tag.

INSTANCE_ID=$(ec2metadata --instance-id)
# The region is not exposed directly by ec2metadata; extract it from the
# instance identity document.
REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep region | awk -F\" '{print $4}')
# Value of the first tag attached to this instance (column 5 of text output).
TAG=$(aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --region "$REGION" --output text --max-items 1 | cut -f5)
# Rewrite the minion config: point at the master and expose the tag as a
# 'roles' grain.  printf with a quoted %s keeps tags containing whitespace
# intact (the original left $TAG outside the quotes).
printf 'master: 172.66.1.100\ngrains:\n  roles:\n    - %s\n' "$TAG" > /etc/salt/minion
service salt-minion restart
 
root@ip-x.x.x.x:~# cat /root/SaltCall.sh
# Give the master's auth reactor time to accept this minion's key before
# requesting the full highstate.
sleep 15
salt-call state.highstate

Setup Saltstack Master:

root@ip-172-66-1-100:~# add-apt-repository ppa:saltstack/salt
root@ip-172-66-1-100:~# apt-get update
root@ip-172-66-1-100:~# apt-get install salt-master
root@ip-172-66-1-100:~# cat /etc/salt/master | grep -v '^#' | grep -v '^$'
file_roots:
  base:
    - /srv/salt
pillar_roots:
  base:
    - /srv/pillar
reactor:
  - 'salt/auth':
    - /srv/reactor/auth-pending.sls
 
# Automating Key Acceptance
# salt-run state.event pretty=True
root@ip-172-66-1-100:~# cat /srv/reactor/auth-pending.sls
{# Reactor rendered on each 'salt/auth' event: automatically accept the key
   of any *pending* minion whose id starts with 'ip-172' (i.e. launched
   inside the 172.x VPC), so auto-scaled instances join without a manual
   'salt-key -a'.  NOTE(review): the hostname prefix is the only check
   performed here - any host able to present a matching minion id would be
   accepted. #}
{% if 'act' in data and data['act'] == 'pend' and data['id'].startswith('ip-172') %}
minion_add:
  wheel.key.accept:
    - match: {{ data['id'] }}
{% endif %}
 
# Get grains item
root@ip-172-66-1-100:~# salt '*' grains.item os
ip-172-66-2-214:
    ----------
    os:
        Ubuntu
ip-172-66-4-93:
    ----------
    os:
        Ubuntu
root@ip-172-66-1-100:/srv/reactor# salt '*' grains.item roles
ip-172-66-2-214:
    ----------
    roles:
        - YeMaosheng_com
ip-172-66-4-93:
    ----------
    roles:
        - YeMaosheng_com
 
root@ip-172-66-1-100:~# salt -G 'roles:YeMaosheng_com' state.highstate -t 60 test=True
root@ip-172-66-1-100:~# salt -G 'roles:YeMaosheng_com' state.highstate

网站所用EC2的安装及发布配置

├── pillar
│   ├── yemaosheng_com
│   │   ├── nginx.sls
│   │   ├── php56.sls
│   │   └── website.sls
│   └── top.sls
├── reactor
│   └── auth-pending.sls
└── salt
    ├── crontab
    │   └── init.sls
    ├── mysql-client
    │   └── init.sls
    ├── nginx
    │   ├── configs
    │   │   └── yemaosheng_com
    │   │       ├── blockrules.conf
    │   │       ├── nginx.conf
    │   │       └── sites-enabled
    │   │           └── yemaosheng.com
    │   └── init.sls
    ├── php56
    │   ├── configs
    │   │   └── yemaosheng_com
    │   │       └── php5-fpm
    │   │           └── www.conf
    │   └── init.sls
    ├── top.sls
    ├── website
    │   ├── configs
    │   │   └── yemaosheng_com
    │   │       ├── dhparam.pem
    │   │       └── sslkey
    │   └── init.sls
    └── websitefiles
        └── yemaosheng_com -> /var/www/yemaosheng_com
 
cat /srv/salt/top.sls 
# /srv/salt/top.sls - target minions by the 'roles' grain written by
# SaltMinionInit.sh.  Grain matching is case-sensitive: the grain value
# shown by 'salt "*" grains.item roles' is 'YeMaosheng_com', so the match
# must use the same capitalisation (the lowercase form matches nothing).
base:
  'roles:YeMaosheng_com':
    - match: grain
    - mysql-client
    - php56
    - nginx
    - website
    - crontab
 
cat /srv/pillar/top.sls 
# /srv/pillar/top.sls - pillar assignment; must use the same case-sensitive
# grain value ('YeMaosheng_com') as /srv/salt/top.sls, matching the grain
# actually reported by the minions.
base:
  'roles:YeMaosheng_com':
    - match: grain
    - yemaosheng_com.nginx
    - yemaosheng_com.php56
    - yemaosheng_com.website
 
cat /srv/pillar/yemaosheng_com/nginx.sls 
# Pillar consumed by salt://nginx/init.sls: file_roots-relative source paths
# for this site's nginx configuration.
# NOTE(review): nginx/init.sls also reads pillar['site_name'], which is not
# defined in any pillar file shown here - confirm it is supplied elsewhere.
nginx_conf: nginx/configs/yemaosheng_com/nginx.conf
# Directory synced recursively into /etc/nginx/sites-enabled.
nginx_site-enable: nginx/configs/yemaosheng_com/sites-enabled
 
cat /srv/salt/nginx/init.sls 
{# nginx install + configuration states, parameterised by pillar.
   NOTE(review): pillar['site_name'] is not defined in the pillar files
   shown alongside this state - if absent, rendering fails with a KeyError. #}
{% set site_name = pillar['site_name'] %}

# Install nginx from the distro repositories (old-style pkg declaration).
nginx:
  pkg:
    - name: nginx
    - installed

nginx_conf:
  # Keep nginx running; 'reload: True' means config changes trigger a reload
  # instead of a full restart.  NOTE(review): the watch target is a glob over
  # file state IDs/names - it relies on Salt's glob matching of requisites to
  # pick up the file.managed below; verify it matches on this Salt version.
  service.running:
    - name: nginx
    - enable: True
    - reload: True
    - watch:
      - file: /etc/nginx/*
  # Main nginx.conf, sourced from the path supplied by pillar.
  file.managed:
    - name: /etc/nginx/nginx.conf
    - source: salt://{{ pillar['nginx_conf'] }}
    - user: root
    - group: root
    - mode: '0640'
    - require:
      - pkg: nginx

{% if site_name == 'yemaosheng_com' %}
# Site-specific TLS material, only for the yemaosheng_com role.
upload_sslkey_to_nginx:
  file.recurse:
    - name: /srv/ssl
    - user: root
    - group: root
    - file_mode: '0644'
    - source: salt://website/configs/yemaosheng_com/sslkey
    - include_empty: True

upload_dhparam_to_nginx:
  file.managed:
    - name: /etc/nginx/dhparam.pem
    - source: salt://website/configs/yemaosheng_com/dhparam.pem
    - user: root
    - group: root
    - mode: '0644'
    - require:
      - pkg: nginx
{% endif %}

# Sync the vhost directory; 'clean: True' deletes files not present in the
# source, so the source directory is authoritative.  The service.running half
# of this ID reloads nginx whenever the synced directory changes.
/etc/nginx/sites-enabled:
  service.running:
    - name: nginx
    - enable: True
    - reload: True
    - watch:
      - file: /etc/nginx/sites-enabled
  file.recurse:
    - name: /etc/nginx/sites-enabled
    - user: root
    - group: root
    - dir_mode: 2775
    - file_mode: '0644'
    - source: salt://{{ pillar['nginx_site-enable'] }}
    - include_empty: True
    - clean: True
    - require:
      - pkg: nginx

AWS VPC point to point with gre tunnel

Standard

related AWS VPC通过IPsec连接不同Region

AWS China EC2:

root@ip-10-33-30-103:/home/ubuntu# cat /etc/network/interfaces.d/gre1.cfg
# GRE point-to-point tunnel from the China VPC to the Sydney VPC
# (ifupdown syntax, /etc/network/interfaces.d/).
auto gre1
iface gre1 inet tunnel
  mode gre
  netmask 255.255.255.255
  # Inner (tunnel) addresses: this end 10.0.0.2, Sydney end 10.0.0.1.
  address 10.0.0.2
  dstaddr 10.0.0.1
  # Outer addresses: remote peer's public (elastic) IP, and this instance's
  # private IP - AWS NATs the EIP to the private address.
  endpoint 52.63.189.251
  local 10.33.30.103
  ttl 255
 
root@ip-10-33-30-103:/home/ubuntu# route add -net 172.33.0.0 netmask 255.255.0.0 gw 10.0.0.2

AWS Sydney EC2:

root@ip-172-33-1-190:/home/ubuntu# cat /etc/network/interfaces.d/gre1.cfg 
# GRE point-to-point tunnel from the Sydney VPC to the China VPC
# (mirror of the China-side config).
auto gre1
iface gre1 inet tunnel
  mode gre
  netmask 255.255.255.255
  # Inner (tunnel) addresses: this end 10.0.0.1, China end 10.0.0.2.
  address 10.0.0.1
  dstaddr 10.0.0.2
  # Outer addresses: remote peer's public (elastic) IP, and this instance's
  # private IP - AWS NATs the EIP to the private address.
  endpoint 54.222.193.171
  local 172.33.1.190
  ttl 255
 
root@ip-172-33-1-190:/home/ubuntu# route add -net 10.33.0.0 netmask 255.255.0.0 gw 10.0.0.1

AWS Cloudwatch query script for Zabbix

Standard
#!/usr/bin/python
import boto.ec2.cloudwatch
import sys
import datetime
 
try:
    metName = sys.argv[1]
    funcName = sys.argv[2]
    dimSpace = sys.argv[3]
    region = sys.argv[4]
    accessKey = sys.argv[5]
    secretKey = sys.argv[6]
 
except:
    print "Usage: GetFromCloudwatch.py MetricName Function Dimension Region AWS_ACCESS_KEY AWS_SECRET_ACCESS_KEY"
    print "Example: GetFromCloudwatch.py FreeableMemory Average \"CacheClusterId=ElsticCacheName\" us-east-1 ACCESS_KEY SECRET_ACCESS_KEY"
    print "         GetFromCloudwatch.py CPUUtilization Average \"DBInstanceIdentifier=RDSName\" us-east-1 ACCESS_KEY SECRET_ACCESS_KEY"
    print "         GetFromCloudwatch.py ApproximateNumberOfMessagesVisible Average \"QueueName=SQSName\" us-east-1 ACCESS_KEY SECRET_ACCESS_KEY"
    sys.exit(1)
 
dim = {}
firstSplit = dimSpace.split(',')
for word in firstSplit:
    secondSplit = word.split('=')
    dim[secondSplit[0]] = secondSplit[1]
 
regions = boto.ec2.cloudwatch.regions()
 
reg = ''
for r in regions:
    if region == r.name:
        reg = r
 
c = boto.ec2.cloudwatch.CloudWatchConnection(aws_access_key_id=accessKey, aws_secret_access_key=secretKey, region=reg)
metrics = c.list_metrics(dimensions=dim)
 
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(minutes=15)
 
dataPoints = [];
for met in metrics:
    if met.name == metName:
        dataPoints = met.query(start, end, funcName)
 
if len(dataPoints) > 0:
    max = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
    index = 0
    for i in range(0,len(dataPoints)):
        if max < dataPoints[i][u'Timestamp']:
            max = dataPoints[i][u'Timestamp']
            index = i
    for key in dataPoints[index].keys():
        if funcName in key:
            value = dataPoints[index][key]
    print value
else:
    print 'Error! No response from Amazon.'
    sys.exit(2)

AWS VPC通过IPsec连接不同Region

Standard

AWS China<->AWS Sydney

AWS China VPC:

AWS China EC2:

root@ip-10-33-30-103:~# apt-get install openswan
root@ip-10-33-30-103:~# cat /etc/ipsec.conf 
# Openswan (NETKEY) site-to-site tunnel from the China VPC to Sydney.
config setup
    protostack=netkey
    interfaces=%defaultroute
    # Both endpoints sit behind AWS 1:1 NAT (EIP -> private IP), so NAT
    # traversal is mandatory.
    nat_traversal=yes
    force_keepalive=yes
    keep_alive=60
    # Disable opportunistic encryption.
    oe=no
    nhelpers=0
conn ToAWSSydneyVPC
    # 'left' is this host: its private address, since the EIP is NATed.
    left=10.33.30.103
    leftsubnets=10.33.0.0/16
    leftid=@AwsChinaGW
    # Remote peer's public (elastic) IP and VPC CIDR.
    right=52.63.189.251
    rightsubnets=172.33.0.0/16
    rightid=@AwsSydneyGW
    # Force UDP encapsulation even if NAT detection is inconclusive.
    forceencaps=yes
    # PSK from /etc/ipsec.secrets, matched by the @AwsChinaGW/@AwsSydneyGW ids.
    authby=secret
    # Not started automatically; brought up manually with
    # 'ipsec auto --add/--up ToAWSSydneyVPC'.
    auto=ignore
 
root@ip-10-33-30-103:~# cat /etc/ipsec.secrets 
@AwsChinaGW  @AwsSydneyGW: PSK "123321112233"
 
root@ip-10-33-30-103:~# cat /etc/sysctl.conf | grep -v '^#' | grep -v '^$'
net.ipv4.ip_forward = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.send_redirects = 0
 
root@ip-10-33-30-103:~# ipsec verify
root@ip-10-33-30-103:~# service ipsec start
root@ip-10-33-30-103:~# ipsec auto --add ToAWSSydneyVPC
root@ip-10-33-30-103:~# ipsec auto --up ToAWSSydneyVPC
root@ip-10-33-30-103:~# service ipsec status

AWS Sydney VPC:

AWS Sydney EC2:

root@ip-172-33-1-190:~# apt-get install openswan
root@ip-172-33-1-190:~# cat /etc/ipsec.conf 
# Openswan (NETKEY) site-to-site tunnel from the Sydney VPC to China
# (mirror of the China-side config).
config setup
    protostack=netkey
    interfaces=%defaultroute
    # Both endpoints sit behind AWS 1:1 NAT (EIP -> private IP), so NAT
    # traversal is mandatory.
    nat_traversal=yes
    force_keepalive=yes
    keep_alive=60
    # Disable opportunistic encryption.
    oe=no
    nhelpers=0
conn ToAWSCnVPC
    # 'left' is this host: its private address, since the EIP is NATed.
    left=172.33.1.190
    leftsubnets=172.33.0.0/16
    leftid=@AwsSydneyGW
    # Remote peer's public (elastic) IP and VPC CIDR.
    right=54.222.193.171
    rightsubnets=10.33.0.0/16
    rightid=@AwsChinaGW
    # Force UDP encapsulation even if NAT detection is inconclusive.
    forceencaps=yes
    # PSK from /etc/ipsec.secrets, matched by the @AwsSydneyGW/@AwsChinaGW ids.
    authby=secret
    # Not started automatically; brought up manually with
    # 'ipsec auto --add/--up ToAWSCnVPC'.
    auto=ignore
 
root@ip-172-33-1-190:~# cat /etc/ipsec.secrets 
@AwsSydneyGW  @AwsChinaGW: PSK "123321112233"
 
root@ip-172-33-1-190:~# cat /etc/sysctl.conf | grep -v '^#' | grep -v '^$'
net.ipv4.ip_forward = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.send_redirects = 0
 
root@ip-172-33-1-190:~# ipsec verify
root@ip-172-33-1-190:~# service ipsec start
root@ip-172-33-1-190:~# ipsec auto --add ToAWSCnVPC
root@ip-172-33-1-190:~# ipsec auto --up ToAWSCnVPC
root@ip-172-33-1-190:~# service ipsec status

确保两边EC2所在安全组允许 UDP 500、UDP 4500 以及 IP 协议 50 (ESP) 和 51 (AH) 通过(注意 ESP/AH 是独立的 IP 协议号,不是 TCP 端口)
关闭两边EC2上的’Source/Dest checking'(更改源/目标 检查)

Dynamic DynamoDB

Standard

http://dynamic-dynamodb.readthedocs.org/en/latest/cloudformation_template.html
https://aws.amazon.com/cn/blogs/aws/auto-scale-dynamodb-with-dynamic-dynamodb/

This example will configure Dynamic DynamoDB to:
- Scale up your DynamoDB table when the consumed reads 90% of the total provisioned reads
- Scale up your DynamoDB table when the consumed writes 90% of the total provisioned writes
- Scale up your reads with 50%
- Scale up your writes with 40%
- Scale down your DynamoDB table when the consumed reads 30% of the total provisioned reads
- Scale down your DynamoDB table when the consumed writes 40% of the total provisioned writes
- Scale down your reads with 40%
- Scale down your writes with 70%
- Check for changes every 5 minutes
 
Command:
dynamic-dynamodb --table-name my-table \
--reads-upper-threshold 90 \
--reads-lower-threshold 30 \
--increase-reads-with 50 \
--decrease-reads-with 40 \
--writes-upper-threshold 90 \
--writes-lower-threshold 40 \
--increase-writes-with 40 \
--decrease-writes-with 70 \
--check-interval 300

搞不懂AWS为何不在界面上自带着就把这东西做进去-_-

How to use AWS ElasticCache on Azure

Standard
[ec2-user@ip-174-129-100-10 ~]$ cat /etc/rc.local
# Relay: DNAT local port 26379 to the ElastiCache Redis endpoint so a host
# outside AWS (Azure) can reach it through this EC2 instance.
echo "redis-name"
# Resolve the ElastiCache endpoint to an IP; iptables DNAT needs an address,
# not a hostname.  NOTE: the rule is frozen at boot - if the node's IP
# changes, rc.local must run again.
ADDRESS=$(nslookup redis-name.7exo1h.0001.use1.cache.amazonaws.com | grep "Address:" | tail -n 1 | awk '{print $2}')
echo "$ADDRESS"
if [ -z "$ADDRESS" ]; then
  # Without this guard a failed lookup installed a broken DNAT rule.
  echo "Could not resolve ElastiCache endpoint" >&2
  exit 1
fi
iptables -t nat -A PREROUTING -i eth0 -p tcp -m tcp --dport 26379 -j DNAT --to-destination "$ADDRESS":6379
iptables -t nat -A POSTROUTING -j MASQUERADE
sysctl net.ipv4.ip_forward=1
 
[ec2-user@ip-174-129-100-10 ~]$ iptables -t nat -L
Chain PREROUTING (policy ACCEPT)
target     prot opt source               destination         
DNAT       tcp  --  anywhere             anywhere             tcp dpt:26379 to:10.153.181.100:6379
 
Chain INPUT (policy ACCEPT)
target     prot opt source               destination         
 
Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination         
 
Chain POSTROUTING (policy ACCEPT)
target     prot opt source               destination         
MASQUERADE  all  --  anywhere             anywhere
azure@azure:/home/azure# cat /etc/rc.local
# redis-name
iptables -t nat -I OUTPUT -p tcp -d redis-name.7exo1h.0001.use1.cache.amazonaws.com --dport 6379  -j DNAT --to-destination 174.129.100.10:26379
 
azure@azure:/home/azure# iptables -t nat -L
Chain PREROUTING (policy ACCEPT)
target     prot opt source               destination         
 
Chain INPUT (policy ACCEPT)
target     prot opt source               destination         
 
Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination         
DNAT       tcp  --  anywhere             ec2-50-17-68-100.compute-1.amazonaws.com  tcp dpt:6379 to:174.129.100.10:26379
 
Chain POSTROUTING (policy ACCEPT)
target     prot opt source               destination

A lookup table of the Azure and AWS

Standard
Microsoft Azure Amazon Web Services (AWS)
Available Regions Azure Regions AWS Global Infrastructure
Compute Services Virtual Machines (VMs) Elastic Compute Cloud (EC2)
Cloud Services
Azure Websites and Apps
Amazon Elastic Beanstalk
Azure Visual Studio Online None
Container Support Docker Virtual Machine Extension (how to) EC2 Container Service (Preview)
Scaling Options Azure Autoscale (how to) Auto Scaling
Analytics/Hadoop Options HDInsight (Hadoop) Elastic MapReduce (EMR)
Government Services Azure Government AWS GovCloud
App/Desktop Services Azure RemoteApp Amazon WorkSpaces
Amazon AppStream
Storage Options Azure Storage (Blobs, Tables, Queues, Files) Amazon Simple Storage Service (S3)
Block Storage Azure Blob Storage (how to) Amazon Elastic Block Storage (EBS)
Hybrid Cloud Storage StorSimple None
Backup Options Azure Backup Amazon Glacier
Storage Services Azure Import Export (how to) Amazon Import / Export
Azure File Storage (how to) AWS Storage Gateway
Azure Site Recovery None
Content Delivery Network (CDN ) Azure CDN Amazon CloudFront
Database Options Azure SQL Database Amazon Relational Database Service (RDS)
Amazon Redshift
NoSQL Database Options Azure DocumentDB Amazon Dynamo DB

Azure Managed Cache (Redis Cache) Amazon Elastic Cache
Data Orchestration Azure Data Factory AWS Data Pipeline
Networking Options Azure Virtual Network Amazon VPC
Azure ExpressRoute AWS Direct Connect
Azure Traffic Manager Amazon Route 53
Load Balancing Load Balancing for Azure (how to) Elastic  Load Balancing
Administration & Security Azure Active Directory AWS Directory Service
AWS Identity and Access Management (IAM)
Multi-Factor Authentication Azure Multi-Factor Authentication AWS Multi-Factor Authentication
Monitoring Azure Operational Insights Amazon CloudTrail
Azure Application Insights Amazon CloudWatch
Azure Event Hubs None
Azure Notification Hubs Amazon Simple Notification Service (SNS)
Azure Key Vault (Preview) AWS Key Management Service
Compliance Azure Trust Center AWS CloudHSM
Management Services & Options Azure Resource Manager Amazon CloudFormation
API Management Azure API Management None
Automation Azure Automation AWS OpsWorks
Azure Batch
Azure Service Bus
Amazon Simple Queue Service (SQS)
Amazon Simple Workflow (SWF)
None AWS CodeDeploy
Azure Scheduler None
Azure Search Amazon CloudSearch
Analytics Azure Stream Analytics Amazon Kinesis
Email Services Azure BizTalk Services Amazon Simple Email Services (SES)
Media Services Azure Media Services Amazon Elastic Transcoder
Amazon Mobile Analytics
Amazon Cognito
Other Services & Integrations Azure Machine Learning (Preview) None
None AWS Lambda (Preview)
None AWS Config (Preview)

Using Custom Amazon CloudWatch Metrics

Standard

日志样式如下:

...
[2015-07-16 09:01:29] production.ERROR: Aws ...
...xxx...
[2015-07-16 09:07:23] production.ERROR: [Notification - getDetails] ...
...xxx...
[2015-07-16 09:16:04] production.ERROR: URL: ...
...xxx...
...

放crontab里跑的shell

#!/bin/bash
# SendPHPErrorNumber2Cloudwatch.sh - count ERROR lines logged in the last
# $1 minutes of today's PHP log and publish the count to CloudWatch as the
# custom metric LogError/PHPError, dimensioned by instance id.
# Intended to run from cron, e.g.: ./SendPHPErrorNumber2Cloudwatch.sh 5

if [ -z "$1" ]; then
  echo -e "no argument\nusage: ./SendPHPErrorNumber2Cloudwatch.sh 5"
  exit 1
fi
periodMinute=$1

dateNow=$(date "+%Y-%m-%d")

# The application writes one log file per day.
logFile="/var/log/yemaosheng_com_php-log-${dateNow}.txt"
if [ ! -f "$logFile" ]; then
  echo "No File"
  exit 1
fi

# Log timestamps look like "[YYYY-mm-dd HH:MM:SS]"; the format is
# lexicographically ordered, so plain string comparison works.
minuteAgo=$(date "+%Y-%m-%d %H:%M:%S" -d "${periodMinute} minute ago")

instanceId=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)

# cut -c 2-20 extracts "YYYY-mm-dd HH:MM:SS" from inside the brackets.
# Only the last 500 lines are scanned, so the count is capped during
# error storms.  awk counts directly (no extra wc -l process).
errorNum=$(tail -n 500 "$logFile" | grep ERROR | cut -c 2-20 \
  | awk -v since="$minuteAgo" '$0 > since { n++ } END { print n+0 }')

# The timestamp must be UTC to match the trailing 'Z' designator; the
# original used local time, skewing datapoints by the UTC offset.
/usr/local/bin/aws cloudwatch put-metric-data \
  --metric-name PHPError --namespace LogError \
  --dimensions Name=InstanceId,Value="${instanceId}" \
  --value "${errorNum}" \
  --timestamp "$(date -u "+%Y-%m-%dT%H:%M:%S.000Z")"