Using SaltStack to deploy Auto-scaling EC2

SaltStack Master: 172.66.1.100

Create an AMI from a base VM, with the following scripts run from /etc/rc.local at boot:

root@ip-x.x.x.x:~# cat /etc/rc.local
/root/PkgInit.sh;
/root/SaltMinionInit.sh;
/root/SaltCall.sh;
 
root@ip-x.x.x.x:~# cat /root/PkgInit.sh 
add-apt-repository ppa:saltstack/salt -y;
apt-get update;
apt-get install salt-minion -y;
apt-get install awscli -y;
 
root@ip-x.x.x.x:~# cat /root/SaltMinionInit.sh
# Look up this instance's ID and region from the EC2 metadata service
INSTANCE_ID=$(ec2metadata --instance-id);
REGION=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | grep region | awk -F\" '{print $4}');
# Read the instance's first tag value; it becomes the minion's "roles" grain
TAG=$(aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --region=$REGION --output=text --max-items=1 | cut -f5);
/bin/echo -e "master: 172.66.1.100\ngrains:\n  roles:\n    - ${TAG}" > /etc/salt/minion;
service salt-minion restart;
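
With a tag value of, say, YeMaosheng_com, the generated /etc/salt/minion looks like this (the instance profile or configured credentials must allow ec2:DescribeTags for the lookup to work):

master: 172.66.1.100
grains:
  roles:
    - YeMaosheng_com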
 
root@ip-x.x.x.x:~# cat /root/SaltCall.sh
sleep 15s;
salt-call state.highstate;
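
The Auto Scaling side only needs to launch this AMI with a tag that propagates to new instances; SaltMinionInit.sh then turns that tag into the minion's roles grain at boot. A minimal sketch with the AWS CLI, where the AMI, key, security group, subnet, instance profile and resource names are all placeholders:

# launch configuration built from the AMI baked above
aws autoscaling create-launch-configuration \
  --launch-configuration-name yemaosheng-lc \
  --image-id ami-xxxxxxxx \
  --instance-type t2.micro \
  --key-name my-key \
  --security-groups sg-xxxxxxxx \
  --iam-instance-profile describe-tags-profile

# PropagateAtLaunch=true tags every instance the group launches
aws autoscaling create-auto-scaling-group \
  --auto-scaling-group-name yemaosheng-asg \
  --launch-configuration-name yemaosheng-lc \
  --min-size 2 --max-size 4 \
  --vpc-zone-identifier subnet-xxxxxxxx \
  --tags Key=role,Value=YeMaosheng_com,PropagateAtLaunch=true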

Set up the SaltStack master:

root@ip-172-66-1-100:~# add-apt-repository ppa:saltstack/salt
root@ip-172-66-1-100:~# apt-get update
root@ip-172-66-1-100:~# apt-get install salt-master
root@ip-172-66-1-100:~# cat /etc/salt/master | grep -v '^#' | grep -v '^$'
file_roots:
  base:
    - /srv/salt
pillar_roots:
  base:
    - /srv/pillar
reactor:
  - 'salt/auth':
    - /srv/reactor/auth-pending.sls
 
# Automating Key Acceptance
# salt-run state.event pretty=True
root@ip-172-66-1-100:~# cat /srv/reactor/auth-pending.sls
{% if 'act' in data and data['act'] == 'pend' and data['id'].startswith('ip-172') %}
minion_add:
  wheel.key.accept:
    - match: {{ data['id'] }}
{% endif %}
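
To check the reactor, launch a new instance from the AMI and watch the master; the new key should move to 'Accepted Keys' without any manual salt-key -a:

root@ip-172-66-1-100:~# salt-run state.event pretty=True
root@ip-172-66-1-100:~# salt-key -L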
 
# Get grains item
root@ip-172-66-1-100:~# salt '*' grains.item os
ip-172-66-2-214:
    ----------
    os:
        Ubuntu
ip-172-66-4-93:
    ----------
    os:
        Ubuntu
root@ip-172-66-1-100:/srv/reactor# salt '*' grains.item roles
ip-172-66-2-214:
    ----------
    roles:
        - YeMaosheng_com
ip-172-66-4-93:
    ----------
    roles:
        - YeMaosheng_com
 
root@ip-172-66-1-100:~# salt -G 'roles:YeMaosheng_com' state.highstate -t 60 test=True
root@ip-172-66-1-100:~# salt -G 'roles:YeMaosheng_com' state.highstate

Installation and release configuration for the website EC2 instances (directory layout under /srv on the Salt master):

├── pillar
│   ├── yemaosheng_com
│   │   ├── nginx.sls
│   │   ├── php56.sls
│   │   └── website.sls
│   └── top.sls
├── reactor
│   └── auth-pending.sls
└── salt
    ├── crontab
    │   └── init.sls
    ├── mysql-client
    │   └── init.sls
    ├── nginx
    │   ├── configs
    │   │   └── yemaosheng_com
    │   │       ├── blockrules.conf
    │   │       ├── nginx.conf
    │   │       └── sites-enabled
    │   │           └── yemaosheng.com
    │   └── init.sls
    ├── php56
    │   ├── configs
    │   │   └── yemaosheng_com
    │   │       └── php5-fpm
    │   │           └── www.conf
    │   └── init.sls
    ├── top.sls
    ├── website
    │   ├── configs
    │   │   └── yemaosheng_com
    │   │       ├── dhparam.pem
    │   │       └── sslkey
    │   └── init.sls
    └── websitefiles
        └── yemaosheng_com -> /var/www/yemaosheng_com
 
cat /srv/salt/top.sls 
base:
  'roles:YeMaosheng_com':
    - match: grain
    - mysql-client
    - php56
    - nginx
    - website
    - crontab
 
cat /srv/pillar/top.sls 
base:
  'roles:YeMaosheng_com':
    - match: grain
    - yemaosheng_com.nginx
    - yemaosheng_com.php56
    - yemaosheng_com.website
 
cat /srv/pillar/yemaosheng_com/nginx.sls 
nginx_conf: nginx/configs/yemaosheng_com/nginx.conf
nginx_site-enable: nginx/configs/yemaosheng_com/sites-enabled
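
The nginx state below reads pillar['site_name'], which is not defined in nginx.sls, so it presumably lives in one of the other pillar files. A guess at what /srv/pillar/yemaosheng_com/website.sls contains (the key name is taken from the state; the rest is an assumption):

# /srv/pillar/yemaosheng_com/website.sls (hypothetical)
site_name: yemaosheng_com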
 
cat /srv/salt/nginx/init.sls 
{% set site_name = pillar['site_name'] %}
 
nginx:
  pkg.installed:
    - name: nginx
 
nginx_conf:
  service.running:
    - name: nginx
    - enable: True
    - reload: True
    - watch:
      - file: /etc/nginx/*
  file.managed:
    - name: /etc/nginx/nginx.conf
    - source: salt://{{ pillar['nginx_conf'] }}
    - user: root
    - group: root
    - mode: '0640'
    - require:
      - pkg: nginx
 
{% if site_name == 'yemaosheng_com' %}
upload_sslkey_to_nginx:
  file.recurse:
    - name: /srv/ssl
    - user: root
    - group: root
    - file_mode: '0644'
    - source: salt://website/configs/yemaosheng_com/sslkey
    - include_empty: True
 
upload_dhparam_to_nginx:
  file.managed:
    - name: /etc/nginx/dhparam.pem
    - source: salt://website/configs/yemaosheng_com/dhparam.pem
    - user: root
    - group: root
    - mode: '0644'
    - require:
      - pkg: nginx
{% endif %}
 
/etc/nginx/sites-enabled:
  service.running:
    - name: nginx
    - enable: True
    - reload: True
    - watch:
      - file: /etc/nginx/sites-enabled
  file.recurse:
    - name: /etc/nginx/sites-enabled
    - user: root
    - group: root
    - dir_mode: '2775'
    - file_mode: '0644'
    - source: salt://{{ pillar['nginx_site-enable'] }}
    - include_empty: True
    - clean: True
    - require:
      - pkg: nginx
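
A single state can be tried on its own before running the full highstate, e.g.:

root@ip-172-66-1-100:~# salt -G 'roles:YeMaosheng_com' state.sls nginx test=True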

AWS VPC point-to-point with GRE tunnel

Related: Connecting AWS VPCs in different Regions via IPsec

AWS China EC2:

root@ip-10-33-30-103:/home/ubuntu# cat /etc/network/interfaces.d/gre1.cfg
auto gre1
iface gre1 inet tunnel
  mode gre
  netmask 255.255.255.255
  address 10.0.0.2
  dstaddr 10.0.0.1
  endpoint 52.63.189.251
  local 10.33.30.103
  ttl 255
 
root@ip-10-33-30-103:/home/ubuntu# route add -net 172.33.0.0 netmask 255.255.0.0 gw 10.0.0.1
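
Bring the tunnel up and make sure the far end (10.0.0.1, the Sydney side) answers:

root@ip-10-33-30-103:/home/ubuntu# ifup gre1
root@ip-10-33-30-103:/home/ubuntu# ip tunnel show gre1
root@ip-10-33-30-103:/home/ubuntu# ping -c 3 10.0.0.1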

AWS Sydney EC2:

root@ip-172-33-1-190:/home/ubuntu# cat /etc/network/interfaces.d/gre1.cfg 
auto gre1
iface gre1 inet tunnel
  mode gre
  netmask 255.255.255.255
  address 10.0.0.1
  dstaddr 10.0.0.2
  endpoint 54.222.193.171
  local 172.33.1.190
  ttl 255
 
root@ip-172-33-1-190:/home/ubuntu# route add -net 10.33.0.0 netmask 255.255.0.0 gw 10.0.0.2
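
The route added above is lost on reboot; one option (not part of the original setup) is to hang it off the interface definition with a post-up line:

# appended to the iface gre1 stanza in /etc/network/interfaces.d/gre1.cfg
  post-up route add -net 10.33.0.0 netmask 255.255.0.0 gw 10.0.0.2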

Connecting AWS VPCs in different Regions via IPsec

AWS China<->AWS Sydney

AWS China VPC:

AWS China EC2:

root@ip-10-33-30-103:~# apt-get install openswan
root@ip-10-33-30-103:~# cat /etc/ipsec.conf 
config setup
    protostack=netkey
    interfaces=%defaultroute
    nat_traversal=yes
    force_keepalive=yes
    keep_alive=60
    oe=no
    nhelpers=0
conn ToAWSSydneyVPC
    left=10.33.30.103
    leftsubnets=10.33.0.0/16
    leftid=@AwsChinaGW
    right=52.63.189.251
    rightsubnets=172.33.0.0/16
    rightid=@AwsSydneyGW
    forceencaps=yes
    authby=secret
    auto=ignore
 
root@ip-10-33-30-103:~# cat /etc/ipsec.secrets 
@AwsChinaGW  @AwsSydneyGW: PSK "123321112233"
 
root@ip-10-33-30-103:~# cat /etc/sysctl.conf | grep -v '^#' | grep -v '^$'
net.ipv4.ip_forward = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.send_redirects = 0
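
Reload the kernel parameters so IP forwarding is active before the tunnel comes up:

root@ip-10-33-30-103:~# sysctl -p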
 
root@ip-10-33-30-103:~# ipsec verify
root@ip-10-33-30-103:~# service ipsec start
root@ip-10-33-30-103:~# ipsec auto --add ToAWSSydneyVPC
root@ip-10-33-30-103:~# ipsec auto --up ToAWSSydneyVPC
root@ip-10-33-30-103:~# service ipsec status
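
Once the connection is up, traffic between the two private ranges should flow; a quick check from the China side against the Sydney EC2's private address:

root@ip-10-33-30-103:~# ipsec auto --status | grep ToAWSSydneyVPC
root@ip-10-33-30-103:~# ping -c 3 172.33.1.190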

AWS Sydney VPC:

AWS Sydney EC2:

root@ip-172-33-1-190:~# apt-get install openswan
root@ip-172-33-1-190:~# cat /etc/ipsec.conf 
config setup
    protostack=netkey
    interfaces=%defaultroute
    nat_traversal=yes
    force_keepalive=yes
    keep_alive=60
    oe=no
    nhelpers=0
conn ToAWSCnVPC
    left=172.33.1.190
    leftsubnets=172.33.0.0/16
    leftid=@AwsSydneyGW
    right=54.222.193.171
    rightsubnets=10.33.0.0/16
    rightid=@AwsChinaGW
    forceencaps=yes
    authby=secret
    auto=ignore
 
root@ip-172-33-1-190:~# cat /etc/ipsec.secrets 
@AwsSydneyGW  @AwsChinaGW: PSK "123321112233"
 
root@ip-172-33-1-190:~# cat /etc/sysctl.conf | grep -v '^#' | grep -v '^$'
net.ipv4.ip_forward = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.send_redirects = 0
 
root@ip-172-33-1-190:~# ipsec verify
root@ip-172-33-1-190:~# service ipsec start
root@ip-172-33-1-190:~# ipsec auto --add ToAWSCnVPC
root@ip-172-33-1-190:~# ipsec auto --up ToAWSCnVPC
root@ip-172-33-1-190:~# service ipsec status

Make sure the security groups of both EC2 instances allow UDP 500, UDP 4500, and IP protocols 50 (ESP) and 51 (AH).
Disable 'Source/Dest checking' on both EC2 instances.
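
Both can also be done from the AWS CLI; a sketch with placeholder IDs, with 1.2.3.4 standing in for the peer's public IP:

# allow IKE/NAT-T plus ESP and AH from the remote gateway
aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol udp --port 500 --cidr 1.2.3.4/32
aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol udp --port 4500 --cidr 1.2.3.4/32
aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol 50 --cidr 1.2.3.4/32
aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol 51 --cidr 1.2.3.4/32

# disable source/destination checking on the tunnel instance
aws ec2 modify-instance-attribute --instance-id i-xxxxxxxx --no-source-dest-check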