Hadoop Cluster Installation and Configuration

#===Master,Slave1,2===
cat /etc/hosts
192.168.131.130 Master
192.168.131.131 Slave1
192.168.131.132 Slave2
 
#===Master===
cd ~/.ssh
rm ./id_rsa*
ssh-keygen -t rsa
cat ./id_rsa.pub >> ./authorized_keys
scp ~/.ssh/id_rsa.pub hadoop@Slave1:/home/hadoop/
scp ~/.ssh/id_rsa.pub hadoop@Slave2:/home/hadoop/
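#On each Slave, the copied key still has to be appended to authorized_keys before Master can SSH in without a password; a minimal sketch, assuming the hadoop user's home directory as above:
#===Slave1,2===
mkdir -p ~/.ssh && chmod 700 ~/.ssh
cat ~/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
rm ~/id_rsa.pub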
 
apt-get install openjdk-8-jre openjdk-8-jdk
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
 
wget http://mirror.metrocast.net/apache/hadoop/common/hadoop-2.7.2/hadoop-2.7.2.tar.gz
tar zxvf hadoop-2.7.2.tar.gz -C /usr/local/
cd /usr/local/;
mv hadoop-2.7.2 hadoop;
cd ./hadoop;
./bin/hadoop version
export PATH=$PATH:/usr/local/hadoop/bin:/usr/local/hadoop/sbin
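#These exports last only for the current shell; a sketch of making them permanent on every node, assuming the hadoop user's bash profile:
cat >> ~/.bashrc <<'EOF'
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$PATH:/usr/local/hadoop/bin:/usr/local/hadoop/sbin
EOF
source ~/.bashrc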
 
vi /usr/local/hadoop/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/jre

vi /usr/local/hadoop/etc/hadoop/slaves
#Write the hostname of each DataNode host into this file, one per line; the default is localhost.
#To use the Master node only as the NameNode, delete localhost and add just two lines: Slave1 and Slave2.
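#So for this cluster the slaves file contains just:
Slave1
Slave2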

vi /usr/local/hadoop/etc/hadoop/core-site.xml

<configuration>
        <property>
                <name>fs.defaultFS</name>
                <value>hdfs://Master:9000</value>
        </property>
        <property>
                <name>hadoop.tmp.dir</name>
                <value>file:/usr/local/hadoop/tmp</value>
                <description>Abase for other temporary directories.</description>
        </property>
</configuration>

vi /usr/local/hadoop/etc/hadoop/hdfs-site.xml

<configuration>
        <property>
                <name>dfs.namenode.secondary.http-address</name>
                <value>Master:50090</value>
        </property>
        <property>
                <name>dfs.replication</name>
                <value>1</value>
        </property>
        <property>
                <name>dfs.namenode.name.dir</name>
                <value>file:/usr/local/hadoop/tmp/dfs/name</value>
        </property>
        <property>
                <name>dfs.datanode.data.dir</name>
                <value>file:/usr/local/hadoop/tmp/dfs/data</value>
        </property>
</configuration>

vi /usr/local/hadoop/etc/hadoop/mapred-site.xml

<configuration>
        <property>
                <name>mapreduce.framework.name</name>
                <value>yarn</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.address</name>
                <value>Master:10020</value>
        </property>
        <property>
                <name>mapreduce.jobhistory.webapp.address</name>
                <value>Master:19888</value>
        </property>
</configuration>

vi /usr/local/hadoop/etc/hadoop/yarn-site.xml

<configuration>
        <property>
                <name>yarn.resourcemanager.hostname</name>
                <value>Master</value>
        </property>
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
</configuration>
#===Master===
cd /usr/local
rm -r ./hadoop/tmp
rm -r ./hadoop/logs/*
tar zcvf hadoop.master.tar.gz ./hadoop
scp ./hadoop.master.tar.gz Slave1:/home/hadoop
scp ./hadoop.master.tar.gz Slave2:/home/hadoop
 
#===Slave1,2===
tar -zxf ~/hadoop.master.tar.gz -C /usr/local
chown -R hadoop:hadoop /usr/local/hadoop
#===Master===
#Format the NameNode
hdfs namenode -format
 
#Start Hadoop
start-all.sh
#OR
start-dfs.sh
start-yarn.sh
mr-jobhistory-daemon.sh start historyserver
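#As a quick sanity check, jps on each node should list the expected daemons for the configuration above:
jps
#On Master: NameNode, SecondaryNameNode, ResourceManager, JobHistoryServer
#On Slave1/2: DataNode, NodeManager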

#Check the status of the NameNode and DataNodes via the web UI
http://192.168.131.130:50070/

hdfs dfsadmin -report
 
#Create the HDFS user directory
hdfs dfs -mkdir -p /user/hadoop
 
#Copy the configuration files in /usr/local/hadoop/etc/hadoop into the distributed filesystem as input
hdfs dfs -mkdir input
hdfs dfs -put /usr/local/hadoop/etc/hadoop/*.xml input
 
hdfs dfs -ls
Found 2 items
drwxr-xr-x   - hadoop supergroup          0 2016-03-03 22:48 input
drwxr-xr-x   - hadoop supergroup          0 2016-03-03 22:54 output
 
hdfs dfs -ls /
Found 3 items
drwxr-xr-x   - hadoop supergroup          0 2016-03-04 00:38 /system
drwxrwx---   - hadoop supergroup          0 2016-03-03 22:45 /tmp
drwxr-xr-x   - hadoop supergroup          0 2016-03-03 22:47 /user
 
hdfs dfs -ls /user/hadoop
Found 2 items
drwxr-xr-x   - hadoop supergroup          0 2016-03-03 22:48 /user/hadoop/input
drwxr-xr-x   - hadoop supergroup          0 2016-03-03 22:54 /user/hadoop/output
 
hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar grep input output 'dfs[a-z.]+'
hdfs dfs -cat output/*
1       dfsadmin
1       dfs.replication
1       dfs.namenode.secondary.http
1       dfs.namenode.name.dir
1       dfs.datanode.data.dir
 
hdfs dfs -rmr output
hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar grep input output 'mast[a-z.]+'
hdfs dfs -cat output/*
1       masters
1       master.protocol.acl

#View job progress via the YARN web UI
http://192.168.131.130:8088/cluster

#===Master===
stop-all.sh
#OR
stop-yarn.sh
stop-dfs.sh
mr-jobhistory-daemon.sh stop historyserver

#Troubleshooting notes

tail /usr/local/hadoop/logs/hadoop-hadoop-datanode-Slave1.log
  FATAL org.apache.hadoop.hdfs.server.datanode.DataNode: Initialization failed for Block pool <registering> (Datanode Uuid unassigned) service to Master/9000. Exiting.
http://stackoverflow.com/questions/30521474/hadoop-hdfs-formatting-gets-error-failed-for-block-pool
 
cat /usr/local/hadoop/logs/hadoop-hadoop-datanode-Slave1.log | grep CID
  2016-03-06 18:56:24,584 WARN org.apache.hadoop.hdfs.server.common.Storage: java.io.IOException: Incompatible clusterIDs in /usr/local/hadoop/tmp/dfs/data: namenode clusterID = CID-f134a9fa-041e-46f8-8e03-6cf78226a9cd; datanode clusterID = CID-4e691c0a-1cb2-46fd-9793-734a9b9047cf
 
Open the DataNode's VERSION file (here /usr/local/hadoop/tmp/dfs/data/current/VERSION, given dfs.datanode.data.dir above) and change the clusterID to match the NameNode:
  clusterID=CID-f134a9fa-041e-46f8-8e03-6cf78226a9cd
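An alternative fix, assuming the DataNodes hold nothing worth keeping, is to wipe their data directory so they re-register with the NameNode's new clusterID:
#===Slave1,2===
rm -rf /usr/local/hadoop/tmp/dfs/data
#then restart HDFS from Master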

Deleting files in Azure Blob Storage with Azure CLI

azure login
azure storage account list
azure storage account set YourAccountName
export AZURE_STORAGE_CONNECTION_STRING='DefaultEndpointsProtocol=https;AccountName=YourAccountName;AccountKey=myDSCnRVxxxxxNCwcFkkxxxxxx4nbhV10unY2yFRHNGb8VHZLjg77F9WA=='
azure storage container show YourContainer
azure storage container set YourContainer
azure storage blob show YourContainer BlobName
azure storage blob delete YourContainer BlobName
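To clear out many blobs at once, listing and deleting can be combined; a rough sketch, assuming jq is available, AZURE_STORAGE_CONNECTION_STRING is still exported, and the classic CLI's prefix argument and -q (skip confirmation) flag behave as expected (logs/ is just an example prefix):
for blob in $(azure storage blob list YourContainer logs/ --json | jq -r '.[].name'); do
    azure storage blob delete -q YourContainer "$blob"
done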

You can address a blob in your storage account using the following URL format:
http://storage-account-name.blob.core.windows.net/container-name/blob-name

For example, here is a URL that addresses a blob named MOV1.AVI in a container named movies:
http://sally.blob.core.windows.net/movies/MOV1.AVI

Dynamic DynamoDB


http://dynamic-dynamodb.readthedocs.org/en/latest/cloudformation_template.html
https://aws.amazon.com/cn/blogs/aws/auto-scale-dynamodb-with-dynamic-dynamodb/

This example will configure Dynamic DynamoDB to:
- Scale up your DynamoDB table when the consumed reads reach 90% of the total provisioned reads
- Scale up your DynamoDB table when the consumed writes reach 90% of the total provisioned writes
- Scale up your reads by 50%
- Scale up your writes by 40%
- Scale down your DynamoDB table when the consumed reads fall below 30% of the total provisioned reads
- Scale down your DynamoDB table when the consumed writes fall below 40% of the total provisioned writes
- Scale down your reads by 40%
- Scale down your writes by 70%
- Check for changes every 5 minutes
 
Command:
dynamic-dynamodb --table-name my-table \
--reads-upper-threshold 90 \
--reads-lower-threshold 30 \
--increase-reads-with 50 \
--decrease-reads-with 40 \
--writes-upper-threshold 90 \
--writes-lower-threshold 40 \
--increase-writes-with 40 \
--decrease-writes-with 70 \
--check-interval 300
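The same options can also live in a configuration file so the tool can run continuously instead of as a one-off command; a rough sketch of what that might look like (option names mirror the CLI flags, while the region, file path and daemon invocation are assumptions on my part, see the docs linked above):

# /etc/dynamic-dynamodb.conf
[global]
region: us-east-1
check-interval: 300

[table: ^my-table$]
reads-upper-threshold: 90
reads-lower-threshold: 30
increase-reads-with: 50
decrease-reads-with: 40
writes-upper-threshold: 90
writes-lower-threshold: 40
increase-writes-with: 40
decrease-writes-with: 70

dynamic-dynamodb --config /etc/dynamic-dynamodb.conf --daemon start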

I don't get why AWS doesn't just build this into the console -_-

Alpine Linux LNMP Installation and Configuration


I saw that the official Docker images are going to start using it, so I figured I'd install it and take a look.
Download: www.alpinelinux.org

apk add nginx memcached mysql mysql-client php php-dev php-cli php-pear php-phar php-fpm php-gd php-memcache php-json php-mysql php-pdo php-pdo_mysql php-mysqli php-zip php-zlib php-bz2 php-ctype php-mcrypt bash git;
apk add gcc g++ make autoconf;
 
#first run php -m on the command line
#then compare the output with php -n -m
vi /usr/bin/pecl;  # 'exec $PHP -C -q $INCARG ... '
pecl install xdebug
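#A quick way to do the comparison mentioned in the comments above (php -n skips php.ini, so the diff shows exactly which modules the config loads):
diff <(php -n -m | sort) <(php -m | sort)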
 
vi /etc/php/php.ini;
...
zend_extension=/usr/lib/php/modules/xdebug.so
xdebug.remote_enable=1
xdebug.remote_port=9090
xdebug.remote_connect_back=1
xdebug.auto_trace=1
xdebug.collect_params=1
xdebug.collect_return=1
xdebug.profiler_enable=1
xdebug.var_display_max_data=10000
xdebug.var_display_max_depth=20
...
 
vi /etc/nginx/nginx.conf;
    ...
    location ~ \.php$ {                                                     
        root           html;                                                
        fastcgi_pass   127.0.0.1:9000;                                      
        fastcgi_index  index.php;                                           
        fastcgi_param  SCRIPT_FILENAME  $document_root$fastcgi_script_name; 
        include        fastcgi_params;                                      
    }    
    ...
 
vi /usr/share/nginx/html/index.php;
<?php
    phpinfo();
?>
 
apk add curl openssl php-openssl;
curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer;
 
/etc/init.d/php-fpm start;
/etc/init.d/nginx start;
 
/etc/init.d/mariadb setup;
/etc/init.d/mariadb start;
/etc/init.d/memcached start;
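#To have these services come back after a reboot, they can be added to OpenRC's default runlevel; a sketch, assuming the init script names used above:
rc-update add php-fpm default;
rc-update add nginx default;
rc-update add mariadb default;
rc-update add memcached default;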
 
apk add phpmyadmin;
cd /usr/share/nginx/html/;
cp -rf /usr/share/webapps/phpmyadmin ./;
cp /etc/phpmyadmin/config.inc.php ./phpmyadmin;
apk del phpmyadmin;
 
cd /usr/share/nginx/html;
wget https://github.com/jokkedk/webgrind/archive/master.zip;
unzip master.zip;
mv webgrind-master webgrind;
apk add python graphviz font-misc-misc;
 
cd /etc/profile.d;
cat color_prompt;
  # Setup a red prompt for root and a green one for users.
  # rename this file to color_prompt.sh to actually enable it
  NORMAL="\[\e[0m\]"
  RED="\[\e[1;31m\]"
  GREEN="\[\e[1;32m\]"
  if [ "$USER" = root ]; then
        PS1="$RED\h [$NORMAL\w$RED]# $NORMAL"
  else
        PS1="$GREEN\h [$NORMAL\w$GREEN]\$ $NORMAL"
  fi
 
mv color_prompt color_prompt.sh;
vi alias.sh;
  alias ll="ls -al"
  alias rm="rm -i"
 
#yemaosheng.com

SMS and phone call from Zabbix using Twilio



vim /etc/zabbix/alert.d/zabbix-alert-sms-twilio.sh

#!/usr/bin/python
import sys
from twilio.rest import TwilioRestClient
 
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "AC0axxxxxxxxxxxxxxxxxxxx296ae"
auth_token  = "93e4xxxxxxxxxxxxxxxxxxx63e9"
client = TwilioRestClient(account_sid, auth_token)
 
message = client.messages.create(body=sys.argv[2],
    to=sys.argv[1],
    from_="+16572338xx8") # Replace with your Twilio number
print message.sid
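Before wiring the script into Zabbix, it is worth making it executable and testing it by hand; a sketch (the destination number is a placeholder, and the twilio Python package is assumed to be installed via pip):

pip install twilio
chmod +x /etc/zabbix/alert.d/zabbix-alert-sms-twilio.sh
# argv[1] = destination number, argv[2] = message body
/etc/zabbix/alert.d/zabbix-alert-sms-twilio.sh "+8613800000000" "Test alert from Zabbix"

The call script below follows the same pattern, except the message is read out by Twilio via the TwiML URL.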

vim /etc/zabbix/alert.d/zabbix-alert-call-twilio.sh

#!/usr/bin/python
import sys
import urllib
from twilio.rest import TwilioRestClient
 
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "AC0axxxxxxxxxxxxxxxxxxxx296ae"
auth_token  = "93e4xxxxxxxxxxxxxxxxxxx63e9"
client = TwilioRestClient(account_sid, auth_token)
 
TwiML="http://yemaosheng.com/getTwiML.php?say="+urllib.quote(sys.argv[2])
 
call = client.calls.create(to=sys.argv[1],
    from_="+16572338xx8",
    url=TwiML)
print call.sid

vim /var/www/html/getTwiML.php

<?xml version="1.0" encoding="UTF-8"?>
<Response>
    <Say voice="woman"><?php echo htmlspecialchars($_GET['say']); ?></Say>
    <Hangup/>
</Response>
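A quick sanity check of the TwiML endpoint before Twilio fetches it; the response should be the XML above with the say parameter echoed inside <Say>:

curl "http://yemaosheng.com/getTwiML.php?say=Server+is+down"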

A forwarder for colleagues to reach AWS's overseas regions in an emergency

vim /etc/sysctl.conf
net.ipv4.ip_forward=1
 
echo 1 > /proc/sys/net/ipv4/ip_forward
 
iptables -t nat -A PREROUTING -i eth0 -p tcp --dport 60022 -j DNAT --to-destination F.GFW.I.P:22
iptables -t nat -A POSTROUTING -j MASQUERADE
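The rules above only live in memory; a sketch of persisting everything across reboots, assuming a Debian/Ubuntu box with iptables-persistent available:

sysctl -p                                  # apply net.ipv4.ip_forward from /etc/sysctl.conf
apt-get install iptables-persistent
iptables-save > /etc/iptables/rules.v4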

Configuring a load balancer with the Azure CLI


https://azure.microsoft.com/en-us/documentation/articles/load-balancer-get-started-internet-arm-cli/
https://azure.microsoft.com/en-us/documentation/articles/load-balancer-get-started-ilb-arm-cli/
Just worked through the two articles above once as practice.

azure login
azure config mode arm
azure account set <Subscription ID>
 
azure group create ResourceGroupsName eastus2
 
azure network vnet create ResourceGroupsName ResourceGroupsName-Vnet eastus2 -a 10.0.0.0/16
azure network vnet subnet create ResourceGroupsName ResourceGroupsName-Vnet ResourceGroupsName-VnetSubnet -a 10.0.0.0/24
azure network public-ip create -g ResourceGroupsName -n ResourceGroupsName-PublicIP -l eastus2 -d rgn-esg -a static -i 4
azure network lb create ResourceGroupsName ResourceGroupsName-LB eastus2
azure network lb frontend-ip create ResourceGroupsName ResourceGroupsName-LB ResourceGroupsName-FrontendPool -i ResourceGroupsName-PublicIP
azure network lb address-pool create ResourceGroupsName ResourceGroupsName-LB ResourceGroupsName-BackendPool
 
azure network lb inbound-nat-rule create -g ResourceGroupsName -l ResourceGroupsName-LB -n ssh1 -p tcp -f 62201 -b 22
azure network lb inbound-nat-rule create -g ResourceGroupsName -l ResourceGroupsName-LB -n ssh2 -p tcp -f 62202 -b 22
 
azure network lb inbound-nat-rule create -g ResourceGroupsName -l ResourceGroupsName-LB -n ssh3 -p tcp -f 62203 -b 22
 
azure network lb rule create ResourceGroupsName ResourceGroupsName-LB lbrule -p tcp -f 9200 -b 9200 -t ResourceGroupsName-FrontendPool -o ResourceGroupsName-BackendPool
azure network lb probe create -g ResourceGroupsName -l ResourceGroupsName-LB -n healthprobe -p "http" -o 9200 -f / -i 15 -c 4
 
azure network lb show ResourceGroupsName ResourceGroupsName-LB
 
azure network nic create -g ResourceGroupsName -n lb-nic1-be --subnet-name ResourceGroupsName-VnetSubnet --subnet-vnet-name ResourceGroupsName-Vnet -d "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/ResourceGroupsName/providers/Microsoft.Network/loadBalancers/ResourceGroupsName-LB/backendAddressPools/ResourceGroupsName-BackendPool" -e "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/ResourceGroupsName/providers/Microsoft.Network/loadBalancers/ResourceGroupsName-LB/inboundNatRules/ssh1" eastus2
azure network nic create -g ResourceGroupsName -n lb-nic2-be --subnet-name ResourceGroupsName-VnetSubnet --subnet-vnet-name ResourceGroupsName-Vnet -d "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/ResourceGroupsName/providers/Microsoft.Network/loadBalancers/ResourceGroupsName-LB/backendAddressPools/ResourceGroupsName-BackendPool" -e "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/ResourceGroupsName/providers/Microsoft.Network/loadBalancers/ResourceGroupsName-LB/inboundNatRules/ssh2" eastus2
 
azure vm create --resource-group ResourceGroupsName --name es1 --location eastus2 --vnet-name ResourceGroupsName-Vnet --vnet-subnet-name ResourceGroupsName-VnetSubnet --nic-name lb-nic1-be --availset-name ResourceGroupsName-AvSet --storage-account-name rgnesgdata -z Standard_D3_V2 --os-type Linux --image-urn Canonical:UbuntuServer:14.04.3-LTS:14.04.201512032
azure vm create --resource-group ResourceGroupsName --name es2 --location eastus2 --vnet-name ResourceGroupsName-Vnet --vnet-subnet-name ResourceGroupsName-VnetSubnet --nic-name lb-nic2-be --availset-name ResourceGroupsName-AvSet --storage-account-name rgnesgdata -z Standard_D3_V2 --os-type Linux --image-urn Canonical:UbuntuServer:14.04.3-LTS:14.04.201512032
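Once both VMs are up, the inbound NAT rules can be verified by SSHing to the load balancer's public IP on the forwarded ports; a sketch (the admin user and credentials are whatever was chosen during azure vm create):

azure network public-ip show ResourceGroupsName ResourceGroupsName-PublicIP   # note the IP address
ssh <admin-user>@<public-ip> -p 62201   # ssh1 rule -> es1
ssh <admin-user>@<public-ip> -p 62202   # ssh2 rule -> es2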

Circumventing the Great Firewall is getting harder and harder


Cisco AnyConnect doesn't seem to be blocked by the government, presumably because foreign companies in China use it so heavily.
Found a handy Docker image for it. Having Docker is convenient; it saves you from configuring it all yourself.
https://github.com/TommyLau/docker-ocserv

~# docker run --name ocserv --privileged -p 443:443 -p 443:443/udp -d tommylau/ocserv
~# docker exec -ti ocserv ocpasswd -c /etc/ocserv/ocpasswd -d test
~# docker exec -ti ocserv ocpasswd -c /etc/ocserv/ocpasswd yemaosheng

I also set up a Shadowsocks server on Azure; the speed isn't great, but it's just about enough to read other people's tweets.

How to use AWS ElastiCache on Azure

[ec2-user@ip-174-129-100-10 ~]$ cat /etc/rc.local
echo "redis-name";
ADDRESS=`nslookup redis-name.7exo1h.0001.use1.cache.amazonaws.com | grep "Address:" | tail -n 1 | awk '{print $2}'`;
echo $ADDRESS;
iptables -t nat -A PREROUTING -i eth0 -p tcp -m tcp --dport 26379 -j DNAT --to-destination $ADDRESS:6379;
iptables -t nat -A POSTROUTING -j MASQUERADE;
sysctl net.ipv4.ip_forward=1;
 
[ec2-user@ip-174-129-100-10 ~]$ iptables -t nat -L
Chain PREROUTING (policy ACCEPT)
target     prot opt source               destination         
DNAT       tcp  --  anywhere             anywhere             tcp dpt:26379 to:10.153.181.100:6379
 
Chain INPUT (policy ACCEPT)
target     prot opt source               destination         
 
Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination         
 
Chain POSTROUTING (policy ACCEPT)
target     prot opt source               destination         
MASQUERADE  all  --  anywhere             anywhere
azure@azure:/home/azure# cat /etc/rc.local
# redis-name
iptables -t nat -I OUTPUT -p tcp -d redis-name.7exo1h.0001.use1.cache.amazonaws.com --dport 6379  -j DNAT --to-destination 174.129.100.10:26379
 
azure@azure:/home/azure# iptables -t nat -L
Chain PREROUTING (policy ACCEPT)
target     prot opt source               destination         
 
Chain INPUT (policy ACCEPT)
target     prot opt source               destination         
 
Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination         
DNAT       tcp  --  anywhere             ec2-50-17-68-100.compute-1.amazonaws.com  tcp dpt:6379 to:174.129.100.10:26379
 
Chain POSTROUTING (policy ACCEPT)
target     prot opt source               destination
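To confirm the whole relay from the Azure side, connecting to the ElastiCache hostname on the normal Redis port should now be transparently rewritten to the EC2 forwarder; a sketch, assuming redis-cli is installed on the Azure box:

# DNAT rewrites this to 174.129.100.10:26379, which the EC2 box forwards on to ElastiCache port 6379
redis-cli -h redis-name.7exo1h.0001.use1.cache.amazonaws.com -p 6379 ping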