网站指向不同路径的nginx配置

Standard
upstream fastcgi_backend {
    server 127.0.0.1:9000;
    server unix:/var/run/php5-fpm.sock;
    keepalive 10;
}
...
server {
  ...
  ...
  # Route PHP requests to different document roots depending on the URI.
  location ~ \.php {
    # Default application root.
    set $php_root /var/www/website/abc/public;
    include /etc/nginx/fastcgi_params;

    # REST API endpoints are served from a separate application root.
    # NOTE: the original had a second "if" for (photos|posts) only; that
    # pattern is fully covered by (photos|posts|links), so one test suffices.
    if ($request_uri ~ /api/user/(photos|posts|links) ) {
      set $php_root /var/www/website/rest/public;
    }

    fastcgi_split_path_info ^(.+\.php)(/.+)$;
    fastcgi_param PATH_INFO $fastcgi_path_info;
    fastcgi_param PATH_TRANSLATED $php_root$fastcgi_path_info;
    fastcgi_param SCRIPT_NAME $fastcgi_script_name;
    fastcgi_param SCRIPT_FILENAME $php_root$fastcgi_script_name;
    fastcgi_pass fastcgi_backend;
    fastcgi_index index.php;
  }
  ...
}

Jenkins安装配置

Standard

详细参考:http://jenkins-php.org

# Install Java (required by Jenkins) and add the Jenkins apt repository.
apt-get install default-jdk
wget -q -O - https://jenkins-ci.org/debian/jenkins-ci.org.key | apt-key add -
sh -c 'echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list'
apt-get update
apt-get install jenkins
 
# Install the plugins used by the PHP job template, then restart Jenkins.
java -jar jenkins-cli.jar -s http://localhost:8080 install-plugin git phing checkstyle cloverphp crap4j dry htmlpublisher jdepend plot pmd violations warnings xunit
java -jar jenkins-cli.jar -s http://localhost:8080 safe-restart
 
# PHP QA tool chain (fixed: "pdepen" was a typo for "pdepend").
apt-get install phpunit phpcodesniffer phploc pdepend phpmd phpcpd phpdox php5-xdebug libxml2-utils
1.
# Create the php-template job directly from the upstream config.xml.
curl -L https://raw.githubusercontent.com/sebastianbergmann/php-jenkins-template/master/config.xml | java -jar jenkins-cli.jar -s http://localhost:8080 create-job php-template
 
Or add the template manually:
 
cd $JENKINS_HOME/jobs
mkdir php-template
cd php-template
wget https://raw.github.com/sebastianbergmann/php-jenkins-template/master/config.xml
cd ..
# Jenkins must own the job directory or it will not load the job.
chown -R jenkins:jenkins php-template/
Reload Jenkins configuration, for instance using the Jenkins CLI:
java -jar jenkins-cli.jar -s http://localhost:8080 reload-configuration
 
2.Click on "New Job".
3.Enter a "Job name".
4.Select "Copy existing job" and enter "php-template" into the "Copy from" field.
5.Click "OK".
6.Uncheck the "Disable Build" option.
7.Fill in your "Source Code Management" information.
8.Configure a "Build Trigger", for instance "Poll SCM".
9.Click "Save".

Hive安装配置

Standard

Hadoop集群安装配置

# Download and unpack Hive.
wget http://mirrors.gigenet.com/apache/hive/stable-2/apache-hive-2.0.0-bin.tar.gz
 
tar zxf apache-hive-2.0.0-bin.tar.gz
# The tarball unpacks to "apache-hive-2.0.0-bin" (the original "mv
# apache-hive-2.0.0" referenced a directory that does not exist).
mv apache-hive-2.0.0-bin /usr/local/
cd /usr/local
mv apache-hive-2.0.0-bin hive
 
# Make sure HIVE_HOME/bin is on every login shell's PATH.
cat /etc/profile
...
HIVE_HOME=/usr/local/hive
PATH=$PATH:$HIVE_HOME/bin
export HIVE_HOME PATH
...
 
# Hive reads hive-site.xml; copy the template to that name and edit it
# (the original copied it to hive-default.xml but then edited hive-site.xml).
cd /usr/local/hive/conf
cp hive-default.xml.template hive-site.xml
vi /usr/local/hive/conf/hive-site.xml
<configuration>
        <!-- MySQL-backed metastore connection settings. -->
        <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://Master:3306/hive?createDatabaseIfNotExist=true</value>
        <description>JDBC connect string for a JDBC metastore</description>
        </property>
        <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.jdbc.Driver</value>
        <description>Driver class name for a JDBC metastore</description>
        </property>
        <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <!-- Fixed malformed tag: was "<value>hive<value>" (unclosed element). -->
        <value>hive</value>
        <description>username to use against metastore database</description>
        </property>
        <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>hive</value>
        <description>password to use against metastore database</description>
        </property>
</configuration>
# Create the metastore user and database in MySQL.
# NOTE(review): the grant is for 'hive'@'hadoop-master' while the JDBC URL
# above points at host "Master" — confirm these hostnames match your cluster.
mysql>CREATE USER 'hive' IDENTIFIED BY 'hive';
mysql>GRANT ALL PRIVILEGES ON *.* TO 'hive'@'hadoop-master' WITH GRANT OPTION;
mysql>FLUSH privileges;
mysql>CREATE DATABASE hive;
# Install the MySQL JDBC driver into Hive's lib directory.
cd ~
wget http://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-5.1.38.tar.gz
tar zxf mysql-connector-java-5.1.38.tar.gz
cd mysql-connector-java-5.1.38
cp mysql-connector-java-5.1.38-bin.jar /usr/local/hive/lib/
 
# Copy the Hive installation to the slave nodes.
scp -r /usr/local/hive Slave1:/usr/local/
scp -r /usr/local/hive Slave2:/usr/local/
#create the schema
schematool -initSchema -dbType mysql
 
#for client on Slave1,2
hive --service metastore &
 
hive
hive> show databases;

#Slave1,2

cat /usr/local/hive/conf/hive-site.xml
<configuration>
    <!-- Clients only need the URI of the metastore service on Master. -->
    <property>  
        <name>hive.metastore.uris</name>  
        <value>thrift://Master:9083</value>
    </property>
</configuration>

Alpine Linux LNMP安装配置

Standard

看到说Docker官方Image将会开始要使用它,所以装个看看呗。
下载地址:www.alpinelinux.org

# Install the LNMP stack (nginx, MariaDB/MySQL, PHP-FPM, memcached) plus
# the build tools needed to compile PECL extensions on Alpine Linux.
apk add nginx memcached mysql mysql-client php php-dev php-cli php-pear php-phar php-fpm php-gd php-memcache php-json php-mysql php-pdo php-pdo_mysql php-mysqli php-zip php-zlib php-bz2 php-ctype php-mcrypt bash git;
apk add gcc g++ make autoconf;
 
#first running php -m on the command
#then compare the output to php -n -m
vi /usr/bin/pecl;  # 'exec $PHP -C -q $INCARG ... '
pecl install xdebug
 
# Enable and configure the Xdebug extension in php.ini.
vi /etc/php/php.ini;
...
zend_extension=/usr/lib/php/modules/xdebug.so
xdebug.remote_enable=1
xdebug.remote_port=9090
xdebug.remote_connect_back=1
xdebug.auto_trace=1
xdebug.collect_params=1
xdebug.collect_return=1
xdebug.profiler_enable=1
xdebug.var_display_max_data=10000
xdebug.var_display_max_depth=20
...
 
# Hand *.php requests to php-fpm listening on 127.0.0.1:9000.
vi /etc/nginx/nginx.conf;
    ...
    location ~ \.php$ {
        root           html;
        fastcgi_pass   127.0.0.1:9000;
        fastcgi_index  index.php;
        fastcgi_param  SCRIPT_FILENAME  $document_root$fastcgi_script_name;
        include        fastcgi_params;
    }
    ...
 
# Smoke-test page for the PHP setup.
vi /usr/share/nginx/html/index.php;
<?php
    phpinfo();
?>
 
# Install Composer system-wide.
apk add curl openssl php-openssl;
curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer;
 
/etc/init.d/php-fpm start;
/etc/init.d/nginx start;
 
/etc/init.d/mariadb setup;
/etc/init.d/mariadb start;
/etc/init.d/memcached start;
 
# Copy phpMyAdmin into the web root, then remove the package
# (the copied files remain in place).
apk add phpmyadmin;
cd /usr/share/nginx/html/;
cp -rf /usr/share/webapps/phpmyadmin ./;
cp /etc/phpmyadmin/config.inc.php ./phpmyadmin;
apk del phpmyadmin;
 
# Webgrind: web front-end for Xdebug profiler output (needs python/graphviz).
cd /usr/share/nginx/html;
wget https://github.com/jokkedk/webgrind/archive/master.zip;
unzip master.zip;
mv webgrind-master webgrind;
apk add python graphviz font-misc-misc;
 
# Enable the colored prompt shipped (disabled) in /etc/profile.d.
cd /etc/profile.d;
cat color_prompt;
  # Setup a red prompt for root and a green one for users.
  # rename this file to color_prompt.sh to actually enable it
  NORMAL="\[\e[0m\]"
  RED="\[\e[1;31m\]"
  GREEN="\[\e[1;32m\]"
  if [ "$USER" = root ]; then
        PS1="$RED\h [$NORMAL\w$RED]# $NORMAL"
  else
        PS1="$GREEN\h [$NORMAL\w$GREEN]\$ $NORMAL"
  fi
 
mv color_prompt color_prompt.sh;
vi alias.sh;
  alias ll="ls -al"
  alias rm="rm -i"
 
#yemaosheng.com

用Azure命令行配load balancer

Standard

https://azure.microsoft.com/en-us/documentation/articles/load-balancer-get-started-internet-arm-cli/
https://azure.microsoft.com/en-us/documentation/articles/load-balancer-get-started-ilb-arm-cli/
只是参照上面两篇敲一遍练练手.

# Log in and select the subscription; use Azure Resource Manager mode.
azure login
azure config mode arm
azure account set <Subscription ID>
 
# Resource group.
azure group create ResourceGroupsName eastus2
 
# Virtual network, subnet, static public IP, and the load balancer
# with its frontend and backend pools.
azure network vnet create ResourceGroupsName ResourceGroupsName-Vnet eastus2 -a 10.0.0.0/16
azure network vnet subnet create ResourceGroupsName ResourceGroupsName-Vnet ResourceGroupsName-VnetSubnet -a 10.0.0.0/24
azure network public-ip create -g ResourceGroupsName -n ResourceGroupsName-PublicIP -l eastus2 -d rgn-esg -a static -i 4
azure network lb create ResourceGroupsName ResourceGroupsName-LB eastus2
azure network lb frontend-ip create ResourceGroupsName ResourceGroupsName-LB ResourceGroupsName-FrontendPool -i ResourceGroupsName-PublicIP
azure network lb address-pool create ResourceGroupsName ResourceGroupsName-LB ResourceGroupsName-BackendPool
 
# Inbound NAT rules: external ports 62201-62203 forward to SSH (22) on each VM.
azure network lb inbound-nat-rule create -g ResourceGroupsName -l ResourceGroupsName-LB -n ssh1 -p tcp -f 62201 -b 22
azure network lb inbound-nat-rule create -g ResourceGroupsName -l ResourceGroupsName-LB -n ssh2 -p tcp -f 62202 -b 22
 
azure network lb inbound-nat-rule create -g ResourceGroupsName -l ResourceGroupsName-LB -n ssh3 -p tcp -f 62203 -b 22
 
# Load-balancing rule and HTTP health probe, both on port 9200.
azure network lb rule create ResourceGroupsName ResourceGroupsName-LB lbrule -p tcp -f 9200 -b 9200 -t ResourceGroupsName-FrontendPool -o ResourceGroupsName-BackendPool
azure network lb probe create -g ResourceGroupsName -l ResourceGroupsName-LB -n healthprobe -p "http" -o 9200 -f / -i 15 -c 4
 
azure network lb show ResourceGroupsName ResourceGroupsName-LB
 
# NICs joined to the backend pool (-d) and to their NAT rule (-e).
azure network nic create -g ResourceGroupsName -n lb-nic1-be --subnet-name ResourceGroupsName-VnetSubnet --subnet-vnet-name ResourceGroupsName-Vnet -d "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/ResourceGroupsName/providers/Microsoft.Network/loadBalancers/ResourceGroupsName-LB/backendAddressPools/ResourceGroupsName-BackendPool" -e "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/ResourceGroupsName/providers/Microsoft.Network/loadBalancers/ResourceGroupsName-LB/inboundNatRules/ssh1" eastus2
azure network nic create -g ResourceGroupsName -n lb-nic2-be --subnet-name ResourceGroupsName-VnetSubnet --subnet-vnet-name ResourceGroupsName-Vnet -d "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/ResourceGroupsName/providers/Microsoft.Network/loadBalancers/ResourceGroupsName-LB/backendAddressPools/ResourceGroupsName-BackendPool" -e "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/ResourceGroupsName/providers/Microsoft.Network/loadBalancers/ResourceGroupsName-LB/inboundNatRules/ssh2" eastus2
 
# Two Ubuntu VMs in one availability set, each bound to one of the NICs.
azure vm create --resource-group ResourceGroupsName --name es1 --location eastus2 --vnet-name ResourceGroupsName-Vnet --vnet-subnet-name ResourceGroupsName-VnetSubnet --nic-name lb-nic1-be --availset-name ResourceGroupsName-AvSet --storage-account-name rgnesgdata -z Standard_D3_V2 --os-type Linux --image-urn Canonical:UbuntuServer:14.04.3-LTS:14.04.201512032
azure vm create --resource-group ResourceGroupsName --name es2 --location eastus2 --vnet-name ResourceGroupsName-Vnet --vnet-subnet-name ResourceGroupsName-VnetSubnet --nic-name lb-nic2-be --availset-name ResourceGroupsName-AvSet --storage-account-name rgnesgdata -z Standard_D3_V2 --os-type Linux --image-urn Canonical:UbuntuServer:14.04.3-LTS:14.04.201512032

Sublime常用

Standard

常用插件:
BracketHighlighter
CodeIntel
ConvertToUTF8
DocBlockr
Emmet
SideBarEnhancements

Preferences.sublime-settings:

{
	"auto_complete_delay": 300,
	"default_encoding": "UTF-8",
	"default_line_ending": "unix",
	"font_size": 12,
	"font_face": "microsoft yahei",
	"highlight_line": true,
	"draw_minimap_border": true,
	"always_show_minimap_viewport": true,
	"ignored_packages":
	[
		"Vintage"
	],
	"line_padding_bottom": 1,
	"line_padding_top": 1,
	"show_encoding": true,
	"tab_size": 4,
	"translate_tabs_to_spaces": true,
	"update_check": false,
	"word_wrap": false
}

Puppet安装配置使用

Standard

记得以前看过,貌似感觉cfengin比较好,不过既然老外那边已经用了这个,那花些时间再看看熟熟手。

# Master and agent must resolve each other's hostnames.
vi /etc/hosts
192.168.0.10 puppetserver
192.168.0.11 puppetclient1
#yemaosheng.com
yum -y install ruby ruby-libs ruby-rdoc
wget http://yum.puppetlabs.com/el/6/products/i386/puppetlabs-release-6-7.noarch.rpm
yum install puppetlabs-release-6-7.noarch.rpm
[server] yum -y install puppet-server
[server] service puppetmaster start
 
[client] yum -y install puppet
[client] puppet agent --no-daemonize --onetime --verbose --debug --server=puppetserver
 
[server] puppet cert list --all #lists an unsigned client key (the one without a leading + sign)
[server] puppet cert --sign puppetclient1 #sign the client's key

在puppetclient1上新建一个helloworld.txt文件

[server] vi /etc/puppet/manifests/site.pp
# Default node definition: create /tmp/helloworld.txt on every agent.
node default {
  file {
    "/tmp/helloworld.txt": content => "hello, world";
  }
}
[client] puppet agent --test --server=puppetserver

在puppetclient1上安装tree

[server] vi /etc/puppet/manifests/site.pp
import 'nodes.pp'
[server] vi /etc/puppet/manifests/nodes.pp
node 'puppetclient1' {
      include tree
}
[server] vi /etc/puppet/puppet.conf
modulepath=/etc/puppet/modules:/var/lib/puppet/modules:/opt/modules
[server] mkdir -p /etc/puppet/modules/tree/{files,manifests,templates}
[server] vi /etc/puppet/modules/tree/manifests/init.pp
# Minimal module: install the "tree" package.
class tree {
  package { tree:
    ensure => present,
  }
  #resource_type { title:
  #  attribute => value,
  #  }
  #}
}
[client] puppet agent --no-daemonize --verbose --debug --server=puppetserver

在puppetclient1上安装memcached并同步配置文件

[server] mkdir -p /etc/puppet/modules/memcached/{files,manifests,templates}
[server] cp /etc/sysconfig/memcached /etc/puppet/modules/memcached/files/
[server] vi /etc/puppet/modules/memcached/manifests/init.pp
# Install memcached, manage its config file, keep the service running.
class memcached {
  package { memcached:
    ensure => present,
  }
  file { '/etc/sysconfig/memcached':
    # Fixed: attribute list needs a comma after each entry; the original
    # "notify => Service['memcached']" without a comma is a syntax error.
    notify => Service['memcached'],
    mode => '644',
    owner => 'root',
    group => 'root',
    source => 'puppet:///modules/memcached/memcached',
  }
  service { 'memcached':
    ensure => 'running',
    enable => 'true',
  }
}
[client] puppet agent --server=puppetserver

从master主动发指令让agent同步

# Let the master trigger agent runs remotely ("puppet kick"):
# the agent must accept incoming requests and listen on its REST API.
[client] vi /etc/puppet/auth.conf
path /
auth any
allow *
 
[client] vi /etc/puppet/puppet.conf
...
listen = true
...
 
[server] puppet kick --host puppetclient1

安装配置Mcollective

[server] yum -y install activemq mcollective mcollective-client
[server] vi /etc/activemq/activemq.xml #change the default username and password
[server] service activemq start
[server] vi /etc/mcollective/server.cfg #point the plugin.stomp.* settings at activemq
[server] service mcollective start
 
[server] mkdir -p /etc/puppet/modules/mcollective/{files,manifests,templates}
[server] cp /etc/mcollective/server.cfg /etc/puppet/modules/mcollective/files/
[server] chmod 644 /etc/puppet/modules/mcollective/files/server.cfg
[server] vi /etc/puppet/modules/mcollective/manifests/init.pp
# Distribute server.cfg to all agents and keep the mcollective service running.
class mcollective {
  package { mcollective:
    ensure => present,
  }
  file { '/etc/mcollective/server.cfg':
    # Fixed: missing comma after the notify attribute was a syntax error.
    notify => Service['mcollective'],
    mode => '640',
    owner => 'root',
    group => 'root',
    source => 'puppet:///modules/mcollective/server.cfg',
  }
  service { 'mcollective':
    ensure => 'running',
    enable => 'true',
  }
}
 
[server] mco find
puppetserver
puppetclient1
 
[client] yum -y install mcollective-service-agent
[client] service mcollective restart
[server] yum -y install mcollective-service-client
[server] mco rpc service start service=httpd -v #start the httpd service on puppetclient1
[server] mco rpc service stop service=httpd -v #stop the httpd service on puppetclient1
 
[client] yum -y install mcollective-puppet-agent
[client] service mcollective restart
[server] yum -y install mcollective-puppet-client
[server] mco puppet status

安装Dashboard

[server] yum -y install puppet-dashboard
mysql> create database dashboard default charset utf8;
mysql> use dashboard;
mysql> grant all on dashboard.* to dashboard@localhost identified by 'dashboard' ;
mysql> flush privileges;
[server] vi /usr/share/puppet-dashboard/config/database.yml
production:
  database: dashboard
  username: dashboard
  password: dashboard
  encoding: utf8
  adapter: mysql
  #create the database and tables with ruby's rake command
[server] cd /usr/share/puppet-dashboard/
[server] rake RAILS_ENV=production db:create
[server] rake RAILS_ENV=production db:migrate
 
#run puppet-dashboard
[server] /usr/share/puppet-dashboard/script/server -e production
#or
[server] /etc/init.d/puppet-dashboard start
#either way the bundled WEBrick server listens on port 3000.

官方文档:http://docs.puppetlabs.com/puppet/3/reference/
中文wiki:http://puppet.wikidot.com

Cisco复习(帧中继)

Standard


帧中继

Router0>en
Router0#conf t
 
Router0(config)#int fa0/1
Router0(config-if)#ip add 172.16.1.1 255.255.255.0
Router0(config-if)#no shut
 
Router0(config-if)#int serial0/1/0
Router0(config-if)#encapsulation frame-relay //enable Frame Relay encapsulation
Router0(config-if)#no shut
 
Router0(config-if)#int serial0/1/0.1 point-to-point //sub-interface configuration
Router0(config-subif)#ip address 192.168.3.2 255.255.255.0
Router0(config-subif)#description Link Router2 DLCI 41 //descriptive note
Router0(config-subif)#frame-relay interface-dlci 20 //assign the DLCI ("frame-replay" typo fixed)
 
Router0(config-subif)#int serial0/1/0.2 point-to-point
Router0(config-subif)#ip address 192.168.2.2 255.255.255.0
Router0(config-subif)#description Link Router1 DLCI 31
Router0(config-subif)#frame-relay interface-dlci 21
...
Router0(config)#router eigrp 100
Router0(config-router)#network 172.16.0.0
Router0(config-router)#network 192.168.3.0
Router0(config-router)#network 192.168.2.0
Router0(config-router)#end
//--------------------------
Router1>en
Router1#conf t
 
Router1(config)#int fa0/1
Router1(config-if)#ip add 172.17.1.1 255.255.255.0
Router1(config-if)#no shut
 
Router1(config-if)#int serial0/1/0
Router1(config-if)#encapsulation frame-relay
Router1(config-if)#no shut
 
Router1(config-if)#int serial0/1/0.1 point-to-point
Router1(config-subif)#ip address 192.168.1.2 255.255.255.0
Router1(config-subif)#description Link Router2 DLCI 40
Router1(config-subif)#frame-relay interface-dlci 30 //"frame-replay" typo fixed
 
Router1(config-subif)#int serial0/1/0.2 point-to-point
Router1(config-subif)#ip address 192.168.2.1 255.255.255.0
Router1(config-subif)#description Link Router0 DLCI 21
Router1(config-subif)#frame-relay interface-dlci 31
...
Router1(config)#router eigrp 100
Router1(config-router)#network 172.17.0.0
Router1(config-router)#network 192.168.1.0
Router1(config-router)#network 192.168.2.0
Router1(config-router)#end
//--------------------------
Router2>en
Router2#conf t
 
Router2(config)#int fa0/1
Router2(config-if)#ip add 172.18.1.1 255.255.255.0
Router2(config-if)#no shut
 
Router2(config-if)#int serial0/1/0
Router2(config-if)#encapsulation frame-relay
Router2(config-if)#no shut
 
Router2(config-if)#int serial0/1/0.1 point-to-point
Router2(config-subif)#ip address 192.168.1.1 255.255.255.0
Router2(config-subif)#description Link Router1 DLCI 30
Router2(config-subif)#frame-relay interface-dlci 40 //"frame-replay" typo fixed
 
Router2(config-subif)#int serial0/1/0.2 point-to-point
Router2(config-subif)#ip address 192.168.3.1 255.255.255.0
Router2(config-subif)#description Link Router0 DLCI 20
Router2(config-subif)#frame-relay interface-dlci 41
...
Router2(config)#router eigrp 100 //stray "end" removed here; it would have left config mode too early
Router2(config-router)#network 172.18.0.0
Router2(config-router)#network 192.168.3.0
Router2(config-router)#network 192.168.1.0
Router2(config-router)#end

转载请注明出处:http://yemaosheng.com

Cisco复习(VOIP)

Standard


VOIP

CM#conf t
CM(config)#int fa 0/1
CM(config-if)#ip add 192.168.10.1 255.255.255.0
CM(config-if)#no shut
CM(config-if)#exit
CM(config)#ip dhcp pool voip
CM(dhcp-config)#network 192.168.10.0 255.255.255.0
CM(dhcp-config)#default-router 192.168.10.1
CM(dhcp-config)#option 150 ip 192.168.10.1 //TFTP server address the phones download their config from
CM(dhcp-config)#exit
 
CM(config)#telephony-service
CM(config-telephony)#max-ephones 30 //maximum number of phones
CM(config-telephony)#max-dn 30 //maximum directory numbers ("max-dh" was a typo for max-dn)
CM(config-telephony)#ip source-address 192.168.10.1 port 2000 //address/port phones register to
CM(config-telephony)#create cnf-files //generate an XML config file per phone
CM(config)#ephone-dn 1 //logical directory number
CM(config-ephone-dn)#number 1001 //extension 1001
CM(config)#ephone-dn 2 //logical directory number
CM(config-ephone-dn)#number 1002 //extension 1002 (original comment wrongly said 1001)
CM(config)#ephone-dn 3 //logical directory number
CM(config-ephone-dn)#number 1003 //extension 1003
CM(config)#ephone-dn 4 //logical directory number
CM(config-ephone-dn)#number 1004 //extension 1004
CM(config)#ephone-dn 5 //logical directory number
CM(config-ephone-dn)#number 1005 //extension 1005
CM(config)#ephone 1 //physical phone configuration
CM(config-ephone)#mac-address 000D.BD7D.0C91
CM(config-ephone)#type CIPC //CIPC = softphone; 7960 = Cisco hardware phone; ata = analog adapter
CM(config-ephone)#button 1:1 //first digit = button, second = the ephone-dn it rings
CM(config)#ephone 2
CM(config-ephone)#mac-address 0002.4A15.3CD7
CM(config-ephone)#type 7960
CM(config-ephone)#button 1:2
CM(config)#ephone 3
CM(config-ephone)#mac-address 000A.F34D.1A01
CM(config-ephone)#type ata
CM(config-ephone)#button 1:3
CM(config)#ephone 4
CM(config-ephone)#mac-address 00E0.A354.AA97
CM(config-ephone)#type CIPC
CM(config-ephone)#button 1:4
CM(config)#ephone 5
CM(config-ephone)#mac-address 0060.7035.0745
CM(config-ephone)#type CIPC
CM(config-ephone)#button 1:5
 
SW#conf t
SW(config)#int range fa 0/10-20
SW(config-if-range)#switchport mode access
SW(config-if-range)#switchport voice vlan 1
SW(config-if-range)#no shut
SW(config-if-range)#exit

转载请注明出处:http://yemaosheng.com