Commit b8a872fa by Your Name

first commit

parents
[defaults]
inventory = /root/Ant_Devops/hosts
timeout = 60
gathering = implicit
autocmd FileType yaml setlocal ts=2 sw=2
set ai!
set autoindent
set nu!
[jf-01]
192.168.1.117
[jf-02]
192.168.1.235
[jf-03]
192.168.1.204
[jf-04]
192.168.1.162
[jf-05]
192.168.1.249
[jf-1-3]
192.168.1.117
192.168.1.235
192.168.1.204
# NOTE(review): removed duplicate empty group header [jf-02] (already defined above)
[jf]
192.168.1.117
192.168.1.235
192.168.1.204
192.168.1.162
192.168.1.249
[all:vars]
# NOTE(review): plaintext SSH password committed in inventory — move to ansible-vault
ansible_ssh_pass=JSY@1q2w3e4r
ansible_ssh_user=root
ansible_ssh_port=19221
[sales]
192.168.1.163
192.168.1.38
192.168.1.65
192.168.1.33
[sales:vars]
ansible_ssh_pass=JSY1q2w3e4r
ansible_ssh_user=root
---
# Deploy ActiveMQ: push and unpack a local archive on the target, then start the broker.
- name: "downloads activemq server"
  unarchive:
    # copy: yes — the archive is pushed from the control node, not fetched on the remote
    copy: yes
    src: "/software/{{ activemq }}"
    dest: "{{ software }}"

#- name: "templates activemq.xml"
#  template:
#    src: activemq.xml
#    dest: "/{{ software }}/{{ activemqdir }}/conf/activemq.xml"

- name: "run activemq server"
  # nohup + '&' so the broker keeps running after the task's shell exits
  shell: "nohup bash /{{ software }}/{{ activemqdir }}/bin/activemq start &"
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- START SNIPPET: example -->
<beans
xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
http://activemq.apache.org/schema/core http://activemq.apache.org/schema/core/activemq-core.xsd">
<!-- Allows us to use system properties as variables in this configuration file -->
<bean class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
<property name="locations">
<value>file:${activemq.conf}/credentials.properties</value>
</property>
</bean>
<!-- Allows accessing the server log -->
<bean id="logQuery" class="io.fabric8.insight.log.log4j.Log4jLogQuery"
lazy-init="false" scope="singleton"
init-method="start" destroy-method="stop">
</bean>
<!--
The <broker> element is used to configure the ActiveMQ broker.
-->
<broker xmlns="http://activemq.apache.org/schema/core" brokerName="localhost" dataDirectory="${activemq.data}">
<destinationPolicy>
<policyMap>
<policyEntries>
<policyEntry topic=">" >
<!-- The constantPendingMessageLimitStrategy is used to prevent
slow topic consumers to block producers and affect other consumers
by limiting the number of messages that are retained
For more information, see:
http://activemq.apache.org/slow-consumer-handling.html
-->
<pendingMessageLimitStrategy>
<constantPendingMessageLimitStrategy limit="1000"/>
</pendingMessageLimitStrategy>
</policyEntry>
</policyEntries>
</policyMap>
</destinationPolicy>
<!--
The managementContext is used to configure how ActiveMQ is exposed in
JMX. By default, ActiveMQ uses the MBean server that is started by
the JVM. For more information, see:
http://activemq.apache.org/jmx.html
-->
<managementContext>
<managementContext createConnector="false"/>
</managementContext>
<!--
Configure message persistence for the broker. The default persistence
mechanism is the KahaDB store (identified by the kahaDB tag).
For more information, see:
http://activemq.apache.org/persistence.html
-->
<persistenceAdapter>
<kahaDB directory="${activemq.data}/kahadb"/>
</persistenceAdapter>
<!--
The systemUsage controls the maximum amount of space the broker will
use before disabling caching and/or slowing down producers. For more information, see:
http://activemq.apache.org/producer-flow-control.html
-->
<systemUsage>
<systemUsage>
<memoryUsage>
<memoryUsage percentOfJvmHeap="70" />
</memoryUsage>
<storeUsage>
<storeUsage limit="100 gb"/>
</storeUsage>
<tempUsage>
<tempUsage limit="50 gb"/>
</tempUsage>
</systemUsage>
</systemUsage>
<!--
The transport connectors expose ActiveMQ over a given protocol to
clients and other brokers. For more information, see:
http://activemq.apache.org/configuring-transports.html
-->
<transportConnectors>
<!-- DOS protection, limit concurrent connections to 1000 and frame size to 100MB -->
<transportConnector name="openwire" uri="tcp://0.0.0.0:61616?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
<transportConnector name="amqp" uri="amqp://0.0.0.0:5672?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
<transportConnector name="stomp" uri="stomp://0.0.0.0:61613?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
<transportConnector name="mqtt" uri="mqtt://0.0.0.0:1883?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
<!--transportConnector name="ws" uri="ws://0.0.0.0:61614?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/-->
</transportConnectors>
<!-- destroy the spring context on shutdown to stop jetty -->
<shutdownHooks>
<bean xmlns="http://www.springframework.org/schema/beans" class="org.apache.activemq.hooks.SpringContextHook" />
</shutdownHooks>
</broker>
<!--
Enable web consoles, REST and Ajax APIs and demos
The web consoles requires by default login, you can disable this in the jetty.xml file
Take a look at ${ACTIVEMQ_HOME}/conf/jetty.xml for more details
-->
<import resource="jetty.xml"/>
</beans>
<!-- END SNIPPET: example -->
---
# Build and install git from a local source archive, then put it on PATH.
- name: "Downloads git server"
  unarchive:
    copy: yes
    src: "/software/{{ git }}"
    dest: "{{ software }}"
  # Alternative: fetch the tarball directly instead of using the local copy.
  # unarchive:
  #   src: https://mirrors.edge.kernel.org/pub/software/scm/git/git-2.9.5.tar.xz
  #   dest: /usr/local/
  #   remote_src: yes

- name: "install depen"
  yum:
    name:
      - gcc-c++
      - curl-devel
      - expat-devel
      - gettext-devel
      - openssl-devel
      - zlib-devel
      - perl-ExtUtils-MakeMaker
      - autoconf
    state: latest

- name: "install git"
  # NOTE(review): the archive above unpacks under "{{ software }}" while the build
  # runs in /usr/local/git-2.9.5 — confirm these resolve to the same location.
  # Fixed typo: 'make profix=...' -> 'make prefix=...'
  shell:
    cd /usr/local/git-2.9.5/ && make configure && ./configure --prefix=/usr/local/git && make prefix=/usr/local/git && make install

- name: set env
  lineinfile:
    dest: /etc/profile
    line: "{{ item }}"
  with_items:
    - export PATH=$PATH:/usr/local/git/bin
    # export NGINX_HOME=/usr/local/git
    # export PATH=$PATH:$NGINX_HOME/sbin
---
# Unpack the JDK, install the JCE unlimited-strength policy jars, and export env vars.
- name: "Downloads JDK server"
  unarchive:
    copy: yes
    src: "/software/{{ jdk }}"
    dest: "/usr/local"

# Replace the JRE policy jars so AES key sizes > 128 bit work (JCE unlimited policy).
- name: "aes解密问题解决"
  copy:
    src: /software/UnlimitedJCEPolicyJDK8/local_policy.jar
    dest: "{{ softwarebase }}/jdk1.8.0_202/jre/lib/security"

- name: "aes解密问题解决"
  copy:
    src: /software/UnlimitedJCEPolicyJDK8/US_export_policy.jar
    dest: "{{ softwarebase }}/jdk1.8.0_202/jre/lib/security"

- name: set env
  lineinfile:
    dest: /etc/profile
    line: "{{ item }}"
  with_items:
    - "export JAVA_HOME=/usr/local/{{ jdkdir }}"
    - export CLASSPATH=.:${JAVA_HOME}/jre/lib/rt.jar:${JAVA_HOME}/lib/dt.jar:${JAVA_HOME}/lib/tools.jar
    - export PATH=$PATH:${JAVA_HOME}/bin
<?xml version='1.1' encoding='UTF-8'?>
<flow-definition plugin="workflow-job@2.32">
<actions>
<org.jenkinsci.plugins.pipeline.modeldefinition.actions.DeclarativeJobAction plugin="pipeline-model-definition@1.3.9"/>
<org.jenkinsci.plugins.pipeline.modeldefinition.actions.DeclarativeJobPropertyTrackerAction plugin="pipeline-model-definition@1.3.9">
<jobProperties>
<string>org.jenkinsci.plugins.workflow.job.properties.DisableConcurrentBuildsJobProperty</string>
<string>jenkins.model.BuildDiscarderProperty</string>
</jobProperties>
<triggers/>
<parameters>
<string>autoRestart</string>
<string>BRANCH</string>
<string>buildDep</string>
<string>env</string>
</parameters>
<options/>
</org.jenkinsci.plugins.pipeline.modeldefinition.actions.DeclarativeJobPropertyTrackerAction>
</actions>
<description></description>
<keepDependencies>false</keepDependencies>
<properties>
<hudson.model.ParametersDefinitionProperty>
<parameterDefinitions>
<hudson.model.BooleanParameterDefinition>
<name>autoRestart</name>
<description>自动重启</description>
<defaultValue>false</defaultValue>
</hudson.model.BooleanParameterDefinition>
<hudson.model.BooleanParameterDefinition>
<name>buildDep</name>
<description>编译其他 </description>
<defaultValue>false</defaultValue>
</hudson.model.BooleanParameterDefinition>
<hudson.model.ChoiceParameterDefinition>
<name>env</name>
<description></description>
<choices>
<string>kuka_qk_test</string>
<string>kuka_crm_test</string>
<string>docker</string>
<string>mayi_yun_test</string>
</choices>
</hudson.model.ChoiceParameterDefinition>
<net.uaznia.lukanus.hudson.plugins.gitparameter.GitParameterDefinition plugin="git-parameter@0.9.11">
<name>BRANCH</name>
<uuid>20e072af-6ccb-4848-845e-6669646b58ac</uuid>
<type>PT_BRANCH</type>
<tagFilter>*</tagFilter>
<branchFilter>origin/(.*)</branchFilter>
<defaultValue>product_dev</defaultValue>
<selectedValue>DEFAULT</selectedValue>
<listSize>5</listSize>
</net.uaznia.lukanus.hudson.plugins.gitparameter.GitParameterDefinition>
<com.cwctravel.hudson.plugins.extended__choice__parameter.ExtendedChoiceParameterDefinition plugin="extended-choice-parameter@0.78">
<name>module</name>
<description></description>
<quoteValue>false</quoteValue>
<saveJSONParameterToFile>false</saveJSONParameterToFile>
<visibleItemCount>10</visibleItemCount>
<type>PT_CHECKBOX</type>
<value>base-service/base-service-core,user-service/user-service-core,shop-api-service,goods-service/goods-service-core,order-service/order-service-core,app-service/app-service-core,promotion-service/promotion-service-core,message-service/message-service-core,info-service/info-service-core,schedule-service/schedule-service-core,third-party-service/third-party-service-core,customer-service/customer-service-core,cms-service/cms-service-core,gateway-service,distribution-service/distribution-service-core</value>
<defaultValue>base-service/base-service-core</defaultValue>
<multiSelectDelimiter>,</multiSelectDelimiter>
</com.cwctravel.hudson.plugins.extended__choice__parameter.ExtendedChoiceParameterDefinition>
</parameterDefinitions>
</hudson.model.ParametersDefinitionProperty>
<jenkins.model.BuildDiscarderProperty>
<strategy class="hudson.tasks.LogRotator">
<daysToKeep>-1</daysToKeep>
<numToKeep>3</numToKeep>
<artifactDaysToKeep>-1</artifactDaysToKeep>
<artifactNumToKeep>-1</artifactNumToKeep>
</strategy>
</jenkins.model.BuildDiscarderProperty>
<org.jenkinsci.plugins.workflow.job.properties.DisableConcurrentBuildsJobProperty/>
<hudson.plugins.jira.JiraProjectProperty plugin="jira@3.0.7"/>
<com.dabsquared.gitlabjenkins.connection.GitLabConnectionProperty plugin="gitlab-plugin@1.5.12">
<gitLabConnection></gitLabConnection>
</com.dabsquared.gitlabjenkins.connection.GitLabConnectionProperty>
<com.sonyericsson.rebuild.RebuildSettings plugin="rebuild@1.31">
<autoRebuild>false</autoRebuild>
<rebuildDisabled>false</rebuildDisabled>
</com.sonyericsson.rebuild.RebuildSettings>
</properties>
<definition class="org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition" plugin="workflow-cps@2.70">
<script>pipeline {
agent any
tools {
maven &apos;local-maven&apos;
}
parameters {
booleanParam defaultValue: false, description: &apos;自动重启&apos;, name: &apos;autoRestart&apos;
booleanParam defaultValue: false, description: &apos;编译其他 &apos;, name: &apos;buildDep&apos;
choice choices: [&apos;kuka_qk_test&apos;,&apos;kuka_crm_test&apos;,&apos;docker&apos;,&apos;mayi_yun_test&apos;], description: &apos;&apos;, name: &apos;env&apos;
gitParameter branchFilter: &apos;origin/(.*)&apos;, defaultValue: &apos;product_dev&apos;, name: &apos;BRANCH&apos;, selectedValue: &apos;DEFAULT&apos;,type: &apos;PT_BRANCH&apos;
}
// 配置项
options {
//timestamps
// 保留最后3个构件记录
buildDiscarder(logRotator(numToKeepStr: &apos;3&apos;))
// 不允许并行执行Pipeline,可用于防止同时访问共享资源等
disableConcurrentBuilds()
}
stages {
// 编译阶段
stage(&apos;SCM&apos;) {
steps {
git branch: &quot;${params.BRANCH}&quot;, credentialsId: &apos;2d3e8684-49ae-40c2-b762-fef4e6275ea3&apos;, url: &apos;http://code.mayi888.com/mayi888/mayi-service.git&apos;
}
}
// 发布阶段
stage(&apos;build dependencies proj&apos;) {
when {
expression { params.buildDep }
}
steps {
script {
if(params.env == &apos;kuka_crm_test&apos;){
withMaven(maven: &apos;maven&apos;, mavenSettingsFilePath: &apos;/opt/apache-maven-3.3.1/conf/settings.xml&apos;) {
build(job: &apos;gjdz_commons&apos;, parameters:[gitParameter(name: &apos;branch&apos;, value: &apos;crm_dev&apos;)])
}
} else {
withMaven(maven: &apos;maven&apos;, mavenSettingsFilePath: &apos;/opt/apache-maven-3.3.1/conf/settings.xml&apos;) {
build(job: &apos;commons&apos;, parameters:[gitParameter(name: &apos;branch&apos;, value: params.env==&apos;mayi_yun_test&apos;?&apos;product&apos;:&apos;master&apos;)])
}
}
}
}
}
// 编译阶段
stage(&apos;Build&apos;) {
steps {
withMaven(maven: &apos;maven&apos;, mavenSettingsFilePath: &apos;/opt/apache-maven-3.3.1/conf/settings.xml&apos;) {
sh &apos;java -version&apos;
sh &apos;mvn clean&apos;
sh &apos;echo ${env}-${module}&apos;
script {
if (params.env == &apos;docker&apos;) {
sh &apos;mvn -Dmaven.test.skip=true -e -pl ${module} -am package -Ddockerfile.skip=false dockerfile:push&apos;
// sh &apos;mvn -Dmaven.test.skip=true -e -pl ${module} -amd package docker:build -DpushImage&apos;
}
else {
sh &apos;mvn -Dmaven.test.skip=true -e -pl ${module} -am install&apos;
//sh &apos;mvn clean org.jacoco:jacoco-maven-plugin:prepare-agent -Dmaven.test.failure.ignore=true -e -pl ${module} -am install&apos;
sh &apos;mkdir -p servicePackages&apos;
sh &apos;rm -rf servicePackages/*&apos;
sh &apos;cp -r -f */**/target/*.jar servicePackages 2&gt;/dev/null || :&apos;
sh &apos;cp -r -f **/target/*.jar servicePackages 2&gt;/dev/null || :&apos;
sh &apos;ls -l /opt/tomcat/servicePackages&apos;
}
}
}
// jacoco execPattern: &apos;target/jacoco.exec&apos;
}
}
// 发布阶段
stage(&apos;Deploy Test Env&apos;) {
steps {
script{
if(params.env == &apos;kuka_crm_test&apos;){
//to test crm
sshPublisher(publishers: [sshPublisherDesc(configName: &apos;test-vm13&apos;, transfers: [sshTransfer(excludes: &apos;&apos;, execCommand: &apos;ls -l /opt/test/tomcat-test/servicePackages/gjdz&apos;, execTimeout: 0, flatten: false, makeEmptyDirs: false, noDefaultExcludes: false, patternSeparator: &apos;[, ]+&apos;, remoteDirectory: &apos;test/tomcat-test/servicePackages/gjdz&apos;, remoteDirectorySDF: false, removePrefix: &apos;servicePackages&apos;, sourceFiles: &apos;servicePackages/*&apos;)], usePromotionTimestamp: false, useWorkspaceInPromotion: false, verbose: false)])
} else if(params.env == &apos;mayi_yun_test&apos;){
echo &quot;test&quot;
sshPublisher(publishers: [sshPublisherDesc(configName: &apos;host18@vm02&apos;, transfers: [sshTransfer(cleanRemote: false, excludes: &apos;&apos;, execCommand: &apos;ls -l /opt/tomcat/servicePackages&apos;, execTimeout: 300000, flatten: false, makeEmptyDirs: false, noDefaultExcludes: false, patternSeparator: &apos;[, ]+&apos;, remoteDirectory: &apos;&apos;, remoteDirectorySDF: false, removePrefix: &apos;&apos;, sourceFiles: &apos;servicePackages/*&apos;)], usePromotionTimestamp: false, useWorkspaceInPromotion: false, verbose: false)])
}
else if(params.env == &apos;kuka_qk_test&apos;){
sshPublisher(publishers: [sshPublisherDesc(configName: &apos;test-vm22&apos;, transfers: [sshTransfer(excludes: &apos;&apos;, execCommand: &apos;ls -l /opt/test/tomcat-test/servicePackages&apos;, execTimeout: 0, flatten: false, makeEmptyDirs: false, noDefaultExcludes: false, patternSeparator: &apos;[, ]+&apos;, remoteDirectory: &apos;test/tomcat-test/servicePackages&apos;, remoteDirectorySDF: false, removePrefix: &apos;servicePackages&apos;, sourceFiles: &apos;servicePackages/*&apos;)], usePromotionTimestamp: false, useWorkspaceInPromotion: false, verbose: false)])
}
}
}
}
// 重启
stage(&apos;Restart Service&apos;) {
when {
expression { params.autoRestart }
}
steps {
script {
if (params.env == &apos;mayi_yun_test&apos;){
build job: &apos;test-restart-tomcat&apos;, parameters: [string(name: &apos;host&apos;, value: &apos;.18@v2&apos;), string(name: &apos;app&apos;, value: &apos;mayi-server&apos;), string(name: &apos;env&apos;, value: params.env), string(name: &apos;model&apos;, value: params.module)]
}else if(params.env == &apos;kuka_crm_test&apos;){
build job: &apos;gjdz_test-restart-tomcat&apos;, parameters: [string(name: &apos;host&apos;, value: &apos;test-vm13&apos;), string(name: &apos;app&apos;, value: &apos;mayi-server&apos;), string(name: &apos;env&apos;, value: params.env), string(name: &apos;model&apos;, value: params.module)]
}else if(params.env == &apos;kuka_qk_test&apos;){
}
else{
build job: &apos;test-restart-tomcat&apos;, parameters: [string(name: &apos;host&apos;, value: &apos;.29&apos;), string(name: &apos;app&apos;, value: &apos;mayi-server&apos;), string(name: &apos;env&apos;, value: params.env), string(name: &apos;model&apos;, value: params.module)]
}
}
}
}
}
post {
always {
echo &apos;结束&apos;
}
// 成功时触发
success{
echo &apos;成功&apos;
}
}
}</script>
<sandbox>true</sandbox>
</definition>
<triggers/>
<disabled>false</disabled>
</flow-definition>
\ No newline at end of file
---
# Unpack Maven, install its settings template, and add it to PATH.
- name: "Downloads maven server"
  unarchive:
    copy: yes
    src: "/software/{{ maven }}"
    dest: "{{ softwarebase }}"
  # Alternative remote download:
  # unarchive:
  #   src: https://mirrors.tuna.tsinghua.edu.cn/apache/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.tar.gz
  #   dest: /usr/local/
  #   remote_src: yes

- name: "copy mvn conf templates"
  template:
    src: config.xml
    # Fixed typo: 'softwaraebase' -> 'softwarebase' (the misspelled variable is
    # undefined, so this task would fail or render a broken destination path).
    dest: "{{ softwarebase }}/{{ mavendir }}/conf/config.xml"

- name: set env
  lineinfile:
    dest: /etc/profile
    line: "{{ item }}"
  with_items:
    - "export MAVEN_HOME=/usr/local/{{ mavendir }}"
    - export PATH=$PATH:${MAVEN_HOME}/bin
---
# Unpack MongoDB, create its data/log/run directories, install the config
# template, export env vars, and start mongod.
- name: "Downloads mongo server"
  unarchive:
    copy: yes
    src: "/software/{{ mongo }}"
    dest: "{{ software }}"
  # unarchive:
  #   src: https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-4.0.9.tgz
  #   dest: /usr/local/
  #   remote_src: yes

#- name: "start mongo"
#  shell: "/usr/local/mongo/sbin/mongo"

- name: create data dir
  file:
    path: "{{ item }}"
    state: directory
  with_items:
    - /data/app-data/mongodb/
    - /var/run/mongodb/
    - /data/log/mongodb/
    - "/{{ software }}/{{ mongodir }}/conf/"

- name: "templates mongo.conf"
  template:
    src: mongo.conf
    dest: "/{{ software }}/{{ mongodir }}/conf/mongo.conf"

- name: set env
  lineinfile:
    dest: /etc/profile
    line: "{{ item }}"
  with_items:
    - "export MONGO_HOME=/{{ software }}/{{ mongodir }}"
    - export PATH=$PATH:$MONGO_HOME/bin

- name: start mongo
  # mongod daemonizes itself when the config sets processManagement.fork,
  # so no nohup/'&' is needed here.
  shell:
    " /{{ software }}/{{ mongodir }}/bin/mongod -f /{{ software }}/{{ mongodir }}/conf/mongo.conf "
systemLog:
destination: file
logAppend: true
path: /data/log/mongodb/mongod.log
storage:
dbPath: /data/app-data/mongodb
journal:
enabled: true
processManagement:
fork: true # fork and run in background
pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile
timeZoneInfo: /usr/share/zoneinfo
net:
port: 27017
bindIp: "{{ bindip }}"
replication:
replSetName: rs1
systemLog:
destination: file
logAppend: true
path: /data/log/mongodb/mongod.log
storage:
dbPath: /data/mongodb # NOTE(review): the playbook creates /data/app-data/mongodb — confirm which path is intended
journal:
enabled: true
processManagement:
fork: true # fork and run in background
pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile
timeZoneInfo: /usr/share/zoneinfo
net:
port: 27017
# bindIp: "{{ bindip }}"
---
# Create nginx directories, unpack the sources, install build deps, compile
# (with Tengine-style add-on modules), and install the nginx.conf template.
- name: create dir
  file:
    path: "{{ item }}"
    state: directory
  with_items:
    - /opt/app
    - /data/log/nginx/logs
    - "/{{ software }}/nginx/conf/cert"
    - "/{{ software }}/nginx/conf/conf.d"

- name: "Downloads nginx server"
  unarchive:
    copy: yes
    src: "/software/{{ nginx }}"
    dest: "{{ software }}"

- name: "install depen"
  yum:
    name:
      - gcc
      - pcre-devel
      - openssl-devel
      - zlib-devel
    state: latest

- name: "install nginx"
  # Fixed: --with-http_realip_module was passed twice; kept a single occurrence.
  shell:
    "cd /{{ software }}/{{ nginxdir }}/ && ./configure --sbin-path=/usr/sbin/nginx --prefix=/{{ software }}/nginx --pid-path=/run/nginx.pid --with-http_stub_status_module --with-http_gzip_static_module --with-http_realip_module --with-http_ssl_module --with-http_v2_module --add-module=modules/ngx_http_concat_module --add-module=modules/ngx_http_sysguard_module --add-module=modules/ngx_backtrace_module --add-module=modules/ngx_http_upstream_check_module --with-pcre && make && make install"

- name: "templates nginx.conf"
  template:
    src: nginx.conf
    dest: "/{{ software }}/nginx/conf/nginx.conf"
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
error_log /data/log/nginx/logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
server_names_hash_bucket_size 64;
# log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
# fixed: log_format must end with ';', and access_log referenced 'main',
# which is commented out above — log with the defined 'gzip' format instead
log_format gzip '$request_time $upstream_response_time $remote_addr - $upstream_addr [$time_local] '
'$host "$request" $status $bytes_sent '
'"$http_referer" "$http_user_agent" "$gzip_ratio" "$http_x_forwarded_for" - "$server_addr $cookie_aQQ_ajkguid"';
access_log /data/log/nginx/logs/access.log gzip;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
root html;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
include conf.d/*.conf;
}
---
# Unpack node_exporter and start it in the background.
- name: "downloads node_exporter server"
  unarchive:
    copy: yes
    src: "/software/{{ node }}"
    dest: "{{ softwarebase }}"

- name: "run node_exporter server"
  # NOTE(review): the leading '/' before {{ softwarebase }} double-slashes the
  # path if softwarebase is already absolute — harmless to the shell, but confirm.
  shell: "nohup /{{ softwarebase }}/{{ nodedir }}/node_exporter &"
---
# Unpack redis sources, install build deps, compile, install the config
# template, and start the server in the background.
- name: "downloads redis server"
  unarchive:
    copy: yes
    src: "/software/{{ redis }}"
    dest: "{{ software }}"
  # copy:
  #   src: /software/redis-5.0.3.tar.gz
  #   dest: /usr/local/redis-5.0.3.tar.gz
  # get_url:
  #   url: http://download.redis.io/releases/redis-5.0.3.tar.gz
  #   dest: "/usr/local/"

- name: "install depen"
  yum:
    name:
      - gcc
      - gcc-c++
      - libstdc++-devel
    state: latest

- name: create data dir
  file:
    path: "{{ item }}"
    state: directory
  with_items:
    - /opt/app
    - /data/log/redis

- name: "make redis"
  # MALLOC=libc avoids the jemalloc build dependency
  shell: "cd /{{ software }}/{{ redisdir }};make MALLOC=libc"

- name: "copy redis conf templates"
  template:
    src: redis.conf
    # Fixed: added the leading '/' so the destination is absolute, matching the
    # absolute path every other task in this file builds from {{ software }}.
    dest: "/{{ software }}/{{ redisdir }}/redis.conf"

- name: "run redis server"
  # Fixed: config-file argument now uses the same absolute path the template
  # task writes to (it previously lacked the leading '/').
  shell: "nohup /{{ software }}/{{ redisdir }}/src/redis-server /{{ software }}/{{ redisdir }}/redis.conf &"
bind {{ bindip }}
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile "/data/log/redis/redis.log"
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
bind {{ bindip }}
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
[zabbix]
name=Zabbix Official Repository - $basearch
baseurl=http://repo.zabbix.com/zabbix/4.2/rhel/7/$basearch/
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ZABBIX-A14FE591
[zabbix-non-supported]
name=Zabbix Official Repository non-supported - $basearch
baseurl=http://repo.zabbix.com/non-supported/rhel/7/$basearch/
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-ZABBIX
gpgcheck=0
---
# Install zabbix-agent from the official repo, deploy its config, and start it.
- name: mv zabbix repo
  copy:
    src: zabbix.repo
    dest: /etc/yum.repos.d/zabbix.repo

- name: install zabbix-agent
  yum:
    name: zabbix-agent
    state: latest

# Log directory owned by the zabbix user so the agent can write its log file.
- name: create log dir
  file:
    path: /data/log/zabbix/
    state: directory
    owner: zabbix
    group: zabbix

- name: "move templates to zabbix agentd"
  template:
    src: zabbix_agentd.conf
    dest: /etc/zabbix/zabbix_agentd.conf

- name: start zabbix-agent
  service:
    name: zabbix-agent
    state: restarted
    enabled: yes
# This is a configuration file for Zabbix agent daemon (Unix)
# To get more information about Zabbix, visit http://www.zabbix.com
############ GENERAL PARAMETERS #################
### Option: PidFile
# Name of PID file.
#
# Mandatory: no
# Default:
# PidFile=/tmp/zabbix_agentd.pid
PidFile=/var/run/zabbix/zabbix_agentd.pid
### Option: LogType
# Specifies where log messages are written to:
# system - syslog
# file - file specified with LogFile parameter
# console - standard output
#
# Mandatory: no
# Default:
# LogType=file
### Option: LogFile
# Log file name for LogType 'file' parameter.
#
# Mandatory: no
# Default:
# LogFile=
# NOTE(review): the playbook creates /data/log/zabbix owned by zabbix, so log
# there; the /var/log path was active while the intended one was commented out.
LogFile=/data/log/zabbix/zabbix_agentd.log
#LogFile=/var/log/zabbix/zabbix_agentd.log
### Option: LogFileSize
# Maximum size of log file in MB.
# 0 - disable automatic log rotation.
#
# Mandatory: no
# Range: 0-1024
# Default:
# LogFileSize=1
LogFileSize=0
### Option: DebugLevel
# Specifies debug level:
# 0 - basic information about starting and stopping of Zabbix processes
# 1 - critical information
# 2 - error information
# 3 - warnings
# 4 - for debugging (produces lots of information)
# 5 - extended debugging (produces even more information)
#
# Mandatory: no
# Range: 0-5
# Default:
# DebugLevel=3
### Option: SourceIP
# Source IP address for outgoing connections.
#
# Mandatory: no
# Default:
# SourceIP=
### Option: EnableRemoteCommands
# Whether remote commands from Zabbix server are allowed.
# 0 - not allowed
# 1 - allowed
#
# Mandatory: no
# Default:
# EnableRemoteCommands=0
### Option: LogRemoteCommands
# Enable logging of executed shell commands as warnings.
# 0 - disabled
# 1 - enabled
#
# Mandatory: no
# Default:
# LogRemoteCommands=0
##### Passive checks related
### Option: Server
# List of comma delimited IP addresses, optionally in CIDR notation, or hostnames of Zabbix servers and Zabbix proxies.
# Incoming connections will be accepted only from the hosts listed here.
# If IPv6 support is enabled then '127.0.0.1', '::127.0.0.1', '::ffff:127.0.0.1' are treated equally and '::/0' will allow any IPv4 or IPv6 address.
# '0.0.0.0/0' can be used to allow any IPv4 address.
# Example: Server=127.0.0.1,192.168.1.0/24,::1,2001:db8::/32,zabbix.domain
#
# Mandatory: yes, if StartAgents is not explicitly set to 0
# Default:
# Server=
Server=192.168.0.22
### Option: ListenPort
# Agent will listen on this port for connections from the server.
#
# Mandatory: no
# Range: 1024-32767
# Default:
# ListenPort=10050
### Option: ListenIP
# List of comma delimited IP addresses that the agent should listen on.
# First IP address is sent to Zabbix server if connecting to it to retrieve list of active checks.
#
# Mandatory: no
# Default:
# ListenIP=0.0.0.0
### Option: StartAgents
# Number of pre-forked instances of zabbix_agentd that process passive checks.
# If set to 0, disables passive checks and the agent will not listen on any TCP port.
#
# Mandatory: no
# Range: 0-100
# Default:
# StartAgents=3
##### Active checks related
### Option: ServerActive
# List of comma delimited IP:port (or hostname:port) pairs of Zabbix servers and Zabbix proxies for active checks.
# If port is not specified, default port is used.
# IPv6 addresses must be enclosed in square brackets if port for that host is specified.
# If port is not specified, square brackets for IPv6 addresses are optional.
# If this parameter is not specified, active checks are disabled.
# Example: ServerActive=127.0.0.1:20051,zabbix.domain,[::1]:30051,::1,[12fc::1]
#
# Mandatory: no
# Default:
# ServerActive=
#ServerActive=127.0.0.1
### Option: Hostname
# Unique, case sensitive hostname.
# Required for active checks and must match hostname as configured on the server.
# Value is acquired from HostnameItem if undefined.
#
# Mandatory: no
# Default:
# Hostname=
#Hostname=Zabbix server
### Option: HostnameItem
# Item used for generating Hostname if it is undefined. Ignored if Hostname is defined.
# Does not support UserParameters or aliases.
#
# Mandatory: no
# Default:
# HostnameItem=system.hostname
### Option: HostMetadata
# Optional parameter that defines host metadata.
# Host metadata is used at host auto-registration process.
# An agent will issue an error and not start if the value is over limit of 255 characters.
# If not defined, value will be acquired from HostMetadataItem.
#
# Mandatory: no
# Range: 0-255 characters
# Default:
# HostMetadata=
### Option: HostMetadataItem
# Optional parameter that defines an item used for getting host metadata.
# Host metadata is used at host auto-registration process.
# During an auto-registration request an agent will log a warning message if
# the value returned by specified item is over limit of 255 characters.
# This option is only used when HostMetadata is not defined.
#
# Mandatory: no
# Default:
# HostMetadataItem=
### Option: RefreshActiveChecks
# How often list of active checks is refreshed, in seconds.
#
# Mandatory: no
# Range: 60-3600
# Default:
# RefreshActiveChecks=120
### Option: BufferSend
# Do not keep data longer than N seconds in buffer.
#
# Mandatory: no
# Range: 1-3600
# Default:
# BufferSend=5
### Option: BufferSize
# Maximum number of values in a memory buffer. The agent will send
# all collected data to Zabbix Server or Proxy if the buffer is full.
#
# Mandatory: no
# Range: 2-65535
# Default:
# BufferSize=100
### Option: MaxLinesPerSecond
# Maximum number of new lines the agent will send per second to Zabbix Server
# or Proxy processing 'log' and 'logrt' active checks.
# The provided value will be overridden by the parameter 'maxlines',
# provided in 'log' or 'logrt' item keys.
#
# Mandatory: no
# Range: 1-1000
# Default:
# MaxLinesPerSecond=20
############ ADVANCED PARAMETERS #################
### Option: Alias
# Sets an alias for an item key. It can be used to substitute long and complex item key with a smaller and simpler one.
# Multiple Alias parameters may be present. Multiple parameters with the same Alias key are not allowed.
# Different Alias keys may reference the same item key.
# For example, to retrieve the ID of user 'zabbix':
# Alias=zabbix.userid:vfs.file.regexp[/etc/passwd,^zabbix:.:([0-9]+),,,,\1]
# Now shorthand key zabbix.userid may be used to retrieve data.
# Aliases can be used in HostMetadataItem but not in HostnameItem parameters.
#
# Mandatory: no
# Range:
# Default:
### Option: Timeout
# Spend no more than Timeout seconds on processing
#
# Mandatory: no
# Range: 1-30
# Default:
# Timeout=3
### Option: AllowRoot
# Allow the agent to run as 'root'. If disabled and the agent is started by 'root', the agent
# will try to switch to the user specified by the User configuration option instead.
# Has no effect if started under a regular user.
# 0 - do not allow
# 1 - allow
#
# Mandatory: no
# Default:
# AllowRoot=0
### Option: User
# Drop privileges to a specific, existing user on the system.
# Only has effect if run as 'root' and AllowRoot is disabled.
#
# Mandatory: no
# Default:
# User=zabbix
### Option: Include
# You may include individual files or all files in a directory in the configuration file.
# Installing Zabbix will create include directory in /usr/local/etc, unless modified during the compile time.
#
# Mandatory: no
# Default:
# Include=
Include=/etc/zabbix/zabbix_agentd.d/*.conf
# Include=/usr/local/etc/zabbix_agentd.userparams.conf
# Include=/usr/local/etc/zabbix_agentd.conf.d/
# Include=/usr/local/etc/zabbix_agentd.conf.d/*.conf
####### USER-DEFINED MONITORED PARAMETERS #######
### Option: UnsafeUserParameters
# Allow all characters to be passed in arguments to user-defined parameters.
# The following characters are not allowed:
# \ ' " ` * ? [ ] { } ~ $ ! & ; ( ) < > | # @
# Additionally, newline characters are not allowed.
# 0 - do not allow
# 1 - allow
#
# Mandatory: no
# Range: 0-1
# Default:
# UnsafeUserParameters=0
### Option: UserParameter
# User-defined parameter to monitor. There can be several user-defined parameters.
# Format: UserParameter=<key>,<shell command>
# See 'zabbix_agentd' directory for examples.
#
# Mandatory: no
# Default:
# UserParameter=
####### LOADABLE MODULES #######
### Option: LoadModulePath
# Full path to location of agent modules.
# Default depends on compilation options.
#
# Mandatory: no
# Default:
# LoadModulePath=${libdir}/modules
### Option: LoadModule
# Module to load at agent startup. Modules are used to extend functionality of the agent.
# Format: LoadModule=<module.so>
# The modules must be located in directory specified by LoadModulePath.
# It is allowed to include multiple LoadModule parameters.
#
# Mandatory: no
# Default:
# LoadModule=
####### TLS-RELATED PARAMETERS #######
### Option: TLSConnect
# How the agent should connect to server or proxy. Used for active checks.
# Only one value can be specified:
# unencrypted - connect without encryption
# psk - connect using TLS and a pre-shared key
# cert - connect using TLS and a certificate
#
# Mandatory: yes, if TLS certificate or PSK parameters are defined (even for 'unencrypted' connection)
# Default:
# TLSConnect=unencrypted
### Option: TLSAccept
# What incoming connections to accept.
# Multiple values can be specified, separated by comma:
# unencrypted - accept connections without encryption
# psk - accept connections secured with TLS and a pre-shared key
# cert - accept connections secured with TLS and a certificate
#
# Mandatory: yes, if TLS certificate or PSK parameters are defined (even for 'unencrypted' connection)
# Default:
# TLSAccept=unencrypted
### Option: TLSCAFile
# Full pathname of a file containing the top-level CA(s) certificates for
# peer certificate verification.
#
# Mandatory: no
# Default:
# TLSCAFile=
### Option: TLSCRLFile
# Full pathname of a file containing revoked certificates.
#
# Mandatory: no
# Default:
# TLSCRLFile=
### Option: TLSServerCertIssuer
# Allowed server certificate issuer.
#
# Mandatory: no
# Default:
# TLSServerCertIssuer=
### Option: TLSServerCertSubject
# Allowed server certificate subject.
#
# Mandatory: no
# Default:
# TLSServerCertSubject=
### Option: TLSCertFile
# Full pathname of a file containing the agent certificate or certificate chain.
#
# Mandatory: no
# Default:
# TLSCertFile=
### Option: TLSKeyFile
# Full pathname of a file containing the agent private key.
#
# Mandatory: no
# Default:
# TLSKeyFile=
### Option: TLSPSKIdentity
# Unique, case sensitive string used to identify the pre-shared key.
#
# Mandatory: no
# Default:
# TLSPSKIdentity=
### Option: TLSPSKFile
# Full pathname of a file containing the pre-shared key.
#
# Mandatory: no
# Default:
# TLSPSKFile=
---
# Role tasks: install and start a ZooKeeper server from a tarball shipped
# with the role (src is on the control machine under /software/).

- name: create data dir
  # ZooKeeper's dataDir and dataLogDir must exist before first start.
  file:
    path: "{{ item }}"
    state: directory
  with_items:
    - /opt/app
    - /data/zookeeper
    - /data/log/zookeeper

- name: "downloads zookeeper server"
  # Alternative kept for reference: fetch the tarball from a mirror
  # instead of shipping it with the role.
  # get_url:
  #   url: https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper/stable/zookeeper-3.4.14.tar.gz
  #   dest: "/usr/local/"
  unarchive:
    # 'copy: yes' is a deprecated alias; 'remote_src: false' is the
    # supported way to say "archive lives on the control machine".
    remote_src: false
    src: "/software/{{ zookeeper }}"
    dest: "{{ software }}"

- name: "copy zk conf templates"
  template:
    src: zoo.cfg
    dest: "{{ software }}/{{ zookeeperdir }}/conf/zoo.cfg"

- name: "run zk server"
  # zkServer.sh start daemonizes on its own; the trailing '&' only
  # detaches the wrapper shell so the task returns immediately.
  shell: "bash {{ software }}/{{ zookeeperdir }}/bin/zkServer.sh start &"
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
# NOTE(review): aligned with the directory the role's "create data dir"
# task actually creates (/data/zookeeper); the previous value
# /data/app-data/zookeeper was never created by the playbook.
dataDir=/data/zookeeper
# dataLogDir must be a DIRECTORY for transaction logs (ZooKeeper creates
# version-2/ beneath it), not a path to a .log file.
dataLogDir=/data/log/zookeeper
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
192.168.1.204
---
# Playbook: apply the activemq role to the jf-03 host group as root,
# pulling shared settings from var/main.yml.
- name: test
  hosts: jf-03
  remote_user: root
  vars_files:
    - var/main.yml
  roles:
    - activemq
---
# Shared variables for the deployment roles: service endpoints, package
# tarball file names and the directory names they unpack to.

# zabbix server address the agents report to
zabbix_server: 172.16.0.42
# first IPv4 address of eth0 on the managed host
# (assumes the primary NIC is eth0 — TODO confirm on all host groups)
bindip: "{{ ansible_eth0['ipv4']['address'] }}"
# directory tarballs are unpacked into
software: /opt/app
# base install dir for source builds
softwarebase: /usr/local
# nginx per-site config drop-in dir
ngwork: /usr/local/nginx/conf/conf.d
# OS packages installed for the zabbix agent role
zabbix_agent_packages:
- zabbix-agent
# mysql args
# NOTE(review): plaintext credential committed to VCS — move to a vault
# or environment lookup.
login_password: 1q2w3e4r
mysql_port: 3306
# node_exporter
node: node_exporter-0.18.1.linux-amd64.tar.gz
nodedir: node_exporter-0.18.1.linux-amd64
# redis version
redis: redis-5.0.3.tar.gz
redisdir: redis-5.0.3
# activemq
activemq: apache-activemq-5.15.9-bin.tar.gz
activemqdir: apache-activemq-5.15.9
# previously used release, kept for reference:
#activemq: apache-activemq-5.9.0.zip
#activemqdir: apache-activemq-5.9.0
# zookeeper
zookeeper: zookeeper-3.4.14.tar.gz
zookeeperdir: zookeeper-3.4.14
# nginx (Tengine distribution)
nginx: tengine-2.3.1.tar.gz
nginxdir: tengine-2.3.1
# git
git: git-2.9.5.tar.xz
gitdir: git-2.9.5
# jdk
jdk: jdk-8u202-linux-x64.tar.gz
jdkdir: jdk1.8.0_202
# maven
maven: apache-maven-3.6.1-bin.tar.gz
mavendir: apache-maven-3.6.1
# mongo
mongo: mongodb-linux-x86_64-4.0.9.tgz
mongodir: mongodb-linux-x86_64-4.0.9
# nacos
nacos: nacos-server-1.0.0.tar.gz
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment