Compare commits

..

4 Commits

Author SHA1 Message Date
bd2656037b Add 2025-12-12 00:20:13 +08:00
dxin
6f7b24926d Update filebeat config 2025-12-11 15:17:03 +08:00
dxin
7ad9956c5d Update filebeat 2025-12-11 11:11:16 +08:00
8288aad918 Add lessie-react 2025-12-02 23:16:38 +08:00
17 changed files with 740 additions and 16 deletions

143
ES/单节点/安装es.conf Normal file
View File

@@ -0,0 +1,143 @@
# Prerequisites & preparation
sudo dnf update -y
sudo dnf install -y nano wget curl unzip
# Open ports 9200 and 5601 in the security group / firewall
# Install Elasticsearch 9.2.2
# Import the official GPG key
sudo rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
# Create the yum repo file
sudo tee /etc/yum.repos.d/elasticsearch.repo <<-'EOF'
[elasticsearch]
name=Elasticsearch repository for 9.x packages
baseurl=https://artifacts.elastic.co/packages/9.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
EOF
# Install Elasticsearch
sudo dnf install elasticsearch --enablerepo=elasticsearch
# Start it right away; if it errors out, check the logs (it may be a permissions issue)
sudo systemctl daemon-reload
sudo systemctl enable elasticsearch
sudo systemctl start elasticsearch
sudo systemctl status elasticsearch
sudo journalctl -u elasticsearch -f
# Manually create the log directory and set permissions
sudo mkdir -p /usr/share/elasticsearch/logs
sudo chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/logs
sudo chmod 750 /usr/share/elasticsearch/logs
# Set the password for the elastic superuser (recommended to set immediately)
sudo /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic
# Check the auto-generated self-signed certificates; if they exist, the setup is normal
ll /etc/elasticsearch/certs/
# View the HTTP CA certificate fingerprint (used when configuring other clients)
sudo openssl x509 -fingerprint -sha256 -in /etc/elasticsearch/certs/http_ca.crt -noout
# Set an environment variable (replace with your actual password)
export ELASTIC_PASSWORD='MyElastic123!'
# Test an HTTPS request (--cacert is required because TLS is enabled)
curl --cacert /etc/elasticsearch/certs/http_ca.crt \
-u elastic:$ELASTIC_PASSWORD \
https://localhost:9200
# Show the non-comment lines of the default config file
grep -v '^\s*#\|^\s*$' /etc/elasticsearch/elasticsearch.yml
# Edit the config file as needed (cluster name, non-local access, etc.)
cluster.name: my-test-es
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 0.0.0.0
xpack.security.enabled: true
xpack.security.enrollment.enabled: true
xpack.security.http.ssl:
  enabled: true
  keystore.path: certs/http.p12
xpack.security.transport.ssl:
  enabled: true
  verification_mode: certificate
  keystore.path: certs/transport.p12
  truststore.path: certs/transport.p12
cluster.initial_master_nodes: ["weblessie-server-02"]
http.host: 0.0.0.0
# Adjust the Elasticsearch JVM heap size
vim /etc/elasticsearch/jvm.options
-Xms4g
-Xmx4g
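# (Optional, a sketch) On package installs the heap can also be overridden with a drop-in
# file under jvm.options.d instead of editing jvm.options directly; the file name below
# ("heap.options") is only an example:
sudo tee /etc/elasticsearch/jvm.options.d/heap.options <<-'EOF'
-Xms4g
-Xmx4g
EOF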
# Restart
sudo systemctl restart elasticsearch
# Generate an enrollment token for later use in Kibana
sudo /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana
# Prepare to install Kibana 9.2.2
# Create the repo file /etc/yum.repos.d/kibana.repo
sudo tee /etc/yum.repos.d/kibana.repo <<-'EOF'
[kibana]
name=Kibana repository for 9.x packages
baseurl=https://artifacts.elastic.co/packages/9.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
EOF
# Install Kibana
sudo dnf install kibana --enablerepo=kibana
# Start
sudo systemctl daemon-reload
sudo systemctl enable --now kibana
# Open Kibana and enter the enrollment token generated above
http://ip:5601
# Get the "verification code"
/usr/share/kibana/bin/kibana-verification-code
# Generate the encryption keys with the official tool (the most standard approach)
sudo /usr/share/kibana/bin/kibana-encryption-keys generate --force
# The output should look like:
# ✔ Encryption keys generated and written to /etc/kibana/kibana.yml:
# xpack.encryptedSavedObjects.encryptionKey
# xpack.reporting.encryptionKey
# xpack.security.encryptionKey
# Edit the config file
grep -v '^\s*#\|^\s*$' /etc/kibana/kibana.yml
server.host: "0.0.0.0"
logging:
  appenders:
    file:
      type: file
      fileName: /var/log/kibana/kibana.log
      layout:
        type: json
  root:
    appenders:
      - default
      - file
pid.file: /run/kibana/kibana.pid
i18n.locale: "zh-CN"
elasticsearch.hosts: [https://10.0.0.38:9200]
elasticsearch.serviceAccountToken: AAEAAWVsYXN0aWMva2liYW5hL2Vucm9sbC1wcm9jZXNzLXRva2VuLTE3NjUzNDE4OTI3MjY6Um9KdUo2N1hSZVNPeGNzOXFDaUh2dw
elasticsearch.ssl.certificateAuthorities: [/var/lib/kibana/ca_1765341893683.crt]
xpack.fleet.outputs: [{id: fleet-default-output, name: default, is_default: true, is_default_monitoring: true, type: elasticsearch, hosts: [https://10.0.0.38:9200], ca_trusted_fingerprint: 80af64db043e12ebda11c10f70042af91306a705fdcb6285814a84b420c734a5}]
xpack.encryptedSavedObjects.encryptionKey: f10166c761265d5ca61e7fa2c1acac73
xpack.reporting.encryptionKey: 1772a5152522675d5a38470e905b2817
xpack.security.encryptionKey: d4b30e82e47f530a998e29cb0b8e5295
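# After changing kibana.yml, restart Kibana and watch its logs (the same checks as used
# for Elasticsearch above):
sudo systemctl restart kibana
sudo systemctl status kibana
sudo journalctl -u kibana -f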

View File

@@ -0,0 +1,41 @@
# Get the fingerprint of the ES certificate
sudo openssl x509 -fingerprint -sha256 -in /etc/elasticsearch/certs/http_ca.crt -noout
sha256 Fingerprint=80:AF:64:DB:04:3E:12:EB:DA:11:C1:0F:70:04:2A:F9:13:06:A7:05:FD:CB:62:85:81:4A:84:B4:20:C7:34:A5
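# The value above can be converted to the bare-hex form used in the configs below
# (strip the "sha256 Fingerprint=" prefix and the colons):
sudo openssl x509 -fingerprint -sha256 -noout -in /etc/elasticsearch/certs/http_ca.crt \
  | cut -d= -f2 | tr -d ':'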
# User created in the Kibana web UI
admin
G7ZSKFM4AQwHQpwA
# Filebeat
output.elasticsearch:
  hosts: ["https://49.51.33.153:9200"]
  username: "elastic"
  password: "-0NiIBOJGn2CATuPWzNc"
  # Verify via the fingerprint (instead of a certificate file)
  ssl.verification_mode: "certificate"
  ssl.certificate_authorities: [] # leave empty (do not verify the full chain)
  ssl.supported_protocols: [TLSv1.2, TLSv1.3]
  # Key point: specify the CA fingerprint (uppercase hex, no 0x prefix, no colons)
  ssl.ca_trusted_fingerprint: "80AF64DB043E12EBDA11C10F70042AF91306A705FDCB6285814A84B420C734A5"
# python
from elasticsearch import Elasticsearch
es = Elasticsearch(
    hosts=["https://49.51.33.153:9200"],
    basic_auth=("elastic", "-0NiIBOJGn2CATuPWzNc"),
    # The fingerprint must have the colons removed (uppercase hex)
    ssl_assert_fingerprint="80AF64DB043E12EBDA11C10F70042AF91306A705FDCB6285814A84B420C734A5",
    verify_certs=True  # must be True
)
print(es.info())

View File

@@ -18,11 +18,11 @@ processors:
when:
equals:
log_type: admin.log
tokenizer: '%{timestamp} [%{thread}] %{log_level} %{log_message}'
tokenizer: '%{timestamp} %{level} %{pid} --- [%{thread}] %{class} : [%{app_name->}] %{message}'
field: "message"
target_prefix: "parsed_sys_info"
target_prefix: "mylog"
ignore_missing: true
overwrite_keys: false
overwrite_keys: true
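# For reference, a hypothetical admin.log line in the shape the new tokenizer expects
# (the line and its values are illustrative, not taken from real logs):
#   2025-12-11-15:17:03.123 INFO 12345 --- [http-nio-8080-exec-1] c.f.admin.UserController : [flymoon-admin] user login ok
# Under target_prefix "mylog", dissect would produce:
#   mylog.timestamp=2025-12-11-15:17:03.123  mylog.level=INFO  mylog.pid=12345
#   mylog.thread=http-nio-8080-exec-1  mylog.class=c.f.admin.UserController
#   mylog.app_name=flymoon-admin  mylog.message=user login ok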

View File

@@ -2,14 +2,14 @@
id: us_pord_01_flymoon-admin
enabled: true
paths:
- /root/logs/flymoon-admin/sys-info.log
- /root/logs/flymoon-admin/app.log
fields:
application: flymoon-admin # custom field identifying the application name
log_type: admin.log # custom field identifying the log type
environment: us-pord # custom field identifying the environment name
instance: us-prod-01 # custom field identifying the machine (instance) name
fields_under_root: true
multiline.pattern: '^\d{2}:\d{2}:\d{2}\.\d{3}' # for the info log timestamp format
multiline.pattern: '^\d{4}-\d{2}-\d{2}-\d{2}:\d{2}:\d{2}\.\d{3}'
multiline.negate: true
multiline.match: after
ignore_older: 24h # ignore old log files (avoid processing already-archived logs)
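# Note on the new multiline pattern: only lines starting with a full
# "YYYY-MM-DD-HH:MM:SS.mmm" timestamp begin a new event; with negate: true and
# match: after, every other line (e.g. a stack trace) is appended to the previous
# event. Illustrative (made-up) example:
#   2025-12-11-15:17:03.123 ERROR ...          <- starts event 1
#   java.lang.NullPointerException             <- appended to event 1
#       at com.example.Foo.bar(Foo.java:42)    <- appended to event 1
#   2025-12-11-15:17:04.456 INFO ...           <- starts event 2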

View File

@@ -27,7 +27,7 @@ processors:
when:
equals:
log_type: email.log
tokenizer: '%{timestamp} [%{thread}] %{level} %{class} - [%{method_line}] - %{message}'
tokenizer: '%{timestamp} %{level} %{pid} --- \\[%{thread}\\] %{message}'
field: "message"
target_prefix: "mylog"
ignore_missing: true
@@ -37,7 +37,7 @@ processors:
when:
equals:
log_type: agent.log
tokenizer: '%{timestamp} %{level} - [%{method},%{line}] - %{message}'
tokenizer: '%{timestamp} %{level} %{pid} --- [%{thread}] %{class} : [%{app_name->}] %{message}'
field: "message"
target_prefix: "mylog"
ignore_missing: true
@@ -45,6 +45,7 @@ processors:
# Output
output.elasticsearch:
hosts: ["http://106.53.194.199:9200"]

View File

@@ -9,7 +9,7 @@
environment: us-pord # custom field identifying the environment name
instance: us-prod-02 # custom field identifying the machine (instance) name
fields_under_root: true
multiline.pattern: '^\d{2}:\d{2}:\d{2}\.\d{3}' # multiline pattern for the email sys-info.log format
multiline.pattern: '^\d{4}-\d{2}-\d{2}-\d{2}:\d{2}:\d{2}\.\d{3}'
multiline.negate: true
multiline.match: after
ignore_older: 24h # ignore old log files (avoid processing already-archived logs)

View File

@@ -20,4 +20,3 @@
start_position: beginning # read from the beginning of the file

View File

@@ -25,18 +25,20 @@ processors:
- dissect:
when:
equals:
log_type: payment.log
tokenizer: '%{timestamp} [%{thread}] %{level} %{class} - [%{method},%{line}] - %{message}'
log_type: agent.log
tokenizer: '%{timestamp} %{level} %{pid} --- [%{thread}] %{class} : [%{app_name->}] %{message}'
field: "message"
target_prefix: "mylog"
ignore_missing: true
overwrite_keys: true
- dissect:
when:
equals:
log_type: agent.log
tokenizer: '%{timestamp} %{level} - [%{method},%{line}] - %{message}'
log_type: payment.log
tokenizer: '%{timestamp} %{level} %{pid} --- [%{thread}] %{class} : [%{app_name->}] %{message}'
field: "message"
target_prefix: "mylog"
ignore_missing: true
@@ -44,6 +46,7 @@ processors:
# Output
output.elasticsearch:
hosts: ["http://106.53.194.199:9200"]

View File

@@ -9,7 +9,7 @@
environment: us-pord # custom field identifying the environment name
instance: us-prod-03 # custom field identifying the machine (instance) name
fields_under_root: true
multiline.pattern: '^\d{2}:\d{2}:\d{2}\.\d{3}' # multiline pattern for the email sys-info.log format
multiline.pattern: '^\d{4}-\d{2}-\d{2}-\d{2}:\d{2}:\d{2}\.\d{3}'
multiline.negate: true
multiline.match: after
ignore_older: 24h # ignore old log files (avoid processing already-archived logs)

View File

@@ -2,14 +2,14 @@
id: us_pord_03_flymoon-payment
enabled: true
paths:
- /root/logs/flymoon-payment/sys-info.log
- /root/logs/flymoon-payment/app.log
fields:
application: flymoon-payment
log_type: payment.log
environment: us-pord
instance: us-prod-03
fields_under_root: true
multiline.pattern: '^\d{2}:\d{2}:\d{2}\.\d{3}'
multiline.pattern: '^\d{4}-\d{2}-\d{2}-\d{2}:\d{2}:\d{2}\.\d{3}'
multiline.negate: true
multiline.match: after
ignore_older: 24h # ignore old log files (avoid processing already-archived logs)

View File

@@ -0,0 +1,66 @@
pipeline {
agent any
parameters {
gitParameter(
branchFilter: 'origin/(.*)',
defaultValue: 'release',
name: 'GIT_BRANCH',
type: 'PT_BRANCH_TAG',
selectedValue: 'DEFAULT',
sortMode: 'NONE',
description: 'Select a code branch: ',
quickFilterEnabled: true,
tagFilter: '*',
listSize: "5"
)
}
environment {
REMOTE_HOST = '192.168.70.15'
REMOTE_PROJECT_PATH = '/data/webapps/lessie-react'
}
stages {
stage('Checkout code') {
steps {
git branch: "${params.GIT_BRANCH}", credentialsId: 'fly_gitlab_auth', url: 'http://172.24.16.20/web/lessie-react.git'
}
}
stage('Sync') {
steps {
sh """
ssh ${REMOTE_HOST} 'mkdir -p ${REMOTE_PROJECT_PATH}'
rsync -avz --delete --exclude='node_modules' ${WORKSPACE}/ ${REMOTE_HOST}:${REMOTE_PROJECT_PATH}/
"""
}
}
stage('Install & start') {
steps {
sh """
ssh ${REMOTE_HOST} '
cd ${REMOTE_PROJECT_PATH} &&
pm2 delete lessie-react || true &&
pm2 list &&
nvm use 22.21.1 &&
npm install &&
npm run build &&
pm2 start ecosystem.config.cjs &&
pm2 save
'
"""
}
}
}
post {
success {
echo 'Deployment succeeded'
}
failure {
echo 'Deployment failed; please check the logs'
}
}
}

View File

@@ -0,0 +1,93 @@
pipeline {
agent any
parameters {
gitParameter(
branchFilter: 'origin/(.*)',
defaultValue: 'release',
name: 'GIT_BRANCH',
type: 'PT_BRANCH_TAG',
selectedValue: 'DEFAULT',
sortMode: 'NONE',
description: 'Select a code branch: ',
quickFilterEnabled: true,
tagFilter: '*',
listSize: "5"
)
}
environment {
REMOTE_HOST = '43.130.56.138'
REMOTE_HOST_B = '43.153.21.64'
REMOTE_PROJECT_PATH = '/data/webapps/lessie-react'
}
stages {
stage('Checkout code') {
steps {
git branch: "${params.GIT_BRANCH}", credentialsId: 'fly_gitlab_auth', url: 'http://172.24.16.20/web/lessie-react.git'
}
}
stage('Sync A') {
steps {
sh """
ssh ${REMOTE_HOST} 'mkdir -p ${REMOTE_PROJECT_PATH}'
rsync -avz --delete --exclude='node_modules' ${WORKSPACE}/ ${REMOTE_HOST}:${REMOTE_PROJECT_PATH}/
"""
}
}
stage('Install & start A') {
steps {
sh """
ssh ${REMOTE_HOST} '
cd ${REMOTE_PROJECT_PATH} &&
pm2 delete lessie-react || true &&
pm2 list &&
nvm use 22.21.1 &&
npm install &&
npm run build &&
pm2 start ecosystem.config.cjs &&
pm2 save
'
"""
}
}
stage('Sync B') {
steps {
sh """
ssh ${REMOTE_HOST_B} 'mkdir -p ${REMOTE_PROJECT_PATH}'
rsync -avz --delete --exclude='node_modules' ${WORKSPACE}/ ${REMOTE_HOST_B}:${REMOTE_PROJECT_PATH}/
"""
}
}
stage('Install & start B') {
steps {
sh """
ssh ${REMOTE_HOST_B} '
cd ${REMOTE_PROJECT_PATH} &&
pm2 delete lessie-react || true &&
pm2 list &&
nvm use 22.21.1 &&
npm install &&
npm run build &&
pm2 start ecosystem.config.cjs &&
pm2 save
'
"""
}
}
}
post {
success {
echo 'Deployment succeeded'
}
failure {
echo 'Deployment failed; please check the logs'
}
}
}

43
nginx/es.jennie.im.conf Normal file
View File

@@ -0,0 +1,43 @@
server {
listen 80;
server_name es.jennie.im;
# Force redirect to HTTPS
return 301 https://$host$request_uri;
}
server {
listen 443 ssl;
server_name es.jennie.im;
# Certificates
ssl_certificate /data/tengine/conf/certificate/jennie.im.crt;
ssl_certificate_key /data/tengine/conf/certificate/jennie.im.key;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
# Recommended security settings
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
access_log /data/tengine/logs/es_jennie_im_access.log;
error_log /data/tengine/logs/es_jennie_im_error.log;
location / {
proxy_pass https://10.0.0.38:9200; # ES internal address (HTTPS)
# Disable backend certificate verification (required; otherwise Nginx will not accept the ES self-signed certificate)
proxy_ssl_verify off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
# Increase buffers for large ES responses
proxy_buffer_size 16k;
proxy_buffers 4 32k;
proxy_busy_buffers_size 64k;
}
}
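# Optional sanity check through the proxy (assumes DNS for es.jennie.im points at this
# server and that jennie.im.crt is a publicly trusted certificate, so no --cacert is needed):
#   curl -u elastic:$ELASTIC_PASSWORD https://es.jennie.im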

View File

@@ -0,0 +1,43 @@
server {
listen 80;
server_name kibana.jennie.im;
# Force redirect to HTTPS
return 301 https://$host$request_uri;
}
server {
listen 443 ssl;
server_name kibana.jennie.im;
# Public HTTPS certificate
ssl_certificate /data/tengine/conf/certificate/jennie.im.crt;
ssl_certificate_key /data/tengine/conf/certificate/jennie.im.key;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
access_log /data/tengine/logs/kibana_jennie_im_access.log;
error_log /data/tengine/logs/kibana_jennie_im_error.log;
# Reverse proxy config for Kibana
location / {
proxy_pass http://10.0.0.38:5601;
proxy_http_version 1.1;
proxy_set_header Connection "keep-alive";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
# Prevent WebSocket disconnects (needed by the Kibana console)
proxy_read_timeout 300s;
proxy_send_timeout 300s;
}
}

View File

@@ -0,0 +1,69 @@
upstream profile_backend {
server 10.0.0.5:3001; # internal address of machine A
server 10.0.0.15:3001; # internal address of machine B
}
log_format profile_log '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'upstream_addr=$upstream_addr '
'upstream_status=$upstream_status '
'upstream_response_time=$upstream_response_time '
'request_time=$request_time';
# 1. Force HTTP to HTTPS; redirect everything to https://profile.lessie.ai
server {
listen 80;
server_name profile.lessie.ai;
return 301 https://profile.lessie.ai$request_uri;
}
# 2. Production site: https://profile.lessie.ai
server {
listen 443 ssl;
server_name profile.lessie.ai;
ssl_certificate /data/tengine/certificate/lessie.ai.pem;
ssl_certificate_key /data/tengine/certificate/lessie.ai.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
access_log /data/tengine/logs/lessie_profile_log.access.log profile_log;
error_log /data/tengine/logs/lessie_profile_log.error.log;
# Reverse proxy to the server-side-rendered Nuxt project on port 3001
location / {
proxy_pass http://profile_backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
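# Note: the two favicon locations below proxy to "official_backend", which is not defined
# in this file; it is assumed to be defined in another included vhost conf (otherwise
# change it to profile_backend).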
# Do not cache the logo (favicon)
location = /favicon.svg {
proxy_pass http://official_backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
add_header Cache-Control "no-cache, no-store, must-revalidate" always;
add_header Pragma "no-cache" always;
add_header Expires 0 always;
}
# Do not cache the logo (favicon)
location = /favicon.ico {
proxy_pass http://official_backend;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
add_header Cache-Control "no-cache, no-store, must-revalidate" always;
add_header Pragma "no-cache" always;
add_header Expires 0 always;
}
}

196
nginx/安装.md Normal file
View File

@@ -0,0 +1,196 @@
1. Download the package from `https://tengine.taobao.org/`
2. Upload it; the install location is `/data/tengine`
3. Extract it: `tar -zxvf tengine-3.1.0.tar.gz`
4. Install the build toolchain: `yum -y install gcc-c++`
5. Install the dependencies: `yum -y install pcre-devel zlib zlib-devel openssl openssl-devel`
6. Create the install directory: `mkdir /data/tengine`
7. Enter the extracted folder: `cd tengine-3.1.0`
8. Run configure with the install path, then build and install: `./configure --prefix=/data/tengine`, `make`, `make install`
```bash
./configure --prefix=/data/tengine \
--conf-path=/data/tengine/conf/nginx.conf \
--error-log-path=/data/tengine/logs/error.log \
--http-log-path=/data/tengine/logs/access.log \
--pid-path=/data/tengine/logs/nginx.pid \
--lock-path=/data/tengine/logs/nginx.lock \
--with-http_ssl_module \
--with-http_gzip_static_module \
--with-pcre \
--with-http_stub_status_module
# Explanation:
--prefix=/data/tengine → install into /data/tengine/
--conf-path=/data/tengine/conf/nginx.conf → location of the nginx.conf config file
--error-log-path=/data/tengine/logs/error.log → error log path
--http-log-path=/data/tengine/logs/access.log → access log path
--pid-path=/data/tengine/logs/nginx.pid → path of the nginx PID file
--lock-path=/data/tengine/logs/nginx.lock → lock file path
--with-http_ssl_module → enable HTTPS support
--with-http_gzip_static_module → enable serving precompressed gzip files
--with-pcre → PCRE regular-expression support (used by rewrite)
--with-http_stub_status_module → enable the Nginx status monitoring page
# Build and install
make -j$(nproc)
make install
#============= Variant with layer-4 (stream) proxying ==============
./configure --prefix=/data/tengine \
--conf-path=/data/tengine/conf/nginx.conf \
--error-log-path=/data/tengine/logs/error.log \
--http-log-path=/data/tengine/logs/access.log \
--pid-path=/data/tengine/logs/nginx.pid \
--lock-path=/data/tengine/logs/nginx.lock \
--with-http_ssl_module \
--with-http_gzip_static_module \
--with-pcre \
--with-http_stub_status_module \
--with-stream
```
9. Check that the directory was installed successfully: `ls /data/tengine`
10. Start Tengine: `cd /data/tengine/sbin`, then `./nginx`
11. Create the additional directories:
mkdir -p /data/tengine/conf/vhosts
mkdir -p /data/tengine/conf/certificate
In the `http {}` block of `/data/tengine/conf/nginx.conf`, add an include for the virtual host configs:
http {
include mime.types;
default_type application/octet-stream;
# Include the virtual host configs
include /data/tengine/conf/vhosts/*.conf;
# other settings...
}
---
/data/tengine/sbin/nginx
/data/tengine/sbin/nginx -s reload
The two commands above: (1) start nginx, (2) reload the configuration file.
Make nginx available globally
Option 1
1. Create a symlink:
```bash
ln -s /data/tengine/sbin/nginx /usr/local/bin/nginx
```
(`/usr/local/bin` is usually already on the system `$PATH`, so prefer this directory)
2. Verify that it works:
```bash
nginx -v
```
Option 2
1. Edit the environment-variable config file (using `bash` as an example):
```bash
vi /etc/profile # system-wide (all users); or edit ~/.bashrc (current user only)
```
2. Append a line adding the Nginx directory to `PATH`:
```bash
export PATH=$PATH:/data/tengine/sbin
```
3. Apply the change immediately:
```bash
source /etc/profile # for the system-wide file; or source ~/.bashrc
```
4. Verify:
```bash
nginx -v # run the command directly to test
```
---
Configure an nginx systemd service
```
vim /etc/systemd/system/tengine.service
[Unit]
Description=Tengine Web Server
After=network.target
[Service]
Type=forking
PIDFile=/data/tengine/logs/nginx.pid
ExecStart=/data/tengine/sbin/nginx
ExecReload=/data/tengine/sbin/nginx -s reload
ExecStop=/data/tengine/sbin/nginx -s stop
# Prevent the processes from being killed by killall / nginx
KillMode=process
# Auto-restart (if you want Nginx to be pulled back up after an unexpected exit)
Restart=on-failure
RestartSec=2s
[Install]
WantedBy=multi-user.target
```
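After creating the unit file, reload systemd and enable the service; a quick sanity check (the curl target assumes Tengine is listening on the default port 80):
```bash
systemctl daemon-reload
systemctl enable --now tengine
systemctl status tengine
curl -I http://127.0.0.1   # expect an HTTP response header from Tengine
```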
```
Check config:        nginx -t
Hot-reload:          systemctl reload tengine
Start the service:   systemctl start tengine
Stop the service:    systemctl stop tengine
Restart the service: systemctl restart tengine
Emergency ops:       nginx -s reload / nginx -s stop
```
For containers:
docker exec -it my-nginx nginx -t # check the config file syntax
docker exec -it my-nginx nginx -s reload # reload the config

View File

@@ -341,3 +341,30 @@ Swap: 8.0Gi 3.9Gi 4.1Gi 从工作进程上分析,从内
3. Compared with the previous check, which child processes disappeared and which new ones appeared
====2025-10-01 10:01:00 ====
k8s log collection
Background: one project has a complete set of frontend and backend pods, but multiple test environments are deployed.
For example: the s1, s2, s3, s4, s5, s6, ... environments.
Taking the s1 environment as an example, it has the following pods, all deployed via Deployments:
s1-flymoon-admin-7cf5fcf447-t7p7n
s1-flymoon-admin-web-756b79567d-whllw
s1-flymoon-agent-66485d7b4-mrnqq
s1-flymoon-email-868c885b79-dvsjc
s1-flymoon-payment-84f7fdbfcb-94bhl
s1-lessie-agents-59797c5464-4vwfr
s1-lessie-ai-web-5c86b8d944-vmv72
s1-lessie-go-api-774ddc644c-m4cqc
Then for the s2 environment, the other flymoon base services reuse the s1 environment's pods (accessed via their Services); s2 itself only runs:
s2-lessie-agents-69798c5414-1hvfr
s2-lessie-ai-web-9c8988d914-mrv72
s2-lessie-go-api-47498c641c-4cpqc
Environments s3 through s6 are similar to s2; each only has its own lessie-agents, lessie-ai-web, and lessie-go-api.
Now I need to collect the logs into ES, and this ES is deployed outside the k8s cluster. How should the logs be collected?
DaemonSet approach: run one collector per node to pick up the logs of the pods on that node. Which collector should be used, and how should it be configured so that it auto-discovers pod logs and can process them (e.g. field splitting / JSON parsing)?
And how does the collector pod on each node collect the right logs? For example, the s1 environment's s1-lessie-agents pod has multiple replicas scheduled on different nodes; how do the collectors on node A and node B ship the logs of their local s1-lessie-agents pods into the same ES index?
Finally, how should the ES indices be named? Set a lifecycle policy, an index template, and split indices by day, week, or month. A possible configuration sketch follows below.
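One possible answer, sketched below as a Filebeat DaemonSet config (the `filebeat-k8s` index prefix, the `app` pod label, the NODE_NAME environment variable injected by the DaemonSet spec, and the ES endpoint are assumptions for illustration, not taken from this repo): each collector only watches the pods on its own node via the kubernetes autodiscover provider, events can be JSON-parsed, and the index name is built from the app label plus the date, so replicas of the same app on different nodes land in the same index.
```yaml
filebeat.autodiscover:
  providers:
    - type: kubernetes
      node: ${NODE_NAME}                 # each collector only watches pods on its own node
      hints.enabled: true                # per-pod overrides via co.elastic.logs/* annotations
      hints.default_config:
        type: container
        paths:
          - /var/log/containers/*-${data.kubernetes.container.id}.log

processors:
  - decode_json_fields:                  # parse JSON log lines when present
      fields: ["message"]
      target: ""
      overwrite_keys: true

output.elasticsearch:
  hosts: ["https://es.jennie.im:443"]    # external ES, e.g. via the proxy above (illustrative)
  username: "elastic"
  password: "${ES_PASSWORD}"
  # Replicas of the same app on different nodes render the same index name,
  # e.g. filebeat-k8s-s1-lessie-agents-2025.12.11
  index: "filebeat-k8s-%{[kubernetes.labels.app]}-%{+yyyy.MM.dd}"

# Required when a custom index name is used:
setup.template.name: "filebeat-k8s"
setup.template.pattern: "filebeat-k8s-*"
setup.ilm.enabled: false                 # attach your own ILM policy / rollover to the template instead
```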