centos7
图形化启动: 
systemctl set-default graphical.target 			(开启图形界面) 执行以后,输入命令   reboot
systemctl set-default multi-user.target 		(关闭图形界面) 执行以后,输入命令 reboot 

kafka-topics.sh --list --zookeeper localhost:2181
kafka-topics.sh --list --zookeeper 10.96.118.56:2181

kafka-topics.sh --create --zookeeper 172.18.38.123:2182 --replication-factor 1 --partitions 1 --topic ttACL1
172.18.32.241
## kafka
## 查询topic，进入kafka目录：
kafka-topics.sh --list --zookeeper 172.18.37.241:2182
## 创建topic
kafka-topics.sh --create --zookeeper 172.18.38.123:2182 --replication-factor 1 --partitions 1 --topic ttACL1

kafka-console-consumer.sh --bootstrap-server 172.18.38.123:9093 --topic ttACL1 --from-beginning
kafka-console-producer.sh --broker-list 10.51.254.252:9092 --topic ttACL1


kafka-console-consumer.sh --bootstrap-server 172.18.38.123:9092 --topic syslog_ngfw1 --from-beginning
## 详细信息：
bin/kafka-topics.sh --describe --zookeeper 10.51.254.252:2181 --topic k8s-log
kafka-topics.sh --create --zookeeper 172.18.37.241:2181 --replication-factor 1 --partitions 1 --topic syslog_ngfw1

172.17.0.1 			10.51.254.252
docker exec -it kafka /bin/bash

bash /work/services/configure-service/kafkaDockerRestart.sh 172.18.32.242
kafka-topics.sh --create --zookeeper 172.18.32.242:2181 --replication-factor 1 --partitions 1 --topic ttACL77

kafka-acls.sh --authorizer-properties zookeeper.connect=10.51.254.252:2181 --operation Write --allow-principal User:* --allow-host 172.17.0.1 --add --topic kafka_csplus

kafka-acls.sh --authorizer-properties zookeeper.connect=10.51.254.252:2181 --operation Read --allow-principal User:* --allow-host 172.17.0.1 --add --topic kafka_csplus


kafka-console-producer.sh --broker-list 10.51.254.252:9092 --topic log-notice

kafka-console-consumer.sh --bootstrap-server 10.51.254.252:9092 --from-beginning --topic log-notice

kafka-console-consumer.sh --bootstrap-server 172.18.37.241:9092 --from-beginning --topic log_csplus
kafka-console-producer.sh --broker-list 172.18.37.241:9092  --topic log_csplus

kafka-console-consumer.sh --bootstrap-server 172.18.37.242:9092 --topic log_csplus --from-beginning

kafka-acls.sh --authorizer-properties zookeeper.connect=172.18.37.240:2181 --list --topic log_csplus

kafka-topics.sh  --delete --zookeeper 10.51.254.252:2181  --topic testACL

#ssh免密命令
#卸载失效的密钥:
ssh-keygen -R 172.18.32.240
#生成密钥
ssh-keygen
#发送到其他节点
ssh-copy-id 172.18.32.240
#scp命令
scp -r clickhouseConfig/ root@172.18.37.242:/opt/

## zookeeper
sh /opt/zookeeper/bin/zkServer.sh stop
sh /opt/zookeeper/bin/zkServer.sh restart
sh /opt/zookeeper/bin/zkServer.sh status
## 进入client
sh /opt/zookeeper/bin/zkCli.sh 
docker exec -it kafka /bin/bash
./bin/zkCli.sh

## clickhouse 集群配置
vim /etc/clickhouse-server/metrika.xml
clickhouse-client -m
service clickhouse-server start
service clickhouse-server stop
service clickhouse-server restart
clickhouse-client --user default --password jinZhou7766 -m
./clickhouse-client --format_csv_delimiter="|" -udefault --password jinZhou7766 --database="jin_zhou" --query="select * from alert_log FORMAT CSV" > /var/lib/clickhouse/user_files/alert_log_test.csv

./clickhouse-client --format_csv_delimiter="|" -udefault --password jinZhou7766 --database="jin_zhou" --query="INSERT INTO alert_log FORMAT CSV" < /var/lib/clickhouse/user_files/alert_log_test.csv

##flink
cd /opt/flinkClient/flink-1.13.2/bin     ./start-cluster.sh   ./stop-cluster.sh

## mvn 全jar
mvn assembly:assembly  -DskipTests

## redis 
/usr/local/redis/bin/redis-cli 

#其他
nc -l 9655

lsof abc.txt 显示开启文件abc.txt的进程

lsof -i :22 知道22端口被哪个进程占用

lsof -p 12 看进程号为12的进程打开了哪些文件

df -h 磁盘挂载

systemctl restart mysql.service

systemctl enable mysql.service

##占用的端口
netstat -anp | grep <端口号>
clickhouse 9000
flink  6123   8081


## 占用CPU资源最多的10个进程，可以使用如下命令组合：
ps aux|head -1;ps aux|grep -v PID|sort -rn -k +3|head
 
## 获取占用内存资源最多的10个进程，可以使用如下命令组合：
ps aux|head -1;ps aux|grep -v PID|sort -rn -k +4|head


## java jar运行
nohup java -jar /opt/loadChData/jd_study.jar > /opt/loadChData/jd_study.log 2>&1 &

## docker相关
docker ps  ##正在运行的docker容器 docker ps -a  ## 包括已经停止的容器

docker images  ##查看所有镜像 docker pull image-name  ##下载image
 
docker stop Name/ID  #停止 docker start Name/ID  #启动   docker kill Name/ID   #杀死一个容器
docker rm image-name  #删除一个已经停止的容器
docker rmi image-name ##删除一个或者多个镜像
docker rm -f kafka    #强制删除,停止并删除
docker run -it image_name /bin/bash ##交互式进入容器中
docker exec -it zookeeper /bin/bash # 在容器内运行 exit退出 docker exec container-name touch a.txt #在容器内部运行进程
docker exec mysql mysql -uadmin -p
/usr/bin/mysql -h 127.0.0.1 -uadmin -pBane@7766 
docker container prune -f ##删除所有无用的容器 image 

bash /work/services/configure-service/kafkaDockerRestart.sh 172.18.32.242

##docker hub命令
docker login dockerhub.venuscloud.cn
Username: admin
Password: venus@jinzhou

docker tag 9159db261968 dockerhub.venuscloud.cn/vsm/services/data-manage:5.1.0.1.1

docker tag be1b2f0b164a dockerhub.venuscloud.cn/vsm/middleware/yandex/clickhouse-server:20.8.3.18


docker tag cd4c5990ed5f dockerhub.venuscloud.cn/vsm/middleware/wurstmeister/kafka:2.11-0.10.2.2
docker tag 7e6fc3713adf dockerhub.venuscloud.cn/vsm/middleware/zookeeper:3.4.14

docker pull dockerhub.venuscloud.cn/vsm/middleware/zookeeper:3.4.14
docker pull dockerhub.venuscloud.cn/vsm/middleware/wurstmeister/kafka:2.11-0.10.2.2
## k8s命令:



##服务器cpu sse4.2检查
grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"

docker build -t spider:v2.0 .

k8s部署命令:
docker rmi data-manage:v1.5.1.1
docker rmi dockerhub.venuscloud.cn/vsm/services/data-manage:v1.5.1.1
docker rm -f dockerhub.venuscloud.cn/vsm/services/data-manage:v4.1.0.4.1


88679561  8-11.30 2.30 -4
docker build -t dockerhub.venuscloud.cn/vsm/services/data-manage:1.0.0.3.1 .

docker push dockerhub.venuscloud.cn/vsm/services/data-manage:1.0.0.3.1

cd /work/code/config/work/data-manage
docker pull dockerhub.venuscloud.cn/vsm/services/data-manage:1.0.0.3.1
kubectl delete -f .
kubectl apply -f .


cd /work/config/work/data-manage/

kubectl get pods -n work

kubectl  logs -f -n work data-manage-7b6bc5fc95-qftt4 --tail=1
 
docker image inspect c5f25a3cc188
docker image inspect dockerhub.venuscloud.cn/vsm/services/data-manage:4.1.0.15.1
docker image inspect [镜像名] 查id


sudo kubectl -n work logs -f `sudo kubectl -n work get pods |grep data-manage |awk '{print $1}'|head -n 1` 

sudo kubectl -n work delete pod `sudo kubectl -n work get pods |grep data-manage |awk '{print $1}'|head -n 1`


1.删除悬空的镜像
docker image prune -a -f
2.悬空的容器
docker container prune -f

docker commit -a "by zsf" -m "add topic to zk" -p 7e6fc3713adf dockerhub.venuscloud.cn/vsm/middleware/zookeeper:3.4.14
docker commit -a "by zsf" -m "add topic to kafka" -p cd4c5990ed5f dockerhub.venuscloud.cn/vsm/middleware/wurstmeister/kafka:2.11-0.10.2.2
docker push dockerhub.venuscloud.cn/vsm/middleware/zookeeper:3.4.14
 
 bash /work/services/configure-service/kafkaDockerRestart.sh 172.18.32.242
 
kubectl exec -it data-manage-7bc9b496c7-6vm4r -n work sh


 ftp://172.18.32.240/data

docker tag cd4c5990ed5f dockerhub.venuscloud.cn/vsm/middleware/wurstmeister/kafka:2.11-0.10.2.2
docker tag d762a05e6d0c dockerhub.venuscloud.cn/vsm/services/data-manage:4.1.0.7.3

ftp:
setsebool allow_ftpd_full_access on
systemctl stop firewalld.service
systemctl restart vsftpd.service


ssh: root/root123 web:https://172.18.37.240:8072/login sysadmin@system Mar@2023 


docker commit -a "by zsf" -m "add date exact" -p feda2897dc9a dockerhub.venuscloud.cn/vsm/middleware/wurstmeister/kafka:2.11-0.10.2.2

bash /work/services/configure-service/kafkaDockerRestart.sh 172.18.37.240



##升级clickhouse
yum list installed | grep clickhouse
yum remove -y clickhouse-client.x86_64
yum remove -y clickhouse-common-static.x86_64 

grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
yum install -y curl
yum install -y curl
yum install -y libtool
sudo yum install -y clickhouse-server clickhouse-client
yum list installed| grep clickhouse
yum remove -y clickhouse-common-static.x86_64 

sudo yum install yum-utils
sudo yum-config-manager --add-repo https://repo.clickhouse.com/rpm/stable/x86_64
sudo yum list 'clickhouse*'



docker push dockerhub.venuscloud.cn/vsm/services/data-manage:2.1.0.11.1

ssh root@10.51.254.252  root VE6@.fllR1


1. 选择指定分支并拷贝对应链接
2. 在本地执行git clone -b R0303 https://gitlab.venuscloud.cn/vsm/upgrade-package.git，或类似效果的指令将项目克隆到本地，-b后面跟的是要拉取的分支名，如果升级包过大，可以添加 --depth=1 进行拉取
3. 跳转到上一步克隆的项目根目录，执行git branch R0301-liuqi用于创建本地自己的分支，然后执行git checkout R0301-liuqi跳转过去
4. 在本地分支做好修改后，提交，并使用命令git push origin R0301-liuqi推送到远程gitlab。此时终端会返回一个链接：


下次做镜像的时候注意 config 下的yaml
git clone -b R0303 https://gitlab.venuscloud.cn/vsm/upgrade-package.git

git branch R0303-zsf0105
git checkout R0303-zsf0105
git push origin R0303-zsf0105


git clone -b develop https://gitlab.venuscloud.cn/vsm/config.git
git branch dev-zsf12129
git checkout dev-zsf12129
git push origin dev-zsf12129


git clone -b cpe-dev https://gitlab.venuscloud.cn/vsm/config.git
git branch cpe-dev-zsf128
git checkout cpe-dev-zsf128
git push origin cpe-dev-zsf128


计算出每条数据大小:eachMessageSize = usedSize3Day/countCH3Day
计算出ch分区可以存储的数据量:numOfChStore=totalSize/eachMessageSize
每个小时检查一次ch数据量:numOfChUsed设置数据量阈值:如 70%
计算ch存储阈值的数据量:numOfChStoreMax

kafka_csplus         log_csplus

docker save 镜像名字:版本号 > 要打包的名字.tar
kafka-topics.sh --bootstrap-server 172.18.38.123:9092 --topic syslog_csplus --describe

kubeadm init --kubernetes-version=1.22.8 --image-repository registry.aliyuncs.com/google_containers --apiserver-advertise-address=172.18.38.123  --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16 --v=5

kubectl create namespace middleware
获取所有命名空间
kubectl get ns
获取所有命名空间pod
kubectl get pods -A  
kubectl get pods -n middleware
运行pod
kubectl -n middleware exec -it kafka-6b6f575fbf-w2dsm bash
kubectl  logs -f -n middleware kafka01-575bb9884d-qq6cd
查看域名和ip
kubectl -n middleware get svc

kubectl get deployment -n middleware
kubectl delete deployment zookeeper01 -n middleware

kubectl describe pod/test-k8s-68bb74d654-p2n8f
kubectl delete pod zookeeper01-6bc6b8565c-d9t4t -n middleware

kubectl delete svc/zookeeper01 -n middleware
kubectl delete deploy/zookeeper01 -n middleware


kubectl delete -f .
kubectl apply -f .

zk service 

端口开关:
cat /etc/iptables.sh |grep 22
/usr/sbin/iptables -t nat -D MAR -i enp102s0f1 -p tcp -m tcp  --dport 8123 -j DNAT --to-destination 169.254.1.1
/usr/sbin/iptables -t nat -D MAR -i ens3 -p tcp -m tcp  --dport 22 -j DNAT --to-destination 169.254.1.1

/usr/sbin/iptables -t nat -D MAR -i enp102s0f0 -p tcp -m tcp  --dport 8808 -j DNAT --to-destination 169.254.1.1
/usr/sbin/iptables -t nat -D MAR -i ens3 -p tcp -m tcp  --dport 3306 -j DNAT --to-destination 169.254.1.1
/usr/sbin/iptables -t nat -D MAR -i ens3 -p tcp -m tcp  --dport 8123 -j DNAT --to-destination 169.254.1.1


source <(kubectl completion bash)
/usr/sbin/iptables -t nat -D MAR -i enp102s0f3 -p tcp -m tcp  --dport 3306 -j DNAT --to-destination 169.254.1.1

升级清理目录:
/opt/vsm/upgrade
启明早晨上涨时不要加仓,上涨时不要加仓,从当日高点下跌三个点以后才能加仓!!!!!!
时刻关注股票价格,在26-28必然会震荡一段时间 主力吸筹洗盘


#!/bin/bash
# Print the container image(s) used by every pod in a Kubernetes namespace.
# Usage: bash <script> <namespace>
# Requires: kubectl configured against the target cluster.

# namespace to inspect (first positional argument)
namespace=$1
if [ -z "$namespace" ]; then
   # usage error: report on stderr and exit non-zero so callers can detect it
   echo "Please usage: bash  $0 <namespace>" >&2
   exit 1
fi

# Pod names in the namespace (drop the NAME header line).
pod_name=$(kubectl get pod -n "$namespace" | grep -v NAME | awk '{print $1}')

# For each pod, pull the "image" fields out of its JSON spec/status
# and print them as "<pod>: <image ...>".
for pod in $pod_name
do
    image=$(kubectl get pod -n "$namespace" "$pod" -ojson | grep "\<image\>" | awk '{print $2}' | sed 's/,//g')
    echo -e "\t $pod: $image"
done



virt-manager
cd /ftp/redhatIso/

升级包路径
/opt/tmp/20221213upgrade/upgrade-package


补丁包路径
/root/build-pathc-package/upgrade
./upgrade func -m build -v 0303 -y ./yamlconfig/



