executable_kubectl-watch

#!/usr/bin/env bash
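# kubectl-watch: wrap viddy around a set of kubectl, Flux, Prometheus and
# OpenSearch queries.  Modes:
#   kubectl-watch scheduler    watch pending/failed pods cluster-wide
#   kubectl-watch              watch a cluster-wide resource overview
#   kubectl-watch NAMESPACE    watch one namespace in detail
# Tunables: KUBECTL_WATCH_INTERVAL, KUBECTL_WATCH_HISTORY, KUBECTL_WATCH_CGROUP.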
if [[ "$1" == "scheduler" ]]
then
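# Scheduler view: pending/failed pods, not-ready pods and recently restarted
# pods; use wide output when the terminal is wide enough.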
viddy_command()
{
cat <<EOF
$ kubectl get pods --field-selector status.phase=Pending --all-namespaces -o wide
$(kubectl get pods --field-selector status.phase=Pending --all-namespaces -o wide)
$ kubectl get pods --field-selector status.phase=Failed --all-namespaces -o wide
$(kubectl get pods --field-selector status.phase=Failed --all-namespaces -o wide)
EOF
if [[ $(tput cols) -gt 200 ]]
then
cat <<EOF
$ kubectl get pod -A -o wide | awk '/NAME/ || ($3 ~ "0/.*" && $4 != "Completed") { print $0 }'
$(kubectl get pod -A -o wide | awk '/NAME/ || ($3 ~ "0/.*" && $4 != "Completed") { print $0 }')
$ kubectl get pod -o wide -A | grep 'NAME\|(.*m.* ago)'
$(kubectl get pod -o wide -A | grep 'NAME\|(.*[ms].* ago)')
EOF
else
cat <<EOF
$ kubectl get pod -A | awk '/NAME/ || ($3 ~ "0/.*" && $4 != "Completed") { print $0 }'
$(kubectl get pod -A | awk '/NAME/ || ($3 ~ "0/.*" && $4 != "Completed") { print $0 }')
$ kubectl get pod -A | grep 'NAME\|(.*m.* ago)'
$(kubectl get pod -A | grep 'NAME\|(.*[ms].* ago)')
EOF
fi
}
export -f viddy_command
viddy_args=(
--no-title
--interval "${KUBECTL_WATCH_INTERVAL:-10}"
)
# Upstream viddy does not have a --max-history flag; add it only when this build supports it.
if [[ $(viddy --help) == *max-history* ]]
then
viddy_args+=("--max-history=${KUBECTL_WATCH_HISTORY:-10}")
fi
viddy "${viddy_args[@]}" viddy_command
elif [[ -z "$1" ]]
then
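# No argument: cluster-wide overview of nodes, workloads, ingresses and secrets.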
viddy -n "${KUBECTL_WATCH_INTERVAL:-10}" "kubectl get nodes -o wide; echo; kubectl get all -A -o wide; echo; kubectl get ingress -o wide; echo; kubectl get secrets -o wide; echo; kubectl get all -n kube-system -A -o wide"
else
namespace="$1"
export namespace
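# OpenSearch query body: all logs from this namespace for the last 60 minutes,
# newest first.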
opensearch_query()
{
cat <<EOF
{
  "version": true,
  "size": 1000,
  "sort": [
    {
      "@timestamp": {
        "order": "desc",
        "unmapped_type": "boolean"
      }
    }
  ],
  "aggs": {
    "2": {
      "date_histogram": {
        "field": "@timestamp",
        "calendar_interval": "1m",
        "time_zone": "Europe/Moscow",
        "min_doc_count": 1
      }
    }
  },
  "stored_fields": [
    "*"
  ],
  "script_fields": {},
  "docvalue_fields": [
    {
      "field": "@timestamp",
      "format": "date_time"
    },
    {
      "field": "time",
      "format": "date_time"
    }
  ],
  "_source": {
    "excludes": []
  },
  "query": {
    "bool": {
      "must": [],
      "filter": [
        {
          "match_all": {}
        },
        {
          "match_phrase": {
            "kubernetes.namespace_name.keyword": "$namespace"
          }
        },
        {
          "range": {
            "@timestamp": {
              "gte": "$(date -u -d "-60 minutes" +"%Y-%m-%dT%H:%M:%S.%3NZ")",
              "lte": "$(date -u +"%Y-%m-%dT%H:%M:%S.%3NZ")",
              "format": "strict_date_optional_time"
            }
          }
        }
      ],
      "should": [],
      "must_not": []
    }
  },
  "highlight": {
    "pre_tags": [
      "@opensearch-dashboards-highlighted-field@"
    ],
    "post_tags": [
      "@/opensearch-dashboards-highlighted-field@"
    ],
    "fields": {
      "*": {}
    },
    "fragment_size": 2147483647
  }
}
EOF
}
export -f opensearch_query
viddy_command()
{
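# Map the namespace to the Flux Kustomization that deploys it.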
case "$namespace" in
pdns)
kustomization="powerdns"
;;
kube-system)
kustomization="cilium"
;;
*)
kustomization="$namespace"
;;
esac
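# Show KEDA ScaledObjects and ScaledJobs when the namespace has any.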
if [[ $(kubectl get -n "$namespace" scaledobjects -o json | jq '.items | length') -gt 0 ]]
then
cat <<EOF
$ kubectl get -n "$namespace" scaledobjects
$(kubectl get -n "$namespace" scaledobjects)
EOF
fi
if [[ $(kubectl get -n "$namespace" scaledjobs -o json | jq '.items | length') -gt 0 ]]
then
cat <<EOF
$ kubectl get -n "$namespace" scaledjobs
$(kubectl get -n "$namespace" scaledjobs)
EOF
fi
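# Select per-cluster endpoints and credentials based on KUBECONFIG
# (passwords come from pass).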
case "$KUBECONFIG" in
*config-mjru-cluster1)
PROMETHEUS_URL=https://prometheus.corp1.majordomo.ru
OPENSEARCH_ENDPOINT="https://opensearch.corp1.majordomo.ru"
OPENSEARCH_PASSWORD="$(pass show majordomo/public/opensearch-dashboards/admin)"
ALERTMANAGER_URL="https://alertmanager.corp1.majordomo.ru"
VAULT_TOKEN="$(pass show majordomo/public/vault/root)"
VAULT_ADDR="https://vault.intr"
;;
*config-mjru-cluster2)
PROMETHEUS_URL=https://prometheus.corp2.majordomo.ru
OPENSEARCH_ENDPOINT="https://opensearch.corp2.majordomo.ru:9200"
OPENSEARCH_PASSWORD="$(pass show majordomo/public/opensearch-dashboards/admin)"
ALERTMANAGER_URL="https://alertmanager.corp2.majordomo.ru"
;;
*config-home-k8s)
PROMETHEUS_URL=https://prometheus.home.wugi.info
OPENSEARCH_ENDPOINT="https://node-0.example.com:9200"
OPENSEARCH_PASSWORD="$(pass show localhost/opensearch-dashboards/admin)"
ALERTMANAGER_URL="https://alertmanager.home.wugi.info"
;;
esac
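# Latest commit of the Flux Git source for this cluster.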
case "$KUBECONFIG" in
*config-mjru-*)
cat <<EOF
$ git ls-remote https://gitlab.corp1.majordomo.ru/cd/fluxcd.git master
$(git ls-remote https://gitlab.corp1.majordomo.ru/cd/fluxcd.git master)
EOF
;;
*config-home-*)
cat <<EOF
$ git ls-remote https://gitlab.com/wigust/dotfiles.git master
$(git ls-remote https://gitlab.com/wigust/dotfiles.git master)
EOF
;;
esac
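# Run the Prometheus queries defined in the namespace's KEDA triggers.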
if kubectl get -n "$namespace" scaledobjects &> /dev/null
then
mapfile -t queries < <(kubectl get -n "$namespace" scaledobjects "${namespace}-scaledobject" -o json | jq --raw-output '.spec.triggers[] | .metadata.query')
for query in "${queries[@]}"
do
cat <<EOF
query: ${query}
$(echo "$query" | curl --max-time 2 --silent --get "${PROMETHEUS_URL}/api/v1/query" --data-urlencode query@-)
EOF
done
fi
if kubectl get -n "$namespace" scaledjobs &> /dev/null
then
mapfile -t queries < <(kubectl get -n "$namespace" scaledjobs "${namespace}-scaledobject" -o json | jq --raw-output '.spec.triggers[] | .metadata.query')
for query in "${queries[@]}"
do
cat <<EOF
query: ${query}
$(echo "$query" | curl --max-time 2 --silent --get "${PROMETHEUS_URL}/api/v1/query" --data-urlencode query@-)
EOF
done
fi
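# Flux source and sync status: GitRepository, Kustomization and HelmReleases.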
cat <<EOF
$ kubectl get -n flux-system gitrepositories.source.toolkit.fluxcd.io flux-system
$(kubectl get -n flux-system gitrepositories.source.toolkit.fluxcd.io flux-system)
EOF
if kubectl get -n flux-system kustomizations.kustomize.toolkit.fluxcd.io "$kustomization" &> /dev/null
then
cat <<EOF
$ kubectl get -n flux-system kustomizations.kustomize.toolkit.fluxcd.io "$kustomization"
$(kubectl get -n flux-system kustomizations.kustomize.toolkit.fluxcd.io "$kustomization")
EOF
fi
mapfile -t helmreleases < <(kubectl -n "$namespace" get helmreleases.helm.toolkit.fluxcd.io --no-headers=true --output=custom-columns='NAME:metadata.name')
for helmrelease in "${helmreleases[@]}"
do
if kubectl -n "$namespace" get helmreleases.helm.toolkit.fluxcd.io "$helmrelease" &> /dev/null
then
cat <<EOF
$ kubectl -n "$namespace" get helmreleases.helm.toolkit.fluxcd.io "$helmrelease"
$(kubectl -n "$namespace" get helmreleases.helm.toolkit.fluxcd.io "$helmrelease")
$ kubectl -n "$namespace" get -o json helmreleases.helm.toolkit.fluxcd.io "$helmrelease" | yq -y .status
$(kubectl -n "$namespace" get -o json helmreleases.helm.toolkit.fluxcd.io "$helmrelease" | yq -y .status)
EOF
fi
done
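# kube-system: node overview, NoSchedule taints, node usage and control-plane
# health; when the ihs tool is available, also the hypervisors behind the
# master nodes.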
if [[ $namespace == "kube-system" ]]
then
mapfile -t nodes_with_noschedule_taint < <(kubectl get nodes -o go-template='{{range $item := .items}}{{with $nodename := $item.metadata.name}}{{range $taint := $item.spec.taints}}{{if and (eq $taint.effect "NoSchedule")}}{{printf "%s\n" $nodename}}{{end}}{{end}}{{end}}{{end}}' | sort --version-sort)
nodes_with_noschedule()
{
for node in "${nodes_with_noschedule_taint[@]}"
do
echo -n "${node}: "
kubectl get node -o json "$node" \
| jq --monochrome-output --compact-output '.metadata.labels | with_entries(select(.key | test("^.*cluster.local.*$"))) | with_entries(select(.value == "false")) | keys'
done
}
cat <<EOF
$ kubectl get node -o wide | awk 'NR<3{print $0;next}{print $0| "sort --version-sort"}'
$(kubectl get node -o wide | awk 'NR<3{print $0;next}{print $0| "sort --version-sort"}')
$ kubectl get nodes ... # with NoSchedule taint
$(nodes_with_noschedule)
$ kubectl top node | awk 'NR<3{print $0;next}{print $0| "sort --version-sort"}'
$(kubectl top node | awk 'NR<3{print $0;next}{print $0| "sort --version-sort"}')
$ kubectl cluster-info
$(kubectl cluster-info)
$ kubectl get --raw='/readyz?verbose'
$(kubectl get --raw='/readyz?verbose')
EOF
if ihs --help &> /dev/null
then
echo "hypervisor hosts:"
(
mapfile -t masters < <(kubectl master)
for master in "${masters[@]}"
do
mapfile -t ip_addresses < <(command dig "${master}.intr" \
| jc --dig \
| jq --raw-output .[].answer[].data)
for ip_address in "${ip_addresses[@]}"
do
master_hypervisor()
{
command dig -x "$ip_address" \
| jc --dig \
| jq --raw-output '[.[].answer[].data][] | select(. | startswith("vm"))'
}
master_hypervisor_name="$(master_hypervisor)"
printf "%s %s %s %s.intr\n" \
"$master" \
"$ip_address" \
"${master_hypervisor_name/%.intr./}" \
"$(ihs vm dump "$master_hypervisor_name" | recsel -Pname | grep ^kvm)"
done
done
)
printf "\n"
fi
fi
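# flux-system: suspended Kustomizations and HelmReleases, plus the full listings.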
if [[ $namespace == "flux-system" ]]
then
cat <<EOF
$ kubectl get --namespace flux-system kustomizations ... # suspended
$(kubectl get --namespace flux-system kustomizations.kustomize.toolkit.fluxcd.io --output=json | jq --raw-output '.items[] | select(.spec.suspend == true) | .metadata.name')
$ kubectl get --all-namespaces helmreleases ... # suspended
$(kubectl get --all-namespaces helmreleases.helm.toolkit.fluxcd.io --output=json | jq --raw-output '.items[] | select(.spec.suspend == true) | .metadata.name'
tf-controller)
$ kubectl get --all-namespaces kustomizations.kustomize.toolkit.fluxcd.io
$(kubectl get --all-namespaces kustomizations.kustomize.toolkit.fluxcd.io)
$ kubectl get --all-namespaces helmreleases.helm.toolkit.fluxcd.io
$(kubectl get --all-namespaces helmreleases.helm.toolkit.fluxcd.io)
EOF
fi
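# backup-mda / backup-kvm: performed and error counters from Prometheus.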
for namespace_suffix in mda kvm
do
if [[ $namespace == "backup-${namespace_suffix}" ]]
then
for metrics in performed errors
do
query="backup_${namespace_suffix}_${metrics}_total{namespace=\"$namespace\"}"
cat <<EOF
query: ${query}
$(echo "$query" | curl --max-time 2 --silent --get "${PROMETHEUS_URL}/api/v1/query" --data-urlencode query@- | jq --raw-output '.data.result[] | .value[1]' | xargs echo)
EOF
done
fi
done
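# Namespace overview: pod resource usage, workloads, endpoints and ingresses.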
cat <<EOF
$ kubectl top --namespace "$namespace" pod
$(kubectl top --namespace "$namespace" pod)
$ kubectl get all --namespace "$namespace" -o wide
$(kubectl get all --namespace "$namespace" -o wide)
$ kubectl get endpoints --namespace "$namespace" -o wide
$(kubectl get endpoints --namespace "$namespace" -o wide)
$ kubectl get ingress --namespace "$namespace" -o wide
$(kubectl get ingress --namespace "$namespace" -o wide)
EOF
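# Blackbox-exporter HTTP status code for every ingress host.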
if kubectl get -n "$namespace" ingress &> /dev/null
then
mapfile -t hosts < <(kubectl -n "$namespace" get ingress -o json | jq --raw-output '.items[] | .spec.rules[] | .host' | sort -u)
for host in "${hosts[@]}"
do
query="probe_http_status_code{instance=\"https://${host}/\"}"
cat <<EOF
query: ${query}
$(echo "$query" | curl --max-time 2 --silent --get "${PROMETHEUS_URL}/api/v1/query" --data-urlencode query@-)
EOF
done
fi
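# Alerts, certificates, secrets, service accounts, PVCs, volume usage,
# network policies and events.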
cat <<EOF
$ amtool --alertmanager.url="$ALERTMANAGER_URL" alert query --output=simple namespace="$namespace"
$(amtool --alertmanager.url="$ALERTMANAGER_URL" alert query --output=simple namespace="$namespace")
$ kubectl get --namespace "$namespace" certificates -o wide
$(kubectl get --namespace "$namespace" certificates -o wide)
$ kubectl get secrets --namespace "$namespace" -o wide
$(kubectl get secrets --namespace "$namespace" -o wide)
$ kubectl get --namespace "$namespace" serviceaccounts
$(kubectl get --namespace "$namespace" serviceaccounts)
$ kubectl get pvc --namespace "$namespace" -o wide
$(kubectl get pvc --namespace "$namespace" -o wide)
EOF
query="kubelet_volume_stats_available_bytes{job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\"}"
cat <<EOF
query: ${query}
$(echo "$query" | curl --max-time 2 --silent --get "${PROMETHEUS_URL}/api/v1/query" --data-urlencode query@- | jq --raw-output '.data.result[] | .value[1]' | numfmt --to=iec-i --suffix=B | xargs echo)
$ kubectl get networkpolicies --namespace "$namespace" -o wide
$(kubectl get networkpolicies --namespace "$namespace" -o wide)
$ kubectl get events --namespace "$namespace" -o wide
$(kubectl get events --namespace "$namespace" -o wide)
EOF
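# elasticsearch: print log-tailing commands for the ELK hosts (shown as hints,
# not executed).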
if [[ $namespace == "elasticsearch" ]]
then
OPENSEARCH_ENDPOINT="http://es.intr:9200"
cat <<EOF
# logs
ssh -q fluentd.intr sudo tail -f /home/jenkins/es-curator/curator.log
ssh -q kvm15.intr journalctl -u elasticsearch.service -f | grep -vF '[max_concurrent_shard_requests] is not supported in the metadata section and will be rejected in 7.x'
ssh -q fluentd.intr sudo docker logs --tail 10 -f elk_elasticsearch_1 | grep -vF '[max_concurrent_shard_requests] is not supported in the metadata section and will be rejected in 7.x'
ssh -q staff.intr journalctl -u elasticsearch.service -f | grep -vF '[max_concurrent_shard_requests] is not supported in the metadata section and will be rejected in 7.x'
EOF
fi
OPENSEARCH_ARGS=(
--user "admin:${OPENSEARCH_PASSWORD}"
)
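# elasticsearch / opensearch: cluster state, node stats, allocation, health,
# indices, shards and active recoveries.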
if [[ $namespace == "elasticsearch" ]] || [[ $namespace == "opensearch" ]]
then
cat <<EOF
$ curl --max-time 2 --insecure --silent -XGET "${OPENSEARCH_ENDPOINT}/_cluster/state/nodes?pretty"
$(curl --max-time 2 --insecure --silent "${OPENSEARCH_ARGS[@]}" -XGET "${OPENSEARCH_ENDPOINT}/_cluster/state/nodes?pretty")
$ curl --max-time 2 --insecure --silent -XGET "${OPENSEARCH_ENDPOINT}/_cat/nodes?h=name,ram.percent,cpu,load_1m,disk.used_percent&v"
$(curl --max-time 2 --insecure --silent "${OPENSEARCH_ARGS[@]}" -XGET "${OPENSEARCH_ENDPOINT}/_cat/nodes?h=name,ram.percent,cpu,load_1m,disk.used_percent&v" | sort --version-sort)
$ curl --max-time 2 --insecure --silent -XGET "${OPENSEARCH_ENDPOINT}/_cat/allocation?v&pretty"
$(curl --max-time 2 --insecure --silent "${OPENSEARCH_ARGS[@]}" -XGET "${OPENSEARCH_ENDPOINT}/_cat/allocation?v&pretty" | awk '{ print $9, $0 | "column --table" }' | awk '{ print $10="", $0 | "column --table" }' | awk 'NR<2{print $0;next}{print $0| "sort --version-sort"}')
$ curl --max-time 2 --insecure --silent -XGET "${OPENSEARCH_ENDPOINT}/_cluster/health?pretty"
$(curl --max-time 2 --insecure --silent "${OPENSEARCH_ARGS[@]}" -XGET "${OPENSEARCH_ENDPOINT}/_cluster/health?pretty")
$ curl --max-time 2 --insecure --silent "${OPENSEARCH_ENDPOINT}/_cat/indices?v&pretty"
$(command curl --max-time 2 --insecure --silent "${OPENSEARCH_ARGS[@]}" "${OPENSEARCH_ENDPOINT}/_cat/indices?v&pretty" | awk '{ print $3, $0 | "column --table" }' | awk '{ print $4="", $0 | "column --table" }' | awk 'NR<2{print $0;next}{print $0| "sort --version-sort"}')
$ curl --max-time 2 --insecure --silent "${OPENSEARCH_ENDPOINT}/_cat/shards?v&pretty"
$(command curl --max-time 2 --insecure --silent "${OPENSEARCH_ARGS[@]}" "${OPENSEARCH_ENDPOINT}/_cat/shards?v&pretty" | awk '{ print $1 "@" $NF, $0 | "column --table" }' | awk '{ print $2="", $NF="", $0 | "column --table" }' | awk 'NR<2{print $0;next}{print $0| "sort --version-sort"}')
$ curl --max-time 2 --header 'Content-Type: application/json' --insecure --silent --data '{"query": {"match_all": {}}}' "${OPENSEARCH_ENDPOINT}/logstash-*/_search" | jq --raw-output '.hits.hits[] | ._source.log'
$(command curl --max-time 2 --header 'Content-Type: application/json' --insecure --silent "${OPENSEARCH_ARGS[@]}" --data '{"query": {"match_all": {}}}' "${OPENSEARCH_ENDPOINT}/logstash-*/_search" | jq --raw-output '.hits.hits[] | ._source.log')
# Show recovery operations in OpenSearch.
$ curl --max-time 2 --header 'Content-Type: application/json' --insecure --silent "${OPENSEARCH_ENDPOINT}/_cat/recovery?active_only=true&v=true"
$(command curl --max-time 2 --header 'Content-Type: application/json' --insecure --silent "${OPENSEARCH_ARGS[@]}" "${OPENSEARCH_ENDPOINT}/_cat/recovery?active_only=true&v=true")
EOF
fi
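# harbor: status of the Terraform-managed Harbor deployment in Flux.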
if [[ $namespace == "harbor" ]]
then
cat <<EOF
$ git ls-remote https://gitlab.corp1.majordomo.ru/cd/tf-harbor.git master
$(git ls-remote https://gitlab.corp1.majordomo.ru/cd/tf-harbor.git master)
$ kubectl get -n flux-system gitrepositories.source.toolkit.fluxcd.io tf-harbor
$(kubectl get -n flux-system gitrepositories.source.toolkit.fluxcd.io tf-harbor)
$ kubectl describe -n flux-system terraforms.infra.contrib.fluxcd.io tf-harbor
$(kubectl describe -n flux-system terraforms.infra.contrib.fluxcd.io tf-harbor)
EOF
fi
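# mda: free filesystem space on the two storage hosts queried by IP.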
if [[ $namespace == "mda" ]]
then
query='
node_filesystem_free_bytes{instance="172.16.103.230:9100", fstype!="tmpfs", fstype!="ramfs", mountpoint!="/", mountpoint!="/nix/store", mountpoint!="/boot"}
or node_filesystem_free_bytes{instance="172.16.103.47:9100", fstype!="tmpfs", fstype!="ramfs", mountpoint!="/", mountpoint!="/nix/store", mountpoint!="/boot"}
'
cat <<EOF
query: ${query}
$(echo "$query" | curl --max-time 2 --silent --get "${PROMETHEUS_URL}/api/v1/query" --data-urlencode query@- | jq --raw-output '.data.result[] | .value[1]' | numfmt --to=iec-i --suffix=B | xargs echo)
EOF
fi
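# smtp: mxtoolbox blacklist-check links for the outgoing mail addresses.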
if [[ $namespace == "smtp" ]]
then
mapfile -t smtp_hosts < <(getent hosts smtpout1.majordomo.ru smtpout2.majordomo.ru | cut -f 1 -d ' ')
smtp_hosts+=(78.108.86.12) # on smtp-staff.intr server.
cat <<EOF
mxtoolbox.com:
EOF
for host in "${smtp_hosts[@]}"
do
echo "https://mxtoolbox.com/SuperTool.aspx?action=blacklist%3a${host}&run=toolpage"
done
fi
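# redis: DNS record, database count, keyspace statistics and connected clients.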
if [[ $namespace == "redis" ]]
then
cat <<EOF
$ dig redis.intr
$(bash -ic "dig redis.intr 2>&1")
Listing All Databases
The number of databases in Redis is fixed, so we can read it from the server
configuration with CONFIG GET:
$ redis-cli -h redis.intr CONFIG GET databases
$(redis-cli -h redis.intr CONFIG GET databases)
Listing All Databases With Entries
Sometimes we want more information about the databases that contain keys. For
that we can use the Redis INFO command, which reports information and
statistics about the server; here we focus on the keyspace section, which
contains database-related data:
$ redis-cli -h redis.intr INFO keyspace
$(redis-cli -h redis.intr INFO keyspace)
The output lists the databases containing at least one key, along with a few
statistics:
- number of keys contained
- number of keys with expiration
- keys' average time-to-live
$ redis-cli -h redis.intr INFO
$(redis-cli -h redis.intr INFO)
$ redis-cli -h redis.intr CLIENT LIST
$(redis-cli -h redis.intr CLIENT LIST)
EOF
fi
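# nfs: DNS record, exports and RPC/NFS statistics on kube6.intr.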
if [[ $namespace == "nfs" ]]
then
cat <<EOF
$ dig nfs.intr
$(bash -ic "dig nfs.intr 2>&1")
$ ssh root@kube6.intr showmount -e
$(ssh root@kube6.intr showmount -e)
$ ssh root@kube6.intr showmount
$(ssh root@kube6.intr showmount)
$ ssh root@kube6.intr rpcinfo
$(ssh root@kube6.intr rpcinfo)
$ ssh root@kube6.intr rpcinfo -p
$(ssh root@kube6.intr rpcinfo -p)
$ ssh root@kube6.intr nfsstat
$(ssh root@kube6.intr nfsstat)
$ ssh root@kube6.intr nfsstat -m
$(ssh root@kube6.intr nfsstat -m)
EOF
fi
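# keda: external metrics API group registration.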
if [[ $namespace == "keda" ]]
then
cat <<EOF
$ kubectl get --raw /apis/external.metrics.k8s.io
$(kubectl get --raw /apis/external.metrics.k8s.io)
EOF
fi
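# vault: status of each Vault instance, the secrets engines and the vaultPass
# listing.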
if [[ $namespace == "vault" ]]
then
vault_addrs=(
"$VAULT_ADDR"
"http://dh1-mr.intr:8210"
"http://dh2-mr.intr:8220"
"http://dh3-mr.intr:8230"
)
cat <<EOF
An explanation of each field in the status output follows.
- Seal Type: The type of seal in use. This value should match across cluster
members.
- Initialized: Whether the underlying storage has been initialized. This
should always appear with a value of true in any case except that of a new
and uninitialized server.
- Sealed: Whether the server is in a sealed or unsealed state. A sealed server
cannot participate in cluster membership or otherwise be used until it is
unsealed. All members of a healthy cluster should report a value of false.
- Total Shares: The number of key shares made from splitting the root key
(previously known as master key); this value can only be defined during
initialization.
- Threshold: The number of key shares required to compose the root key; this
value can only be defined during initialization.
- Version: The version of Vault in use on the server.
- Storage Type: The type of storage in use.
- Cluster Name: The cluster name string; this value should match on all
members of a healthy cluster.
- Cluster ID: The cluster identification string; this value is dynamically
generated by default, and should match on all members of a healthy cluster.
- HA Enabled: Whether this cluster is using high availability (HA)
coordination functionality.
- HA Cluster: The cluster address used in client redirects.
- HA Mode: The HA mode. Expected values are Active and Standby. There should
be one active leader in every healthy cluster. In the example output, the
pod vault-0 is the Active cluster leader.
- Active Node Address: The address of the active HA cluster leader, used in
request forwarding.
- Raft Committed Index: The index value for storage items which are committed
to the log. This value should closely follow or be equal to the value of
Raft Applied Index in a healthy Vault cluster.
- Raft Applied Index: The index value for storage items which are applied, but
not yet committed to the log.
EOF
for vault_addr in "${vault_addrs[@]}"
do
cat <<EOF
$ VAULT_ADDR="$vault_addr" vault status
$(VAULT_ADDR="$vault_addr" "${HOME}/.nix-profile/bin/vault" status)
EOF
done
if [[ $(tput cols) -gt 255 ]]
then
cat <<EOF
$ vault secrets list -detailed
$(vault secrets list -detailed)
EOF
else
cat <<EOF
$ vault secrets list
$(vault secrets list)
EOF
fi
cat <<EOF
$ vault kv list secret/vaultPass/majordomo
$(vault kv list secret/vaultPass/majordomo)
To add a new key value secret run:
$ vault kv put secret/vaultPass/majordomo/HOSTNAME password=SECRET
EOF
fi
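# Recent namespace logs from OpenSearch, grouped by pod name.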
echo -e "\n$ curl --max-time 5 --insecure --silent ${OPENSEARCH_ENDPOINT}/logstash-*/_search"
echo "$(opensearch_query)" \
| curl --max-time 5 --insecure --silent "${OPENSEARCH_ARGS[@]}" --header 'Content-Type: application/json' --data @- "${OPENSEARCH_ENDPOINT}/logstash-*/_search" \
| jq --raw-output '.hits.hits | group_by(._source.kubernetes.pod_name)[][] | [._source.kubernetes.pod_name, ._source.log] | @tsv' \
| sort --version-sort
}
export -f viddy_command
viddy_args=(
--no-title
--interval "${KUBECTL_WATCH_INTERVAL:-10}"
)
# Upstream viddy does not have a --max-history flag; add it only when this build supports it.
if [[ $(viddy --help) == *max-history* ]]
then
viddy_args+=("--max-history=${KUBECTL_WATCH_HISTORY:-10}")
fi
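# Remember the current tmux window name and rename the window to the namespace
# while watching.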
if [[ -n $TMUX ]]
then
tmux_window_name="$(tmux display-message -p '#W')"
fi
if [[ $tmux_window_name == bash ]]
then
tmux rename-window "$namespace"
fi
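# When KUBECTL_WATCH_CGROUP points to an existing path, run viddy inside a
# memory/cpu cgroup.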
if [[ -e $KUBECTL_WATCH_CGROUP ]]
then
if [[ -e /sys/fs/cgroup/viddy ]]
then
:
else
GID=users
sudo /run/current-system/profile/bin/cgcreate -a "${USER}:${GID}" -t "${USER}:${GID}" -g memory,cpu:viddy
echo 134217728 > /sys/fs/cgroup/viddy/memory.max
fi
sudo cgclassify -g cpu,memory:viddy $$
CGROUP_LOGLEVEL=INFO /run/current-system/profile/bin/cgexec -g memory,cpu:viddy viddy "${viddy_args[@]}" viddy_command
else
viddy "${viddy_args[@]}" viddy_command
fi
# XXX: Dirty hack; it races with tmuxifier and sometimes has to run twice.
if [[ -n $TMUX ]]
then
if [[ $(tmux display-message -p '#W') == main ]]
then
tmux rename-window "bash"
else
tmux rename-window "$tmux_window_name"
fi
fi
fi