2024-02-20 17:15:27 +08:00
committed by huty
parent 6706e1a633
commit 34158042ad
1529 changed files with 177765 additions and 0 deletions

View File

@@ -0,0 +1,8 @@
version: "3.7"
services:
ch13-elasticsearch:
image: kiamol/ch13-elasticsearch:latest-linux-amd64
ch13-kibana:
image: kiamol/ch13-kibana:latest-linux-amd64

View File

@@ -0,0 +1,8 @@
version: "3.7"
services:
ch13-elasticsearch:
image: kiamol/ch13-elasticsearch:latest-linux-arm64
ch13-kibana:
image: kiamol/ch13-kibana:latest-linux-arm64

View File

@@ -0,0 +1,12 @@
version: "3.7"
services:
ch13-elasticsearch:
image: kiamol/ch13-elasticsearch:latest
build:
context: ./elasticsearch
ch13-kibana:
image: kiamol/ch13-kibana:latest
build:
context: ./kibana
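
This variant of the compose file includes build contexts, so both images can be built locally. A minimal sketch of building them, assuming a Docker Compose v2 CLI:
```
# build both images for the local architecture
docker compose -f docker-compose.yml build
```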

View File

@@ -0,0 +1,42 @@
ARG ALPINE_VERSION="3.15"
FROM alpine:$ALPINE_VERSION AS download-base
WORKDIR /downloads
RUN echo "$(apk --print-arch)" > /arch.txt
FROM download-base AS installer
ARG ES_VERSION="7.10.2"
# find the downloads for previous versions here - https://www.elastic.co/downloads/past-releases#elasticsearch-oss-no-jdk
# 7.10 is the last version released under the OSS license, see - https://www.elastic.co/pricing/faq/licensing
# there's no no-jdk build for arm64, so we download the full package and strip the JDK out
#https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-7.10.2-linux-x86_64.tar.gz
#https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-7.10.2-linux-aarch64.tar.gz
RUN wget -O elasticsearch.tar.gz "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-${ES_VERSION}-linux-$(cat /arch.txt).tar.gz"
RUN mkdir /elasticsearch && \
tar -xzf elasticsearch.tar.gz --strip-components=1 -C /elasticsearch && \
rm -rf /elasticsearch/jdk
# Elasticsearch requires a JVM - this image provides a minimal JRE installation
# see the product-JVM version matrix https://www.elastic.co/support/matrix#matrix_jvm
FROM openjdk:11.0.11-jre-slim
WORKDIR /usr/share/elasticsearch
COPY --from=installer /elasticsearch .
EXPOSE 9200 9300
ENV ES_HOME="/usr/share/elasticsearch" \
ES_JAVA_OPTS="-Xms1024m -Xmx1024m"
COPY elasticsearch.yml log4j2.properties ./config/
RUN groupadd -g 1000 elasticsearch && \
adduser -uid 1000 -gid 1000 --home ${ES_HOME} elasticsearch && \
chmod 0775 ${ES_HOME} && \
chown -R 1000:0 ${ES_HOME}
USER elasticsearch:root
CMD ["/usr/share/elasticsearch/bin/elasticsearch"]
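
The Dockerfile above downloads the OSS tarball for whatever architecture `apk --print-arch` reports, strips the bundled JDK, and lays the files onto a slim JRE image. A rough local smoke test, using the build context from the compose file (the container name and timing are examples only):
```
docker build -t kiamol/ch13-elasticsearch:latest ./elasticsearch
docker run -d --name es-test -p 9200:9200 kiamol/ch13-elasticsearch:latest
# startup can take 30-60 seconds, then the root endpoint reports the cluster name
sleep 60
curl -s localhost:9200
docker rm -f es-test
```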

View File

@@ -0,0 +1,3 @@
cluster.name: "kiamol"
discovery.type: single-node
network.host: 0.0.0.0

View File

@@ -0,0 +1,9 @@
status = error
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker%m%n
rootLogger.level = info
rootLogger.appenderRef.console.ref = console

View File

@@ -0,0 +1,33 @@
ARG NODE_TAG=10.23.1-alpine3.11
ARG ALPINE_VERSION="3.15"
FROM alpine:$ALPINE_VERSION AS download-base
WORKDIR /downloads
RUN echo "$(apk --print-arch)" > /arch.txt
FROM download-base AS installer
ARG KIBANA_VERSION="7.10.2"
# find the downloads for previous versions here - https://www.elastic.co/downloads/past-releases#kibana-oss
# 7.10 is the last version released under the OSS license, see - https://www.elastic.co/pricing/faq/licensing
RUN wget -O kibana.tar.gz https://artifacts.elastic.co/downloads/kibana/kibana-oss-${KIBANA_VERSION}-linux-$(cat /arch.txt).tar.gz
RUN mkdir /kibana && \
tar -xzf kibana.tar.gz --strip-components=1 -C /kibana && \
rm -rf /kibana/node
# Kibana requires Node.js - this image is the official Node distribution
# see the Node.js versions in https://www.elastic.co/guide/en/kibana/master/upgrading-nodejs.html
FROM node:$NODE_TAG
EXPOSE 5601
ENV KIBANA_HOME="/usr/share/kibana"
WORKDIR /usr/share/kibana
COPY --from=installer /kibana .
COPY ./kibana bin/
COPY ./kibana.yml config/
RUN chmod +x bin/kibana
CMD ["/usr/share/kibana/bin/kibana", "--allow-root"]
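
The Kibana image follows the same pattern: download the OSS bundle, remove the embedded Node.js runtime, and run on the official Node image instead. A minimal sketch of building it with the default version argument, using the build context from the compose file:
```
docker build -t kiamol/ch13-kibana:latest --build-arg KIBANA_VERSION=7.10.2 ./kibana
```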

View File

@@ -0,0 +1,23 @@
#!/bin/sh
SCRIPT=$0
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
while [ -h "$SCRIPT" ] ; do
ls=$(ls -ld "$SCRIPT")
# Drop everything prior to ->
link=$(expr "$ls" : '.*-> \(.*\)$')
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT=$(dirname "$SCRIPT")/"$link"
fi
done
DIR="$(dirname "${SCRIPT}")/.."
CONFIG_DIR=${KBN_PATH_CONF:-"$DIR/config"}
if [ -f "${CONFIG_DIR}/node.options" ]; then
KBN_NODE_OPTS="$(grep -v ^# < ${CONFIG_DIR}/node.options | xargs)"
fi
NODE_OPTIONS="--no-warnings --max-http-header-size=65536 $KBN_NODE_OPTS $NODE_OPTIONS" NODE_ENV=production exec node "${DIR}/src/cli/dist" "${@}"

View File

@@ -0,0 +1,112 @@
# Kibana is served by a back end server. This setting specifies the port to use.
#server.port: 5601
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "0.0.0.0"
# Enables you to specify a path to mount Kibana at if you are running behind a proxy.
# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
# from requests it receives, and to prevent a deprecation warning at startup.
# This setting cannot end in a slash.
#server.basePath: ""
# Specifies whether Kibana should rewrite requests that are prefixed with
# `server.basePath` or require that they are rewritten by your reverse proxy.
# This setting was effectively always `false` before Kibana 6.3 and will
# default to `true` starting in Kibana 7.0.
#server.rewriteBasePath: false
# The maximum payload size in bytes for incoming server requests.
#server.maxPayloadBytes: 1048576
# The Kibana server's name. This is used for display purposes.
#server.name: "your-hostname"
# The URLs of the Elasticsearch instances to use for all your queries.
elasticsearch.hosts: ["http://elasticsearch:9200"]
# When this setting's value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.
#elasticsearch.preserveHost: true
# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
#kibana.index: ".kibana"
# The default application to load.
#kibana.defaultAppId: "home"
# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "user"
#elasticsearch.password: "pass"
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.enabled: false
#server.ssl.certificate: /path/to/your/server.crt
#server.ssl.key: /path/to/your/server.key
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files validate that your Elasticsearch backend uses the same key files.
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key
# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
# To disregard the validity of SSL certificates, change this setting's value to 'none'.
#elasticsearch.ssl.verificationMode: full
# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
#elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000
# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]
# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}
# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 30000
# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
#elasticsearch.startupTimeout: 5000
# Logs queries sent to Elasticsearch. Requires logging.verbose set to true.
#elasticsearch.logQueries: false
# Specifies the path where Kibana creates the process ID file.
#pid.file: /var/run/kibana.pid
# Enables you to specify a file where Kibana stores log output.
#logging.dest: stdout
# Set the value of this setting to true to suppress all logging output.
#logging.silent: false
# Set the value of this setting to true to suppress all logging output other than error messages.
#logging.quiet: false
# Set the value of this setting to true to log all events, including system usage information
# and all requests.
#logging.verbose: false
# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
#ops.interval: 5000
# Specifies locale to be used for all localizable strings, dates and number formats.
#i18n.locale: "en"

View File

@@ -0,0 +1,10 @@
# read every service's image name from the compose file
$images = $(yq e '.services.[].image' docker-compose.yml)

foreach ($image in $images)
{
    # combine the per-architecture tags into a single multi-arch manifest, then push it
    docker manifest create --amend $image `
        "$($image)-linux-arm64" `
        "$($image)-linux-amd64"
    docker manifest push $image
}
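
The script combines the `-linux-arm64` and `-linux-amd64` tags into a single multi-arch manifest and pushes it. A quick way to check the result, using one of the image names from the compose file:
```
# the manifest should list both platforms
docker manifest inspect kiamol/ch13-elasticsearch:latest | grep architecture
```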

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: elasticsearch
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
spec:
selector:
app: elasticsearch
ports:
- name: elasticsearch
port: 9200
targetPort: 9200
type: ClusterIP

View File

@@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: elasticsearch
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
spec:
selector:
matchLabels:
app: elasticsearch
template:
metadata:
labels:
app: elasticsearch
spec:
containers:
- image: kiamol/ch13-elasticsearch
name: elasticsearch
ports:
- containerPort: 9200
name: elasticsearch

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: kiamol-ch13-logging
labels:
kiamol: ch13

View File

@@ -0,0 +1,39 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fluent-bit-config
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
data:
fluent-bit.conf: |
[SERVICE]
Flush 5
Log_Level error
Daemon off
Parsers_File parsers.conf
@INCLUDE input.conf
@INCLUDE output.conf
input.conf: |
[INPUT]
Name tail
Tag kube.*
Path /var/log/containers/timecheck*.log
Parser docker
Refresh_Interval 10
output.conf: |
[OUTPUT]
Name stdout
Format json_lines
Match kube.*
parsers.conf: |
[PARSER]
Name docker
Format json
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep On
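
This ConfigMap tails only the timecheck container log files, parses the Docker JSON log format, and echoes the structured records to Fluent Bit's own stdout. Once the DaemonSet in the next file is running with this config, a quick check (the label and namespace come from the manifests in this chapter):
```
kubectl logs -l app=fluent-bit -n kiamol-ch13-logging --tail 3
```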

View File

@@ -0,0 +1,75 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluent-bit
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
spec:
selector:
matchLabels:
app: fluent-bit
template:
metadata:
labels:
app: fluent-bit
spec:
serviceAccountName: fluent-bit
containers:
- name: fluent-bit
image: fluent/fluent-bit:1.8.11
volumeMounts:
- name: fluent-bit-config
mountPath: /fluent-bit/etc/
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: fluent-bit-config
configMap:
name: fluent-bit-config
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
---
# RBAC configuration - ignore this until we get to chapter 17 :)
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluent-bit
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: fluent-bit
labels:
kiamol: ch13
rules:
- apiGroups: [""]
resources:
- namespaces
- pods
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fluent-bit
labels:
kiamol: ch13
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: fluent-bit
subjects:
- kind: ServiceAccount
name: fluent-bit
namespace: kiamol-ch13-logging
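
The DaemonSet runs one collector Pod per node, mounting the node's log directories read-only and the ConfigMap as the Fluent Bit configuration; the ServiceAccount and ClusterRole give the Kubernetes filter read access to Pod and namespace metadata. A sketch of deploying and verifying it (the manifest path is an example - use whatever folder these files live in):
```
kubectl apply -f fluentbit/
# expect one fluent-bit Pod per node
kubectl get pods -l app=fluent-bit -n kiamol-ch13-logging -o wide
```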

View File

@@ -0,0 +1,63 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fluent-bit-config
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
data:
fluent-bit.conf: |
[SERVICE]
Flush 5
Log_Level error
Daemon off
Parsers_File parsers.conf
@INCLUDE input.conf
@INCLUDE filter.conf
@INCLUDE output.conf
input.conf: |
[INPUT]
Name tail
Tag kube.<namespace_name>.<container_name>.<pod_name>.<docker_id>-
Tag_Regex (?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace_name>[^_]+)_(?<container_name>.+)-(?<docker_id>[a-z0-9]{64})\.log$
Path /var/log/containers/*.log
Parser docker
Refresh_Interval 10
filter.conf: |
[FILTER]
Name kubernetes
Match kube.*
Kube_Tag_Prefix kube.
Regex_Parser kube-tag
output.conf: |
[OUTPUT]
Name es
Match kube.kiamol-ch13-test.*
Host elasticsearch
Index test
Generate_ID On
[OUTPUT]
Name es
Match kube.kube-system.*
Host elasticsearch
Index sys
Generate_ID On
parsers.conf: |
[PARSER]
Name docker
Format json
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep On
[PARSER]
Name kube-tag
Format regex
Regex ^(?<namespace_name>[^_]+)\.(?<container_name>.+)\.(?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)\.(?<docker_id>[a-z0-9]{64})-$
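
This version swaps the stdout output for two Elasticsearch outputs: test-namespace logs land in a `test` index and kube-system logs in a `sys` index. One way to confirm documents are arriving, using the Service defined earlier (run the port-forward in a separate terminal):
```
kubectl port-forward svc/elasticsearch 9200:9200 -n kiamol-ch13-logging
# in another terminal - the test and sys indices should appear with growing doc counts
curl -s "localhost:9200/_cat/indices?v"
```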

View File

@@ -0,0 +1,83 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fluent-bit-config
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
data:
fluent-bit.conf: |
[SERVICE]
Flush 5
Log_Level error
Daemon off
Parsers_File parsers.conf
@INCLUDE input.conf
@INCLUDE filter.conf
@INCLUDE output.conf
input.conf: |
[INPUT]
Name tail
Tag kube.<namespace_name>.<container_name>.<pod_name>.<docker_id>-
Tag_Regex (?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace_name>[^_]+)_(?<container_name>.+)-(?<docker_id>[a-z0-9]{64})\.log$
Path /var/log/containers/*.log
Parser docker
Refresh_Interval 10
filter.conf: |
[FILTER]
Name kubernetes
Match kube.*
Kube_Tag_Prefix kube.
Regex_Parser kube-tag
Annotations Off
Merge_Log On
K8S-Logging.Parser On
[FILTER]
Name grep
Match kube.kiamol-ch13-test.api.numbers-api*
Regex priority [234]
output.conf: |
[OUTPUT]
Name es
Match kube.kiamol-ch13-test.*
Host elasticsearch
Index test
Generate_ID On
[OUTPUT]
Name es
Match kube.kube-system.*
Host elasticsearch
Index sys
Generate_ID On
parsers.conf: |
[PARSER]
Name docker
Format json
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep On
[PARSER]
Name kube-tag
Format regex
Regex ^(?<namespace_name>[^_]+)\.(?<container_name>.+)\.(?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)\.(?<docker_id>[a-z0-9]{64})-$
[PARSER]
Name nginx
Format regex
Regex ^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)")?$
Time_Key time
Time_Format %d/%b/%Y:%H:%M:%S %z
[PARSER]
Name dotnet-syslog
Format regex
Regex ^\<(?<priority>[0-9]+)\>*(?<message>.*)$
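
The extra grep filter keeps only numbers-api records whose `priority` field (extracted by the dotnet-syslog parser) is 2, 3 or 4 - the critical, error and warning levels. A rough way to see the effect while the earlier port-forward to Elasticsearch is still running; the query field name is the one the parser produces:
```
curl -s "localhost:9200/test/_count?q=priority:2"
# counts for filtered-out priorities should stop growing once this config is applied
curl -s "localhost:9200/test/_count?q=priority:6"
```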

View File

@@ -0,0 +1,57 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fluent-bit-config
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
data:
fluent-bit.conf: |
[SERVICE]
Flush 5
Log_Level error
Daemon off
Parsers_File parsers.conf
@INCLUDE input.conf
@INCLUDE filter.conf
@INCLUDE output.conf
input.conf: |
[INPUT]
Name tail
Tag kube.<namespace_name>.<container_name>.<pod_name>.<docker_id>-
Tag_Regex (?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace_name>[^_]+)_(?<container_name>.+)-(?<docker_id>[a-z0-9]{64})\.log$
Path /var/log/containers/*.log
Parser docker
Refresh_Interval 10
filter.conf: |
[FILTER]
Name kubernetes
Match kube.*
Kube_Tag_Prefix kube.
Regex_Parser kube-tag
output.conf: |
[OUTPUT]
Name stdout
Format json_lines
Match kube.kiamol-ch13-test.*
[OUTPUT]
Name counter
Match kube.kiamol-ch13-dev.*
parsers.conf: |
[PARSER]
Name docker
Format json
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep On
[PARSER]
Name kube-tag
Format regex
Regex ^(?<namespace_name>[^_]+)\.(?<container_name>.+)\.(?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)\.(?<docker_id>[a-z0-9]{64})-$

View File

@@ -0,0 +1,53 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fluent-bit-config
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
data:
fluent-bit.conf: |
[SERVICE]
Flush 5
Log_Level error
Daemon off
Parsers_File parsers.conf
@INCLUDE input.conf
@INCLUDE filter.conf
@INCLUDE output.conf
input.conf: |
[INPUT]
Name tail
Tag kube.<namespace_name>.<container_name>.<pod_name>.<docker_id>-
Tag_Regex (?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace_name>[^_]+)_(?<container_name>.+)-(?<docker_id>[a-z0-9]{64})\.log$
Path /var/log/containers/*.log
Parser docker
Refresh_Interval 10
filter.conf: |
[FILTER]
Name kubernetes
Match kube.*
Kube_Tag_Prefix kube.
Regex_Parser kube-tag
output.conf: |
[OUTPUT]
Name stdout
Format json_lines
Match kube.kiamol-ch13-test.*
parsers.conf: |
[PARSER]
Name docker
Format json
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep On
[PARSER]
Name kube-tag
Format regex
Regex ^(?<namespace_name>[^_]+)\.(?<container_name>.+)\.(?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)\.(?<docker_id>[a-z0-9]{64})-$

View File

@@ -0,0 +1,79 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fluent-bit-config
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
data:
fluent-bit.conf: |
[SERVICE]
Flush 5
Log_Level error
Daemon off
Parsers_File parsers.conf
@INCLUDE input.conf
@INCLUDE filter.conf
@INCLUDE output.conf
input.conf: |
[INPUT]
Name tail
Tag kube.<namespace_name>.<container_name>.<pod_name>.<docker_id>-
Tag_Regex (?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace_name>[^_]+)_(?<container_name>.+)-(?<docker_id>[a-z0-9]{64})\.log$
Path /var/log/containers/*.log
Parser docker
Refresh_Interval 10
filter.conf: |
[FILTER]
Name kubernetes
Match kube.*
Kube_Tag_Prefix kube.
Regex_Parser kube-tag
Annotations Off
Merge_Log On
K8S-Logging.Parser On
output.conf: |
[OUTPUT]
Name es
Match kube.kiamol-ch13-test.*
Host elasticsearch
Index test
Generate_ID On
[OUTPUT]
Name es
Match kube.kube-system.*
Host elasticsearch
Index sys
Generate_ID On
parsers.conf: |
[PARSER]
Name docker
Format json
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep On
[PARSER]
Name kube-tag
Format regex
Regex ^(?<namespace_name>[^_]+)\.(?<container_name>.+)\.(?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)\.(?<docker_id>[a-z0-9]{64})-$
[PARSER]
Name nginx
Format regex
Regex ^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)")?$
Time_Key time
Time_Format %d/%b/%Y:%H:%M:%S %z
[PARSER]
Name dotnet-syslog
Format regex
Regex ^\<(?<priority>[0-9]+)\>*(?<message>.*)$
Time_Key time
Time_Format %b %d %H:%M:%S

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: kibana
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
spec:
selector:
app: kibana
ports:
- name: kibana
port: 5601
targetPort: 5601
type: LoadBalancer

View File

@@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana
namespace: kiamol-ch13-logging
labels:
kiamol: ch13
spec:
selector:
matchLabels:
app: kibana
template:
metadata:
labels:
app: kibana
spec:
containers:
- image: kiamol/ch13-kibana
name: kibana
ports:
- containerPort: 5601
name: kibana
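
Kibana is published through the LoadBalancer Service on port 5601. If the cluster doesn't hand out external addresses, a port-forward is a simple fallback:
```
kubectl port-forward svc/kibana 5601:5601 -n kiamol-ch13-logging
# then browse to http://localhost:5601
```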

View File

@@ -0,0 +1,71 @@
# Ch13 lab
## Setup
Deploy the logging subsystem:
```
kubectl apply -f lab/logging/
```
## Sample Solution
### Part 1
Fluent Bit is [configured](./logging/fluentbit-config.yaml) to send log entries from Pods in the namespace `kiamol-ch13-lab` to an Elasticsearch index called `lab`.
Create that namespace and deploy the vweb app:
```
kubectl create ns kiamol-ch13-lab
kubectl apply -f lab/vweb/ -n kiamol-ch13-lab
```
> Browse to the app, which will generate Nginx access logs in the container.
> Browse to Kibana and create an index pattern for the index called `lab` - verify that logs are coming through.
![Lab part 1 - logs from Nginx](./docs/part1.png)
### Part 2
The logs are there, but they're not parsed, so they show up as plain text.
Deploy an update to the [vweb Deployment](./solution/vweb-with-parser.yaml) which adds an annotation to use the Nginx parser:
```
kubectl apply -f lab/solution/vweb-with-parser.yaml -n kiamol-ch13-lab
```
> Browse to the app again and refresh Kibana - the logs should have fields for HTTP path, response code etc.
![Lab part 2 - parsed fields from Nginx](./docs/part2.png)
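A quick way to confirm the parser annotation is on the Pod template:
```
kubectl get deploy vweb -n kiamol-ch13-lab -o jsonpath='{.spec.template.metadata.annotations}'
```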
### Part 3
Now to filter out 304s, add a [grep filter](./solution/fluentbit-config-grep.yaml) to the Fluent Bit config and update the Pod:
```
kubectl apply -f lab/solution/fluentbit-config-grep.yaml
kubectl rollout restart ds/fluent-bit -n kiamol-ch13-lab-logging
kubectl wait --for=condition=ContainersReady pod -l app=fluent-bit -n kiamol-ch13-lab-logging
```
> Browse to the app again and refresh Kibana - no 304s are shown for recent requests
![Lab part 3 - excluding 304s from Nginx](./docs/part3.png)
## Teardown
Deleting the namespaces removes everything:
```
kubectl delete ns kiamol-ch13-lab
kubectl delete ns kiamol-ch13-lab-logging
```

Binary file not shown (added image, 170 KiB)

Binary file not shown (added image, 55 KiB)

Binary file not shown (added image, 26 KiB)

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: kiamol-ch13-lab-logging
labels:
kiamol: ch13-lab

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: elasticsearch
namespace: kiamol-ch13-lab-logging
spec:
selector:
app: elasticsearch
ports:
- name: elasticsearch
port: 9200
targetPort: 9200
type: ClusterIP

View File

@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: elasticsearch
namespace: kiamol-ch13-lab-logging
spec:
selector:
matchLabels:
app: elasticsearch
template:
metadata:
labels:
app: elasticsearch
spec:
containers:
- image: kiamol/ch13-elasticsearch
name: elasticsearch
ports:
- containerPort: 9200
name: elasticsearch

View File

@@ -0,0 +1,62 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fluent-bit-config
namespace: kiamol-ch13-lab-logging
data:
fluent-bit.conf: |
[SERVICE]
Flush 5
Log_Level warn
Daemon off
Parsers_File parsers.conf
@INCLUDE input.conf
@INCLUDE filter.conf
@INCLUDE output.conf
input.conf: |
[INPUT]
Name tail
Tag kube.<namespace_name>.<container_name>.<pod_name>.<docker_id>-
Tag_Regex (?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace_name>[^_]+)_(?<container_name>.+)-(?<docker_id>[a-z0-9]{64})\.log$
Path /var/log/containers/*.log
Parser docker
Refresh_Interval 10
filter.conf: |
[FILTER]
Name kubernetes
Match kube.*
Kube_Tag_Prefix kube.
Regex_Parser kube-tag
Merge_Log On
K8S-Logging.Parser On
output.conf: |
[OUTPUT]
Name es
Match kube.kiamol-ch13-lab.*
Host elasticsearch
Index lab
Generate_ID On
parsers.conf: |
[PARSER]
Name docker
Format json
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep On
[PARSER]
Name kube-tag
Format regex
Regex ^(?<namespace_name>[^_]+)\.(?<container_name>.+)\.(?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)\.(?<docker_id>[a-z0-9]{64})-$
[PARSER]
Name nginx
Format regex
Regex ^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)")?
Time_Key time
Time_Format %d/%b/%Y:%H:%M:%S %z

View File

@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluent-bit
namespace: kiamol-ch13-lab-logging
spec:
selector:
matchLabels:
app: fluent-bit
template:
metadata:
labels:
app: fluent-bit
spec:
containers:
- name: fluent-bit
image: fluent/fluent-bit:1.4.6
volumeMounts:
- name: fluent-bit-config
mountPath: /fluent-bit/etc/
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: fluent-bit-config
configMap:
name: fluent-bit-config
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: kibana
namespace: kiamol-ch13-lab-logging
spec:
selector:
app: kibana
ports:
- name: kibana
port: 5601
targetPort: 5601
type: LoadBalancer

View File

@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana
namespace: kiamol-ch13-lab-logging
spec:
selector:
matchLabels:
app: kibana
template:
metadata:
labels:
app: kibana
spec:
containers:
- image: kiamol/ch13-kibana
name: kibana
ports:
- containerPort: 5601
name: kibana

View File

@@ -0,0 +1,68 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: fluent-bit-config
namespace: kiamol-ch13-lab-logging
data:
fluent-bit.conf: |
[SERVICE]
Flush 5
Log_Level error
Daemon off
Parsers_File parsers.conf
@INCLUDE input.conf
@INCLUDE filter.conf
@INCLUDE output.conf
input.conf: |
[INPUT]
Name tail
Tag kube.<namespace_name>.<container_name>.<pod_name>.<docker_id>-
Tag_Regex (?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace_name>[^_]+)_(?<container_name>.+)-(?<docker_id>[a-z0-9]{64})\.log$
Path /var/log/containers/*.log
Parser docker
Refresh_Interval 10
filter.conf: |
[FILTER]
Name kubernetes
Match kube.*
Kube_Tag_Prefix kube.
Regex_Parser kube-tag
Annotations Off
Merge_Log On
K8S-Logging.Parser On
[FILTER]
Name grep
Match kube.kiamol-ch13-lab.web.vweb*
Exclude code 304
output.conf: |
[OUTPUT]
Name es
Match kube.kiamol-ch13-lab.*
Host elasticsearch
Index lab
Generate_ID On
parsers.conf: |
[PARSER]
Name docker
Format json
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
Time_Keep On
[PARSER]
Name kube-tag
Format regex
Regex ^(?<namespace_name>[^_]+)\.(?<container_name>.+)\.(?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)\.(?<docker_id>[a-z0-9]{64})-$
[PARSER]
Name nginx
Format regex
Regex ^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)")?
Time_Key time
Time_Format %d/%b/%Y:%H:%M:%S %z

View File

@@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vweb
spec:
selector:
matchLabels:
app: vweb
template:
metadata:
labels:
app: vweb
annotations:
fluentbit.io/parser: nginx
spec:
containers:
- name: web
image: kiamol/ch09-vweb:v1
ports:
- name: http
containerPort: 80

View File

@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vweb
spec:
selector:
matchLabels:
app: vweb
template:
metadata:
labels:
app: vweb
spec:
containers:
- name: web
image: kiamol/ch09-vweb:v1
ports:
- name: http
containerPort: 80

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: vweb
spec:
ports:
- port: 8013
targetPort: http
selector:
app: vweb
type: LoadBalancer

View File

@@ -0,0 +1,39 @@
apiVersion: v1
kind: Service
metadata:
name: numbers-api
namespace: kiamol-ch13-test
spec:
ports:
- port: 80
targetPort: api
selector:
app: numbers-api
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: numbers-api
namespace: kiamol-ch13-test
spec:
selector:
matchLabels:
app: numbers-api
template:
metadata:
labels:
app: numbers-api
spec:
containers:
- name: api
image: kiamol/ch03-numbers-api
ports:
- containerPort: 80
name: api
env:
- name: FailAfterCallCount
value: "1"
- name: UseFailureId
value: "true"

View File

@@ -0,0 +1,80 @@
apiVersion: v1
kind: Service
metadata:
name: numbers-api-proxy
namespace: kiamol-ch13-test
spec:
ports:
- port: 8080
targetPort: http
selector:
app: numbers-api-proxy
type: LoadBalancer
---
apiVersion: v1
kind: ConfigMap
metadata:
name: numbers-api-proxy-config
namespace: kiamol-ch13-test
data:
nginx.conf: |-
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=STATIC:1m inactive=24h max_size=1g;
server {
listen 80 default_server;
listen [::]:80 default_server;
location / {
proxy_pass http://numbers-api;
proxy_set_header Host $host;
proxy_cache STATIC;
proxy_cache_valid 20s;
add_header X-Cache $upstream_cache_status;
add_header X-Host $hostname;
}
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: numbers-api-proxy
namespace: kiamol-ch13-test
spec:
selector:
matchLabels:
app: numbers-api-proxy
template:
metadata:
labels:
app: numbers-api-proxy
spec:
containers:
- image: nginx:1.17-alpine
name: nginx
ports:
- containerPort: 80
name: http
volumeMounts:
- name: config
mountPath: "/etc/nginx/"
readOnly: true
- name: cache-volume
mountPath: /data/nginx/cache
volumes:
- name: config
configMap:
name: numbers-api-proxy-config
- name: cache-volume
emptyDir: {}
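
The nginx proxy caches upstream responses for 20 seconds and reports the cache status in an `X-Cache` response header, which makes cached and uncached calls easy to tell apart in the logs. A hedged sketch of exercising it, assuming the LoadBalancer is reachable on localhost and that the API's random-number endpoint is `/rng`:
```
# the second call within 20 seconds should report X-Cache: HIT
curl -si http://localhost:8080/rng | grep -i x-cache
curl -si http://localhost:8080/rng | grep -i x-cache
```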

View File

@@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: numbers-api
namespace: kiamol-ch13-test
spec:
selector:
matchLabels:
app: numbers-api
template:
metadata:
labels:
app: numbers-api
annotations:
fluentbit.io/parser: dotnet-syslog
spec:
containers:
- name: api
image: kiamol/ch03-numbers-api
ports:
- containerPort: 80
name: api
env:
- name: FailAfterCallCount
value: "1"
- name: UseFailureId
value: "true"

View File

@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: numbers-api-proxy
namespace: kiamol-ch13-test
spec:
selector:
matchLabels:
app: numbers-api-proxy
template:
metadata:
labels:
app: numbers-api-proxy
annotations:
fluentbit.io/parser: nginx
spec:
containers:
- image: nginx:1.17-alpine
name: nginx
ports:
- containerPort: 80
name: http
volumeMounts:
- name: config
mountPath: "/etc/nginx/"
readOnly: true
- name: cache-volume
mountPath: /data/nginx/cache
volumes:
- name: config
configMap:
name: numbers-api-proxy-config
- name: cache-volume
emptyDir: {}

View File

@@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sleep
labels:
kiamol: ch13
spec:
selector:
matchLabels:
app: sleep
template:
metadata:
labels:
app: sleep
spec:
containers:
- name: sleep
image: kiamol/ch03-sleep
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
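
The sleep Deployment mounts the node's `/var/log` and `/var/lib/docker/containers` host paths, so you can inspect the raw log files the container runtime writes for every Pod on that node:
```
kubectl exec deploy/sleep -- ls /var/log/containers
```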

View File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: Namespace
metadata:
name: kiamol-ch13-dev
labels:
kiamol: ch13
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: timecheck
namespace: kiamol-ch13-dev
spec:
selector:
matchLabels:
app: timecheck
template:
metadata:
labels:
app: timecheck
spec:
containers:
- name: timecheck
image: kiamol/ch07-timecheck
volumeMounts:
- name: logs-dir
mountPath: /logs
- name: logger
image: kiamol/ch03-sleep
command: ['sh', '-c', 'tail -f /logs-ro/timecheck.log']
volumeMounts:
- name: logs-dir
mountPath: /logs-ro
readOnly: true
volumes:
- name: logs-dir
emptyDir: {}
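
The timecheck app only writes to a log file, so the logger sidecar tails the shared emptyDir volume and relays the file contents to its own stdout, where the cluster's log collection can pick them up:
```
kubectl logs -l app=timecheck -n kiamol-ch13-dev -c logger --tail 2
```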

View File

@@ -0,0 +1,72 @@
apiVersion: v1
kind: Namespace
metadata:
name: kiamol-ch13-test
labels:
kiamol: ch13
---
apiVersion: v1
kind: ConfigMap
metadata:
name: timecheck-config
namespace: kiamol-ch13-test
data:
appsettings.json: |-
{
"Application": {
"Version": "1.1",
"Environment": "TEST"
},
"Timer": {
"IntervalSeconds": "10"
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: timecheck
namespace: kiamol-ch13-test
spec:
replicas: 2
selector:
matchLabels:
app: timecheck
template:
metadata:
labels:
app: timecheck
spec:
initContainers:
- name: init-config
image: kiamol/ch03-sleep
command: ['sh', '-c', 'cp /config-in/appsettings.json /config-out/appsettings.json']
volumeMounts:
- name: config-map
mountPath: /config-in
- name: config-dir
mountPath: /config-out
containers:
- name: timecheck
image: kiamol/ch07-timecheck
volumeMounts:
- name: config-dir
mountPath: /config
readOnly: true
- name: logs-dir
mountPath: /logs
- name: logger
image: kiamol/ch03-sleep
command: ['sh', '-c', 'tail -f /logs-ro/timecheck.log']
volumeMounts:
- name: logs-dir
mountPath: /logs-ro
readOnly: true
volumes:
- name: config-map
configMap:
name: timecheck-config
- name: config-dir
emptyDir: {}
- name: logs-dir
emptyDir: {}