Initial commit

2025-12-09 19:34:54 +11:00
commit a4d98eea50
894 changed files with 131646 additions and 0 deletions


@@ -0,0 +1,82 @@
# Loki Helm Scenarios
These scenarios are used by the GitHub workflow [Publish Rendered Helm Chart Diff](../../../../.github/workflows/helm-diff-ci.yml).
Each scenario runs in its own job, which deploys Loki into a K3D cluster in our GitHub Actions workflow.
Each job deploys the scenario with the latest release and then runs a helm diff against the chart version in the workspace. The diff between the deployed release and the workspace chart is posted in the pull request as a comment like [this](https://github.com/grafana/loki/pull/15734#issuecomment-2592439539), making the changes easy to review.
>*NOTE*: the helm diff output file for each scenario is available for download as a GitHub Actions artifact for 2 days; after that you may need to re-run the job if you want to download the output files.
## Add a new scenario to the CI
To add a new scenario to the CI, add a new entry to the matrix configuration:
```yaml
strategy:
matrix:
scenario:
- name: New Scenario
values_file: new-scenario-values.yaml
use_k3d: true # or false depending on requirements
```
## Run scenarios locally
The whole process that runs in the CI can also be done locally; the following steps explain how.
## Requirements
To run the scenarios locally you will need the following tools in your environment (a setup sketch follows the list):
* k3d or any Kubernetes cluster
* helm
* [helm-diff plugin](https://github.com/databus23/helm-diff)
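If you do not have a local cluster or the plugin yet, a minimal setup sketch could look like this (the cluster name `loki-diff` is just an example):
```shell
# Create a throwaway local cluster with k3d (cluster name is an example)
k3d cluster create loki-diff

# Install the helm-diff plugin
helm plugin install https://github.com/databus23/helm-diff
```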
## Run
Make sure that you are pointing to the Kubernetes cluster where you want to install the chart and validate the changes.
Create a `${scenario}-values.yaml` file with the configuration that you want to validate.
Deploy the latest released version of the Helm chart to your Kubernetes cluster with your config file:
```shell
helm install --create-namespace loki-release grafana/loki -f ${scenario}-values.yaml
```
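If the `grafana` chart repository is not yet configured in your local Helm, you may need to add it first; the repository below is the public Grafana Helm charts repo:
```shell
# Register the Grafana chart repository and refresh the local index
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
```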
Then run the helm diff plugin against the local Helm chart to see the changes that the upgrade would apply:
```shell
HELM_DIFF_USE_UPGRADE_DRY_RUN=true helm diff upgrade loki-release -f ${scenario}-values.yaml production/helm/loki
```
The helm diff plugin compares the manifests deployed in your local development Kubernetes cluster with the manifests generated by the helm upgrade operation, and prints the result to your terminal.
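If you want to keep a copy of the diff, similar to the artifact produced by the CI, you can redirect the output to a file (the filename below is just an example):
```shell
# Save the rendered diff for later review or sharing
HELM_DIFF_USE_UPGRADE_DRY_RUN=true helm diff upgrade loki-release -f ${scenario}-values.yaml production/helm/loki > helm-diff-output.txt
```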
### Cloud provider (CSP) specific configs
To compare the changes for a specific cloud provider the process is similar; the main difference is that you need access to a Kubernetes cluster with the right permissions inside the cloud provider.
If that is not possible, a quick validation can be done differently: instead of deploying to the Kubernetes cluster, generate the manifests with helm like this:
```shell
helm template loki-release grafana/loki -f ${scenario}-values.yaml > release-manifest.yaml
```
Then repeat the process with the local chart version:
```shell
helm template loki-release production/helm/loki -f ${scenario}-values.yaml > current-manifest.yaml
```
As the last step, run a diff between both files:
```shell
diff release-manifest.yaml current-manifest.yaml
```
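The two template-and-diff steps can also be combined into a single command with bash process substitution; a unified diff (`-u`) is usually easier to read:
```shell
# Render both chart versions and diff them in one step (bash-specific syntax)
diff -u \
  <(helm template loki-release grafana/loki -f ${scenario}-values.yaml) \
  <(helm template loki-release production/helm/loki -f ${scenario}-values.yaml)
```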
### Known Issues
* The GitHub Action cannot post the diff comment when the PR comes from a fork: for permission reasons, a workflow run triggered from a fork is not allowed to write to the PR.
In that case, to review the output we recommend downloading the artifacts from the workflow run and checking the output files there.


@@ -0,0 +1,71 @@
---
loki:
schemaConfig:
configs:
- from: 2024-04-01
store: tsdb
object_store: s3
schema: v13
index:
prefix: loki_index_
period: 24h
ingester:
chunk_encoding: snappy
tracing:
enabled: true
querier:
# Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing
max_concurrent: 4
#gateway:
# ingress:
# enabled: true
# hosts:
# - host: FIXME
# paths:
# - path: /
# pathType: Prefix
deploymentMode: Distributed
ingester:
replicas: 3
querier:
replicas: 3
maxUnavailable: 2
queryFrontend:
replicas: 2
maxUnavailable: 1
queryScheduler:
replicas: 2
distributor:
replicas: 3
maxUnavailable: 2
compactor:
replicas: 1
indexGateway:
replicas: 2
maxUnavailable: 1
# optional experimental components
bloomPlanner:
replicas: 0
bloomBuilder:
replicas: 0
bloomGateway:
replicas: 0
# Enable minio for storage
minio:
enabled: true
# Zero out replica counts of other deployment modes
backend:
replicas: 0
read:
replicas: 0
write:
replicas: 0
singleBinary:
replicas: 0


@@ -0,0 +1,16 @@
---
loki:
commonConfig:
replication_factor: 1
useTestSchema: true
storage:
bucketNames:
chunks: chunks
ruler: ruler
admin: admin
read:
replicas: 1
write:
replicas: 1
backend:
replicas: 1


@@ -0,0 +1,30 @@
---
gateway:
ingress:
enabled: true
annotations: {}
hosts:
- host: gateway.loki.example.com
paths:
- path: /
pathType: Prefix
loki:
commonConfig:
replication_factor: 1
useTestSchema: true
storage:
bucketNames:
chunks: chunks
ruler: ruler
admin: admin
read:
replicas: 1
write:
replicas: 1
backend:
replicas: 1
monitoring:
lokiCanary:
enabled: false
test:
enabled: false


@@ -0,0 +1,27 @@
---
loki:
commonConfig:
replication_factor: 1
useTestSchema: true
storage:
bucketNames:
chunks: chunks
ruler: ruler
admin: admin
read:
replicas: 1
write:
replicas: 1
backend:
replicas: 1
monitoring:
enabled: true
selfMonitoring:
enabled: true
grafanaAgent:
installOperator: true
serviceMonitor:
labels:
release: "prometheus"
test:
prometheusAddress: "http://prometheus-kube-prometheus-prometheus.prometheus.svc.cluster.local.:9090"


@@ -0,0 +1,67 @@
loki:
# -- Storage config. Providing this will automatically populate all necessary storage configs in the templated config.
storage:
# Loki requires a bucket for chunks and the ruler. GEL requires a third bucket for the admin API.
# Please provide these values if you are using object storage.
bucketNames:
chunks: aws-s3-chunks-bucket
ruler: aws-s3-ruler-bucket
admin: aws-s3-admin-bucket
type: s3
s3:
region: eu-central-1
# -- Check https://grafana.com/docs/loki/latest/configuration/#schema_config for more info on how to configure schemas
schemaConfig:
configs:
- from: "2023-09-19"
index:
period: 1d
prefix: tsdb_index_
object_store: s3
schema: v13
store: tsdb
######################################################################################################################
#
# Enterprise Loki Configs
#
######################################################################################################################
# -- Configuration for running Enterprise Loki
enterprise:
# Enable enterprise features, license must be provided
enabled: true
# -- Grafana Enterprise Logs license
license:
contents: "content of licence"
tokengen:
annotations: {
eks.amazonaws.com/role-arn: arn:aws:iam::2222222:role/test-role
}
# -- Configuration for `provisioner` target
provisioner:
# -- Additional annotations for the `provisioner` Job
annotations: {
eks.amazonaws.com/role-arn: arn:aws:iam::2222222:role/test-role
}
######################################################################################################################
#
# Service Accounts and Kubernetes RBAC
#
######################################################################################################################
serviceAccount:
# -- Annotations for the service account
annotations:
eks.amazonaws.com/role-arn: arn:aws:iam::2222222:role/test-role
# Configuration for the write pod(s)
write:
persistence:
storageClass: gp2
# -- Configuration for the read pod(s)
read:
persistence:
storageClass: gp2
# -- Configuration for the backend pod(s)
backend:
persistence:
storageClass: gp2


@@ -0,0 +1,53 @@
---
loki:
commonConfig:
replication_factor: 1
useTestSchema: true
storage:
type: s3
use_thanos_objstore: true
object_store:
type: s3
s3:
access_key_id: thanos-minio
secret_access_key: thanos-minio123
region: us-east-1
insecure: true
endpoint: http://minio.minio.svc.cluster.local:9000
http:
tls_config:
insecure_skip_verify: true
# GCS configuration (when type is "GCS")
gcs:
bucket_name: test-gcs # Name of the bucket
service_account: service-account-test.json # Optional service account JSON
# Azure configuration (when type is "AZURE")
azure:
account_name: azure-test # Storage account name
account_key: 1234567890 # Optional storage account key
bucketNames:
chunks: chunks_thanos
ruler: ruler_thanos
admin: admin_thanos
enterprise:
enabled: true
adminApi:
enabled: true
minio:
enabled: true
read:
replicas: 1
write:
replicas: 1
backend:
replicas: 1