import pulumi
import pulumi_docker as docker
import json
import os

# Load the container topology from config.json (a list of container specs).
try:
    with open("config.json", "r") as f:
        containers_data = json.load(f)
except FileNotFoundError:
    raise Exception("Error: config.json not found.")

# Single user-defined bridge network shared by every container.
network = docker.Network("testNetwork")


def create_container(container_data, network, i=0):
    """Create one docker.Container from a config.json entry.

    container_data: one dict from containers_data["containers"].
    network:        the docker.Network all containers join.
    i:              replica index; suffixes the container name and offsets
                    external ports so multiple instances do not collide.
    Returns the created docker.Container resource.
    Raises ValueError if the entry has no "image" key.
    """
    instances = container_data.get("instances", 1)
    container_name = (
        f"{container_data['name']}-{i}" if instances > 1 else container_data["name"]
    )

    # Named volumes are created once and reused; host paths are bind-mounted.
    named_volumes = {}
    for vol in container_data.get("volumes", []):
        if "volume_name" in vol and vol["volume_name"] not in named_volumes:
            named_volumes[vol["volume_name"]] = docker.Volume(vol["volume_name"])
    volumes_config = [
        docker.ContainerVolumeArgs(
            container_path=vol["container_path"],
            volume_name=named_volumes[vol["volume_name"]].name,
        )
        if "volume_name" in vol
        else docker.ContainerVolumeArgs(
            container_path=vol["container_path"],
            host_path=os.path.abspath(vol["host_path"]),
        )
        for vol in container_data.get("volumes", [])
    ]

    envs = [f"{k}={v}" for k, v in container_data.get("envs", {}).items()]

    image_name = container_data.get("image")
    if image_name is None:
        raise ValueError(f"Missing 'image' key in container data: {container_data}")

    # An image ending in ".dockerfile" is built locally from that Dockerfile
    # and pushed to the local registry; anything else is a pullable reference.
    if image_name.endswith(".dockerfile"):
        base = image_name[: -len(".dockerfile")]
        image = docker.Image(
            f"localhost:5000/{base}_image:latest",
            build=docker.DockerBuildArgs(
                context="./",
                dockerfile=base + ".dockerfile",
            ),
        )
        container_image = image.id
    else:
        container_image = image_name

    container = docker.Container(
        container_name,
        image=container_image,
        hostname=container_name,
        envs=envs,
        ports=[
            docker.ContainerPortArgs(
                internal=p["internal"],
                external=p["external"] + i,  # offset per replica
            )
            for p in container_data.get("ports", [])
        ],
        volumes=volumes_config,
        network_mode=network.name,
    )

    # NOTE(review): with several ports this re-exports the same key, so only
    # the last port's URL survives — confirm whether per-port keys are wanted.
    for port in container_data.get("ports", []):
        external_port = port["external"] + i
        pulumi.export(f"{container_name}_url", f"http://localhost:{external_port}")
    return container


# Create every container (and its replicas) declared in config.json.
containers = []
for container_data in containers_data.get("containers", []):
    # FIX: the metrics exporter is created explicitly below (with its DB env
    # vars); creating it from config.json as well would register a second
    # Pulumi resource under the same name "postgres_metrics_exporter".
    if container_data.get("name") == "postgres_metrics_exporter":
        continue
    for i in range(container_data.get("instances", 1)):
        containers.append(create_container(container_data, network, i))


# Prometheus metrics exporter (separate, custom-built container).
exporter_image = docker.Image(
    "localhost:5000/postgres_metrics_exporter_image:latest",
    build=docker.DockerBuildArgs(
        context="./",
        dockerfile="postgres_metrics_exporter.Dockerfile",
    ),
)

exporter_container = docker.Container(
    "postgres_metrics_exporter",
    image=exporter_image.id,
    ports=[docker.ContainerPortArgs(internal=8000, external=8000)],
    network_mode=network.name,
    # FIX: was `env=` — the Container resource takes `envs=` (as used for
    # every other container above), so the exporter never received its
    # database credentials.
    envs=[
        "POSTGRES_HOST=admin",
        "POSTGRES_DB=admin",
        "POSTGRES_USER=admin",
        "POSTGRES_PASSWORD=admin",
    ],
)

pulumi.export("exporter_url", "http://localhost:8000")
at end of file diff --git a/config.json b/config.json index 4ab2cd4..100c340 100644 --- a/config.json +++ b/config.json @@ -1,149 +1,120 @@ { - "containers": [ - { - "name": "admin", - "image": "postgres:latest", - "envs": { - "POSTGRES_DB": "admin", - "POSTGRES_USER": "admin", - "POSTGRES_PASSWORD": "admin" + "containers": [ + { + "name": "admin", + "image": "postgres:latest", + "envs": { + "POSTGRES_DB": "admin", + "POSTGRES_USER": "admin", + "POSTGRES_PASSWORD": "admin" + }, + "network_mode": "testNetwork", + "ports": [ { "internal": 5432, "external": 5432 } ], + "volumes": [ + { "container_path": "/var/lib/postgresql/data", "volume_name": "postgres-data" }, + { "container_path": "/etc/postgresql/postgresql.conf", "host_path": "./postgresql.conf" }, + { "container_path": "/docker-entrypoint-initdb.d/pg_stat_statements.sql", "host_path": "./pg_stat_statements.sql" } + ] + }, + { + "name": "postgres_exporter", + "image": "wrouesnel/postgres_exporter:latest", + "envs": { + "DATA_SOURCE_NAME": "postgresql://admin:admin@admin:5432/admin?sslmode=disable" + }, + "network_mode": "testNetwork", + "ports": [{ "internal": 9187, "external": 9187 }] + }, + { + "name": "pgadmin", + "image": "dpage/pgadmin4:latest", + "envs": { + "PGADMIN_DEFAULT_EMAIL": "admin@admin.com", + "PGADMIN_DEFAULT_PASSWORD": "admin" + }, + "network_mode": "testNetwork", + "ports": [{ "internal": 80, "external": 5050 }] + }, + { + "name": "odoo", + "image": "odoo.dockerfile", + "envs": { + "HOST": "admin", + "USER": "admin", + "PASSWORD": "admin", + "DATABASE": "admin", + "ODOO_PASSWORD": "admin" + }, + "network_mode": "testNetwork", + "ports": [{ "internal": 8069, "external": 8069 }], + "instances": 3, + "volumes": [ + { + "host_path": "./odoo.conf", + "container_path": "/etc/odoo/odoo.conf" + } + ] + }, + { + "name": "grafana", + "image": "grafana/grafana:latest", + "envs": { + "GF_SECURITY_ADMIN_PASSWORD": "grafana_pwd", + "GF_DATASOURCES_PROMETHEUS_URL": "http://prometheus:9090" + }, + 
"network_mode": "testNetwork", + "ports": [{ "internal": 3000, "external": 3000 }], + "instances": 2 + }, + { + "name": "prometheus", + "image": "prom/prometheus:latest", + "network_mode": "testNetwork", + "ports": [{ "internal": 9090, "external": 9090 }], + "volumes": [ + { + "container_path": "/prometheus", + "volume_name": "prometheus-data" }, - "network_mode": "testNetwork", - "ports": [{"internal": 5432, "external": 5432}], - "volumes": [ - { - "container_path": "/var/lib/postgresql/data", - "volume_name": "postgres-data" - }, - { - "container_path": "/etc/postgresql/postgresql.conf", - "host_path": "./postgresql.conf" - } - ] + { + "container_path": "/etc/prometheus/prometheus.yml", + "host_path": "./prometheus.yml" + } + ] + }, + { + "name": "fluentd", + "image": "fluent/fluentd:v1.13-1", + "network_mode": "testNetwork", + "ports": [{ "internal": 24224, "external": 24224 }], + "volumes": [ + { + "container_path": "/fluentd/etc/fluent.conf", + "host_path": "./fluent.conf" + } + ] + }, + { + "name": "backup", + "image": "backup.dockerfile", + "envs": { + "POSTGRES_HOST": "admin", + "POSTGRES_DB": "admin", + "POSTGRES_USER": "admin", + "POSTGRES_PASSWORD": "admin" }, - - { - "name": "postgres_exporter", - "image": "wrouesnel/postgres_exporter:latest", - "envs": { - "DATA_SOURCE_NAME": "postgresql://admin:admin@admin:5432/admin?sslmode=disable" - }, - "network_mode": "testNetwork", - "ports": [{"internal": 9187, "external": 9187}] - }, - - { - "name": "pgadmin", - "image": "dpage/pgadmin4:latest", - "envs": { - "PGADMIN_DEFAULT_EMAIL": "admin@admin.com", - "PGADMIN_DEFAULT_PASSWORD": "admin" - }, - "network_mode": "testNetwork", - "ports": [{"internal": 80, "external": 5050}] - }, - - - - { - "name": "odoo", - "image": "odoo_custom", - "envs": { - "HOST": "admin", - "USER": "admin", - "PASSWORD": "admin", - "DATABASE": "admin", - "ODOO_PASSWORD": "admin" - }, - "network_mode": "testNetwork", - "ports": [{"internal": 8069, "external": 8069}], - "instances": 3, - 
"volumes": [ - { - "host_path": "./odoo.conf", - "container_path": "/etc/odoo/odoo.conf" - } - ] - }, - { - "name": "grafana", - "image": "grafana/grafana:latest", - "envs": { - "GF_SECURITY_ADMIN_PASSWORD": "grafana_pwd", - "GF_DATASOURCES_PROMETHEUS_URL": "http://prometheus:9090" - }, - "network_mode": "testNetwork", - "ports": [{"internal": 3000, "external": 3000}], - "instances": 2 - }, - { - "name": "prometheus", - "image": "prom/prometheus:latest", - "network_mode": "testNetwork", - "ports": [{"internal": 9090, "external": 9090}], - "volumes": [ - { - "container_path": "/prometheus", - "volume_name": "prometheus-data" - }, - { - "container_path": "/etc/prometheus/prometheus.yml", - "host_path": "./prometheus.yml" - } - ] - }, - { - "name": "fluentd", - "image": "fluent/fluentd:v1.13-1", - "network_mode": "testNetwork", - "ports": [{"internal": 24224, "external": 24224}], - "volumes": [ - { - "container_path": "/fluentd/etc/fluent.conf", - "host_path": "./fluent.conf" - } - ] - }, - { - "name": "backup", - "image": "backup_custom", - "envs": { - "POSTGRES_HOST": "admin", - "POSTGRES_DB": "admin", - "POSTGRES_USER": "admin", - "POSTGRES_PASSWORD": "admin" - }, - "network_mode": "testNetwork", - "volumes": [ - { - "container_path": "/backup", - "volume_name": "backup-data" - } - ] - }, - { - "name": "postgres_metrics_exporter", - "image": "python:3.9-slim", - "command": [ - "python", - "-u", - "/scripts/prometheus_exporter.py" - ], - "volumes": [ - { - "host_path": "./prometheus_exporter.py", - "container_path": "/scripts/prometheus_exporter.py" - } - ], - "network_mode": "testNetwork", - "ports": [ - { - "internal": 8000, - "external": 8000 - } - ] - } - ] - } - - \ No newline at end of file + "network_mode": "testNetwork", + "volumes": [ + { + "container_path": "/backup", + "volume_name": "backup-data" + } + ] + }, + { + "name": "postgres_metrics_exporter", + "image": "postgres_metrics_exporter.dockerfile", + "ports": [{ "internal": 8000, "external": 8000 }] + 
} + ] +} \ No newline at end of file diff --git a/pg_stat_statements.sql b/pg_stat_statements.sql new file mode 100644 index 0000000..a714643 --- /dev/null +++ b/pg_stat_statements.sql @@ -0,0 +1,10 @@ +-- Créer l'extension pg_stat_statements +CREATE EXTENSION IF NOT EXISTS pg_stat_statements; + +-- Configurer les paramètres pg_stat_statements +ALTER SYSTEM SET pg_stat_statements.max = 10000; +ALTER SYSTEM SET pg_stat_statements.track = 'all'; +ALTER SYSTEM SET pg_stat_statements.save = 'on'; + +-- Recharger la configuration +SELECT pg_reload_conf(); diff --git a/postgres_metrics_exporter.Dockerfile b/postgres_metrics_exporter.Dockerfile new file mode 100644 index 0000000..26fb3eb --- /dev/null +++ b/postgres_metrics_exporter.Dockerfile @@ -0,0 +1,12 @@ +# Dockerfile for postgres_metrics_exporter +FROM python:3.9-slim-buster + +WORKDIR /app + +COPY prometheus_exporter.py . +COPY requirements.txt . + +RUN pip install --no-cache-dir -r requirements2.txt + +CMD ["python", "-u", "prometheus_exporter.py"] + diff --git a/postgresql.conf b/postgresql.conf index 81b8b7a..172f421 100644 --- a/postgresql.conf +++ b/postgresql.conf @@ -1,3 +1 @@ -shared_preload_libraries = 'pg_stat_statements' -pg_stat_statements.max = 10000 -pg_stat_statements.track = all +shared_preload_libraries = 'pg_stat_statements' \ No newline at end of file diff --git a/prometheus.yml b/prometheus.yml index 870113e..ca17776 100644 --- a/prometheus.yml +++ b/prometheus.yml @@ -8,6 +8,6 @@ scrape_configs: - job_name: 'pg_stat_statements' static_configs: - - targets: ['postgres_metrics_exporter:8000'] + - targets: ['localhost:8000'] diff --git a/prometheus_exporter.py b/prometheus_exporter.py index a218278..f965617 100644 --- a/prometheus_exporter.py +++ b/prometheus_exporter.py @@ -1,50 +1,48 @@ -from prometheus_client import start_http_server, Gauge +from prometheus_client import start_http_server, Gauge, REGISTRY import psycopg2 import time +import os + +# Configuration from environment variables 
# Database connection settings are read from the environment so the same
# image can point at any PostgreSQL instance; the defaults match the "admin"
# container defined in config.json.
DB_HOST = os.environ.get("POSTGRES_HOST", "admin")
DB_NAME = os.environ.get("POSTGRES_DB", "admin")
DB_USER = os.environ.get("POSTGRES_USER", "admin")
DB_PASSWORD = os.environ.get("POSTGRES_PASSWORD", "admin")

conn_params = {
    "host": DB_HOST,
    "database": DB_NAME,
    "user": DB_USER,
    "password": DB_PASSWORD,
}

# Prometheus gauges, labelled by the (escaped) query text.
QUERY_CALLS = Gauge('postgresql_query_calls', 'Number of PostgreSQL query calls', ['query'])
QUERY_TOTAL_TIME = Gauge('postgresql_query_total_time_ms', 'Total time of PostgreSQL queries (ms)', ['query'])


def generate_metrics():
    """Scrape pg_stat_statements once and update the Prometheus gauges.

    Database errors are logged and swallowed so the scrape loop keeps
    running (the exporter then serves the last successful values).
    """
    conn = None
    try:
        conn = psycopg2.connect(**conn_params)
        # FIX: pg_stat_statements renamed total_time to total_exec_time in
        # PostgreSQL 13, which is what postgres:latest runs — the old query
        # failed with "column total_time does not exist".
        with conn.cursor() as cur:
            cur.execute("""
                SELECT query, calls, total_exec_time
                FROM pg_stat_statements
                ORDER BY total_exec_time DESC;
            """)
            for row in cur:
                # Escape backslashes and quotes so the query text is a
                # valid Prometheus label value.
                query = row[0].replace("\\", "\\\\").replace('"', '\\"')
                QUERY_CALLS.labels(query=query).set(row[1])
                # FIX: total_exec_time is already in milliseconds; the old
                # `* 1000` reported microseconds under a "_ms" metric name.
                QUERY_TOTAL_TIME.labels(query=query).set(row[2])
    except psycopg2.Error as e:
        print(f"Error connecting to the database: {e}", flush=True)
    finally:
        # FIX: the old code closed the connection only on the success path,
        # leaking it whenever the query raised.
        if conn is not None:
            conn.close()


if __name__ == '__main__':
    start_http_server(8000)  # expose /metrics on port 8000
    while True:
        generate_metrics()
        time.sleep(60)  # scrape interval in seconds

# --- requirements2.txt (installed by postgres_metrics_exporter.Dockerfile) ---
#   psycopg2-binary
#   prometheus-client