Compare commits
No commits in common. "main" and "last" have entirely different histories.
5
.env
Normal file
@@ -0,0 +1,5 @@
+POSTGRES_PASSWORD=openpgpwd
+POSTGRES_USER=openpg
+POSTGRES_DB=pulumi
+ODOO_PASSWORD=admin
+GRAFANA_PASSWORD=grafana_pwd
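These credentials are not read by the Pulumi program below, which hard-codes its values in config.json. A minimal sketch of wiring them in, assuming the python-dotenv package (an extra dependency, not in requirements2.txt):

    # Hypothetical wiring: load .env values instead of hard-coding credentials.
    # python-dotenv is an assumed extra dependency, not part of this commit.
    import os
    from dotenv import load_dotenv

    load_dotenv()  # reads .env from the working directory
    postgres_envs = {
        "POSTGRES_DB": os.environ["POSTGRES_DB"],
        "POSTGRES_USER": os.environ["POSTGRES_USER"],
        "POSTGRES_PASSWORD": os.environ["POSTGRES_PASSWORD"],
    }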
Pulumi.yaml
@@ -1,5 +1,5 @@
-name: pulumi4
-description: pulumi
+name: projet
+description: projet pulumi docker
 runtime:
   name: python
   options:
139
__main__.py
@@ -1,82 +1,97 @@
 import pulumi
 import pulumi_docker as docker
 import json
+import os

-# Load the configuration from the JSON file
-with open('config.json') as f:
-    config_data = json.load(f)
+# Load config.json
+try:
+    with open("config.json", "r") as f:
+        containers_data = json.load(f)
+except FileNotFoundError:
+    raise Exception("Error: config.json not found.")

-# Create a Docker network
+# Create the network
 network = docker.Network("testNetwork")

-# Initialize the containers list
-containers = []
+# Helper function to create containers
+def create_container(container_data, network, i=0):
+    container_name = f"{container_data['name']}-{i}" if container_data.get("instances", 1) > 1 else container_data["name"]
+    volumes = {}
+    for vol in container_data.get("volumes", []):
+        if "volume_name" in vol and vol["volume_name"] not in volumes:
+            volumes[vol["volume_name"]] = docker.Volume(vol["volume_name"])
+    volumes_config = [
+        docker.ContainerVolumeArgs(container_path=v["container_path"], volume_name=volumes[v["volume_name"]].name) if "volume_name" in v else
+        docker.ContainerVolumeArgs(container_path=v["container_path"], host_path=os.path.abspath(v["host_path"]))
+        for v in container_data.get("volumes", [])
+    ]
+
+    envs = [f"{k}={v}" for k, v in container_data.get("envs", {}).items()]

-# Initialize the URL dictionary for export
-urls = {}
+    # Handle custom images: get the image name, handling a potentially missing key
+    image_name = container_data.get("image")
+    if image_name is None:
+        raise ValueError(f"Missing 'image' key in container data: {container_data}")

-# Create containers based on the configuration
-for container in config_data["containers"]:
-    container_name = container["name"]
-    container_envs = [f"{key}={value}" for key, value in container.get("env", {}).items()]
+    image = None
+    if image_name.endswith(".dockerfile"):
+        image_name = image_name[:-len(".dockerfile")]
+        image = docker.Image(
+            f"localhost:5000/{image_name}_image:latest",  # Fully qualified name built from image_name
+            build=docker.DockerBuildArgs(
+                context="./",
+                dockerfile=image_name + ".dockerfile"
+            ),
+        )
+        container_image = image.id
+    else:
+        container_image = image_name

-    # Create the container
-    docker_container = docker.Container(
+    container = docker.Container(
         container_name,
         name=container_name,
-        image=container["image"],
-        envs=container_envs,
-        ports=[docker.ContainerPortArgs(
-            internal=port["internal"],
-            external=port["external"]
-        ) for port in container["ports"]],
+        image=container_image,
+        hostname=container_name,
+        envs=envs,
+        ports=[docker.ContainerPortArgs(internal=p["internal"], external=p["external"] + i) for p in container_data.get("ports", [])],
+        volumes=volumes_config,
         network_mode=network.name,
-        command=container.get("command", None) or container.get("entrypoint", None),
-        ## container.get("entrypoint", "/usr/local/bin/entrypoint.sh")
-        volumes=[docker.ContainerVolumeArgs(
-            host_path=vol["host_path"],
-            container_path=vol["container_path"]
-        ) for vol in container.get("volumes", [])]
     )
-    containers.append(docker_container)
+    ports = container_data.get("ports", [])
+    if ports:
+        for port in ports:
+            external_port = port["external"] + i
+            pulumi.export(f"{container_name}_url", f"http://localhost:{external_port}")
+    return container

-    # Add the URLs for the container
-    for port in container["ports"]:
-        urls[f"{container_name}_url"] = f"http://localhost:{port['external']}"

-# Scale Prometheus containers
-for i in range(config_data.get("prometheus_scale", 1)):  # Default to 1 if not specified
-    prometheus_instance = docker.Container(
-        f"prometheus-instance-{i}",
-        name=f"prometheus-{i}",
-        image="prom/prometheus:latest",
-        ports=[docker.ContainerPortArgs(internal=9090, external=9090 + i)],
-        network_mode=network.name
-    )
-    containers.append(prometheus_instance)
-
-    # Add the Prometheus URLs
-    urls[f"prometheus_{i}_url"] = f"http://localhost:{9090 + i}"
+# Create containers
+containers = []
+for container_data in containers_data.get("containers", []):
+    instances = container_data.get("instances", 1)
+    for i in range(instances):
+        containers.append(create_container(container_data, network, i))

-# Scale Fluentd containers
-fluentd_scale = config_data.get("fluentd_scale", 1)  # Default to 1 if not specified
-for i in range(fluentd_scale):  # Scale based on the value from config_data
-    fluentd_instance = docker.Container(
-        f"fluentd-instance-{i}",
-        name=f"fluentd-{i}",
-        image="fluent/fluentd:v1.14-1",  # Corrected image name
-        ports=[docker.ContainerPortArgs(internal=24224, external=24224 + i)],  # Assign a unique external port to each instance
-        network_mode=network.name
-    )
-    containers.append(fluentd_instance)
-
-    # Add the Fluentd URLs
-    urls[f"fluentd_{i}_url"] = f"http://localhost:{24224 + i}"
+# Prometheus exporter (separate container)
+exporter_image = docker.Image(
+    "localhost:5000/postgres_metrics_exporter_image:latest",
+    build=docker.DockerBuildArgs(
+        context="./",
+        dockerfile="postgres_metrics_exporter.Dockerfile"
+    ),
+)
+
+exporter_container = docker.Container(
+    "postgres_metrics_exporter",
+    image=exporter_image.id,
+    ports=[docker.ContainerPortArgs(internal=8000, external=8000)],
+    network_mode=network.name,
+    env=[
+        "POSTGRES_HOST=admin",
+        "POSTGRES_DB=admin",
+        "POSTGRES_USER=admin",
+        "POSTGRES_PASSWORD=admin",
+    ],
+)

 # Export network and container details
 pulumi.export("network_name", network.name)
 pulumi.export("containers", [c.name for c in containers])
-pulumi.export("urls", urls)
+pulumi.export("exporter_url", f"http://localhost:8000")
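In the new program, an "image" value ending in ".dockerfile" selects a local build tagged under a localhost:5000 registry name, while anything else is pulled as-is. The standalone sketch below reproduces that resolution convention outside Pulumi; the resolve_image helper is ours, for illustration only:

    # Sketch of the image-resolution convention used by create_container above:
    # a value ending in ".dockerfile" means "build locally from that file";
    # anything else is treated as a registry image. resolve_image is hypothetical.
    def resolve_image(image_value: str) -> tuple[str, bool]:
        suffix = ".dockerfile"
        if image_value.endswith(suffix):
            base = image_value[:-len(suffix)]
            # Built images are tagged for a local registry on port 5000
            return f"localhost:5000/{base}_image:latest", True
        return image_value, False

    assert resolve_image("odoo.dockerfile") == ("localhost:5000/odoo_image:latest", True)
    assert resolve_image("postgres:latest") == ("postgres:latest", False)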
16
backup.Dockerfile
Normal file
@@ -0,0 +1,16 @@
+# Use the Alpine image as the base
+FROM alpine:latest as backup_custom
+
+# Copy the entrypoint script into the container
+COPY entrypoint_backup.sh /usr/local/bin/entrypoint_backup.sh
+
+# Switch to root user for setup
+USER root
+
+# Make the entrypoint script executable
+RUN chmod +x /usr/local/bin/entrypoint_backup.sh
+
+# Set the new entrypoint
+ENTRYPOINT ["/usr/local/bin/entrypoint_backup.sh"]
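As committed, the entrypoint this image runs (entrypoint_backup.sh, further down) only waits for the server; no dump is actually taken. For illustration only, the equivalent pg_dump step is sketched in Python to match the rest of this page's examples; the Alpine image only ships a shell, so treat this as pseudocode for the invocation, and the /backup/dump.sql path as an assumption:

    # Illustrative only: a dump step the backup container could run after the
    # wait loop. The output path and use of pg_dump are assumptions.
    import os
    import subprocess

    env = dict(os.environ, PGPASSWORD=os.environ.get("POSTGRES_PASSWORD", "admin"))
    subprocess.run(
        [
            "pg_dump",
            "-h", os.environ.get("POSTGRES_HOST", "admin"),
            "-U", os.environ.get("POSTGRES_USER", "admin"),
            "-f", "/backup/dump.sql",
            os.environ.get("POSTGRES_DB", "admin"),
        ],
        env=env,
        check=True,  # raise if pg_dump fails
    )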
163
config.json
@@ -1,51 +1,120 @@
 {
-    "containers": [
-        {
-            "name": "postgres",
-            "image": "postgres:16.5",
-            "env": {
-                "POSTGRES_USER": "odoo",
-                "POSTGRES_PASSWORD": "odoo",
-                "POSTGRES_DB": "postgres",
-                "POSTGRES_HOST_AUTH_METHOD": "trust"
-            },
-            "ports": [
-                {"internal": 5432, "external": 5432}
-            ],
-            "volumes": [
-                {"host_path": "/local/path/postgresql/data", "container_path": "/var/lib/postgresql/data"}
-            ]
-        },
-        {
-            "name": "odoo",
-            "image": "odoo:latest",
-            "env": {
-                "HOST": "postgres",
-                "USER": "odoo",
-                "PASSWORD": "odoo",
-                "DATABASE": "postgres",
-                "ODOO_PASSWORD": "admin"
-            },
-            "ports": [
-                {"internal": 8069, "external": 8069}
-            ],
-            "command": [
-                "/bin/bash", "-c", "until pg_isready -h postgres -U odoo; do echo 'Waiting for PostgreSQL...'; sleep 2; done; odoo -i base"
-            ]
-        },
-        {
-            "name": "grafana",
-            "image": "grafana/grafana:latest",
-            "env": {
-                "GF_SECURITY_ADMIN_PASSWORD": "grafana_pwd",
-                "GF_DATASOURCES_PROMETHEUS_URL": "http://prometheus:9090"
-            },
-            "ports": [
-                {"internal": 3000, "external": 3000}
-            ]
-        }
-    ],
-    "prometheus_scale": 2,
-    "fluentd_scale": 2
-}
+    "containers": [
+        {
+            "name": "admin",
+            "image": "postgres:latest",
+            "envs": {
+                "POSTGRES_DB": "admin",
+                "POSTGRES_USER": "admin",
+                "POSTGRES_PASSWORD": "admin"
+            },
+            "network_mode": "testNetwork",
+            "ports": [{ "internal": 5432, "external": 5432 }],
+            "volumes": [
+                { "container_path": "/var/lib/postgresql/data", "volume_name": "postgres-data" },
+                { "container_path": "/etc/postgresql/postgresql.conf", "host_path": "./postgresql.conf" },
+                { "container_path": "/docker-entrypoint-initdb.d/pg_stat_statements.sql", "host_path": "./pg_stat_statements.sql" }
+            ]
+        },
+        {
+            "name": "postgres_exporter",
+            "image": "wrouesnel/postgres_exporter:latest",
+            "envs": {
+                "DATA_SOURCE_NAME": "postgresql://admin:admin@admin:5432/admin?sslmode=disable"
+            },
+            "network_mode": "testNetwork",
+            "ports": [{ "internal": 9187, "external": 9187 }]
+        },
+        {
+            "name": "pgadmin",
+            "image": "dpage/pgadmin4:latest",
+            "envs": {
+                "PGADMIN_DEFAULT_EMAIL": "admin@admin.com",
+                "PGADMIN_DEFAULT_PASSWORD": "admin"
+            },
+            "network_mode": "testNetwork",
+            "ports": [{ "internal": 80, "external": 5050 }]
+        },
+        {
+            "name": "odoo",
+            "image": "odoo.dockerfile",
+            "envs": {
+                "HOST": "admin",
+                "USER": "admin",
+                "PASSWORD": "admin",
+                "DATABASE": "admin",
+                "ODOO_PASSWORD": "admin"
+            },
+            "network_mode": "testNetwork",
+            "ports": [{ "internal": 8069, "external": 8069 }],
+            "instances": 3,
+            "volumes": [
+                {
+                    "host_path": "./odoo.conf",
+                    "container_path": "/etc/odoo/odoo.conf"
+                }
+            ]
+        },
+        {
+            "name": "grafana",
+            "image": "grafana/grafana:latest",
+            "envs": {
+                "GF_SECURITY_ADMIN_PASSWORD": "grafana_pwd",
+                "GF_DATASOURCES_PROMETHEUS_URL": "http://prometheus:9090"
+            },
+            "network_mode": "testNetwork",
+            "ports": [{ "internal": 3000, "external": 3000 }],
+            "instances": 2
+        },
+        {
+            "name": "prometheus",
+            "image": "prom/prometheus:latest",
+            "network_mode": "testNetwork",
+            "ports": [{ "internal": 9090, "external": 9090 }],
+            "volumes": [
+                {
+                    "container_path": "/prometheus",
+                    "volume_name": "prometheus-data"
+                },
+                {
+                    "container_path": "/etc/prometheus/prometheus.yml",
+                    "host_path": "./prometheus.yml"
+                }
+            ]
+        },
+        {
+            "name": "fluentd",
+            "image": "fluent/fluentd:v1.13-1",
+            "network_mode": "testNetwork",
+            "ports": [{ "internal": 24224, "external": 24224 }],
+            "volumes": [
+                {
+                    "container_path": "/fluentd/etc/fluent.conf",
+                    "host_path": "./fluent.conf"
+                }
+            ]
+        },
+        {
+            "name": "backup",
+            "image": "backup.dockerfile",
+            "envs": {
+                "POSTGRES_HOST": "admin",
+                "POSTGRES_DB": "admin",
+                "POSTGRES_USER": "admin",
+                "POSTGRES_PASSWORD": "admin"
+            },
+            "network_mode": "testNetwork",
+            "volumes": [
+                {
+                    "container_path": "/backup",
+                    "volume_name": "backup-data"
+                }
+            ]
+        },
+        {
+            "name": "postgres_metrics_exporter",
+            "image": "postgres_metrics_exporter.dockerfile",
+            "ports": [{ "internal": 8000, "external": 8000 }]
+        }
+    ]
+}
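The Pulumi program trusts this file's shape. A small validation pass run beforehand catches the most common mistakes, a missing "image" key or clashing external ports once the per-instance offsets are applied; the validate_config helper below is a sketch of ours, not part of the commit:

    # Sketch: sanity-check config.json before handing it to the Pulumi program.
    # validate_config is a hypothetical helper, not part of this commit.
    import json

    def validate_config(path="config.json"):
        with open(path) as f:
            data = json.load(f)
        seen_ports = set()
        for c in data.get("containers", []):
            if "image" not in c:
                raise ValueError(f"container {c.get('name', '?')} has no 'image' key")
            for i in range(c.get("instances", 1)):
                for p in c.get("ports", []):
                    ext = p["external"] + i  # same offset rule as create_container
                    if ext in seen_ports:
                        raise ValueError(f"external port {ext} used twice")
                    seen_ports.add(ext)

    validate_config()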
6
entrypoint.sh
@@ -1,6 +0,0 @@
-# Wait for PostgreSQL to be ready
-until pg_isready -h $HOST -U $USER; do
-    echo "Waiting for PostgreSQL..."
-    sleep 2
-done
-
11
entrypoint_backup.sh
Normal file
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+# Install PostgreSQL client
+apk add --no-cache postgresql-client
+
+# Wait until the PostgreSQL server is ready
+until pg_isready -h admin -U admin; do
+    echo "Waiting for PostgreSQL..."
+    sleep 2
+done
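The same readiness gate, expressed with psycopg2 (already pulled in by requirements2.txt), for anyone scripting against this stack from Python; the credentials mirror the config.json values, and the 30-attempt cap is an assumption:

    # Sketch: wait for PostgreSQL the way the shell loop above does, from Python.
    import time
    import psycopg2

    for attempt in range(30):
        try:
            psycopg2.connect(host="admin", user="admin", password="admin", dbname="admin").close()
            break
        except psycopg2.OperationalError:
            print("Waiting for PostgreSQL...")
            time.sleep(2)
    else:
        raise SystemExit("PostgreSQL never became ready")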
4
entrypoint_odoo.sh
Normal file
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+sleep 10
+odoo -i base
14
fluent.conf
Normal file
@ -0,0 +1,14 @@
|
||||
<source>
|
||||
@type forward
|
||||
port 8069 # Odoo logs
|
||||
</source>
|
||||
|
||||
<source>
|
||||
@type forward
|
||||
port 3000 # Grafana logs
|
||||
</source>
|
||||
|
||||
<match *>
|
||||
@type file
|
||||
path /fluentd/logs/collected-logs
|
||||
</match>
|
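These sources speak Fluentd's forward protocol (msgpack framing), not the applications' HTTP, so events must come from a forward client. A minimal emit against the published forward port 24224 from config.json, using the fluent-logger package (an assumed extra dependency, not in requirements2.txt; the "app" tag is arbitrary):

    # Sketch: send one event to Fluentd over the forward protocol.
    from fluent import sender

    logger = sender.FluentSender("app", host="localhost", port=24224)
    logger.emit("startup", {"message": "hello from the stack"})
    logger.close()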
14
odoo.Dockerfile
Normal file
@@ -0,0 +1,14 @@
+# Use the existing Odoo image as the base
+FROM odoo:latest as odoo-custom
+
+# Copy the entrypoint script into the container
+COPY entrypoint_odoo.sh /usr/local/bin/entrypoint_odoo.sh
+
+USER root
+
+# Make the entrypoint script executable
+RUN chmod +x /usr/local/bin/entrypoint_odoo.sh
+
+# Set the new entrypoint
+ENTRYPOINT ["/usr/local/bin/entrypoint_odoo.sh"]
7
odoo.conf
Normal file
@@ -0,0 +1,7 @@
+[options]
+db_host = admin
+db_port = 5432
+db_user = admin
+db_password = admin
+default_productivity_apps = True
+db_name = admin
10
pg_stat_statements.sql
Normal file
@@ -0,0 +1,10 @@
+-- Create the pg_stat_statements extension
+CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
+
+-- Configure the pg_stat_statements parameters
+ALTER SYSTEM SET pg_stat_statements.max = 10000;
+ALTER SYSTEM SET pg_stat_statements.track = 'all';
+ALTER SYSTEM SET pg_stat_statements.save = 'on';
+
+-- Reload the configuration
+SELECT pg_reload_conf();
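Note that the extension also needs shared_preload_libraries from postgresql.conf, and pg_stat_statements.max only takes effect after a server restart, not a reload. A quick check that the view is queryable, using psycopg2 and the same credentials as the exporter:

    # Sketch: confirm pg_stat_statements is installed and queryable.
    import psycopg2

    conn = psycopg2.connect(host="admin", user="admin", password="admin", dbname="admin")
    cur = conn.cursor()
    cur.execute("SELECT count(*) FROM pg_stat_statements;")
    print("tracked statements:", cur.fetchone()[0])
    cur.close()
    conn.close()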
12
postgres_metrics_exporter.Dockerfile
Normal file
@@ -0,0 +1,12 @@
+# Dockerfile for postgres_metrics_exporter
+FROM python:3.9-slim-buster
+
+WORKDIR /app
+
+COPY prometheus_exporter.py .
+COPY requirements2.txt .
+
+# The requirements file in this repository is named requirements2.txt
+RUN pip install --no-cache-dir -r requirements2.txt
+
+CMD ["python", "-u", "prometheus_exporter.py"]
1
postgresql.conf
Normal file
@@ -0,0 +1 @@
+shared_preload_libraries = 'pg_stat_statements'
1
projetPulumi
Submodule
@@ -0,0 +1 @@
+Subproject commit 2281e6eb41d8540e40e08a3f1f06ca9c179b86e0
13
prometheus.yml
Normal file
@@ -0,0 +1,13 @@
+global:
+  scrape_interval: 15s
+
+scrape_configs:
+  - job_name: 'postgres_exporter'
+    static_configs:
+      - targets: ['postgres_exporter:9187']
+
+  - job_name: 'pg_stat_statements'
+    static_configs:
+      - targets: ['localhost:8000']
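Once the stack is up, scrape results can be verified through Prometheus's HTTP API on the published port 9090. A standard-library sketch; an empty result set would suggest the second job's 'localhost:8000' target resolves to the Prometheus container itself rather than the exporter, in which case the target needs the exporter's network name instead:

    # Sketch: ask Prometheus whether the exporter's metric is being scraped.
    import json
    import urllib.parse
    import urllib.request

    params = urllib.parse.urlencode({"query": "postgresql_query_calls"})
    with urllib.request.urlopen(f"http://localhost:9090/api/v1/query?{params}") as resp:
        result = json.load(resp)
    print(result["status"], len(result["data"]["result"]), "series")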
48
prometheus_exporter.py
Normal file
@@ -0,0 +1,48 @@
+from prometheus_client import start_http_server, Gauge
+import psycopg2
+import time
+import os
+
+# Configuration from environment variables
+DB_HOST = os.environ.get("POSTGRES_HOST", "admin")
+DB_NAME = os.environ.get("POSTGRES_DB", "admin")
+DB_USER = os.environ.get("POSTGRES_USER", "admin")
+DB_PASSWORD = os.environ.get("POSTGRES_PASSWORD", "admin")
+
+conn_params = {
+    "host": DB_HOST,
+    "database": DB_NAME,
+    "user": DB_USER,
+    "password": DB_PASSWORD
+}
+
+QUERY_CALLS = Gauge('postgresql_query_calls', 'Number of PostgreSQL query calls', ['query'])
+QUERY_TOTAL_TIME = Gauge('postgresql_query_total_time_ms', 'Total time of PostgreSQL queries (ms)', ['query'])
+
+
+def generate_metrics():
+    try:
+        conn = psycopg2.connect(**conn_params)
+        cur = conn.cursor()
+        # pg_stat_statements renamed total_time to total_exec_time in
+        # PostgreSQL 13+, which is what postgres:latest ships
+        cur.execute("""
+            SELECT query, calls, total_exec_time
+            FROM pg_stat_statements
+            ORDER BY total_exec_time DESC;
+        """)
+
+        for row in cur:
+            query = row[0].replace("\\", "\\\\").replace('"', '\\"')
+            QUERY_CALLS.labels(query=query).set(row[1])
+            # total_exec_time is already in milliseconds; no conversion needed
+            QUERY_TOTAL_TIME.labels(query=query).set(row[2])
+
+        cur.close()
+        conn.close()
+    except psycopg2.Error as e:
+        print(f"Error connecting to the database: {e}", flush=True)
+
+
+if __name__ == '__main__':
+    start_http_server(8000)
+    while True:
+        generate_metrics()
+        time.sleep(60)
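A quick local check that the exporter is serving; prometheus_client exposes the text exposition format at /metrics:

    # Sketch: fetch the exporter's metrics page and show the first few lines.
    import urllib.request

    with urllib.request.urlopen("http://localhost:8000/metrics") as resp:
        body = resp.read().decode()
    print("\n".join(body.splitlines()[:5]))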
2
requirements2.txt
Normal file
@@ -0,0 +1,2 @@
+psycopg2-binary
+prometheus-client