Compare commits

...

No commits in common. "master" and "entrypoint_solution" have entirely different histories.

13 changed files with 85 additions and 245 deletions

View File

@ -1,5 +1,5 @@
# Use the existing Odoo image as the base
FROM odoo:latest as odoo-custom
FROM odoo:latest AS odoo-custom
# Copy the entrypoint script into the container
COPY entrypoint_odoo.sh /usr/local/bin/entrypoint_odoo.sh

View File

@ -1,41 +0,0 @@
Before running `pulumi up`, build the images:
docker build -f odoo.Dockerfile -t odoo_custom .
docker build -f backup.Dockerfile -t backup_custom .
docker build -f exporter.Dockerfile -t exporter_custom .
After running `pulumi up`, open a terminal in the postgres container and do the following:
TO CONFIGURE DATABASE pg_stat_statements :
psql -h admin -U admin -d admin
CREATE EXTENSION pg_stat_statements;
exit
apt-get update && apt-get install nano -y
nano /var/lib/postgresql/data/postgresql.conf
shared_preload_libraries = 'pg_stat_statements'
compute_query_id = on
pg_stat_statements.max = 10000
pg_stat_statements.track = all
restart the postgres container so the new settings take effect
TO TEST pg_stat_statements :
psql -h admin -U admin -d admin
SELECT * FROM pg_stat_statements;
SELECT query, calls, total_exec_time, rows, 100.0 * shared_blks_hit /
nullif(shared_blks_hit + shared_blks_read, 0) AS hit_percent
FROM pg_stat_statements ORDER BY total_exec_time DESC LIMIT 5;
TO TEST PROMETHEUS SCRAPING :
visit http://localhost:9090/targets
FOR GRAFANA : DISPLAY SELECT QUERIES PER TIME :
sum(increase(postgresql_query_calls{query=~"SELECT.*"}[5m]))

View File

@ -13,11 +13,23 @@ except FileNotFoundError:
# Create the network
try:
network = docker.Network("testNetwork")
pulumi.export("network", network.name)
except Exception as e:
pulumi.log.error(f"Failed to create network: {e}")
network = None
# Create volumes
volumes = {}
for container in containers_data.get("containers", []):
for volume in container.get("volumes", []):
try:
if "volume_name" in volume and volume["volume_name"] not in volumes:
volumes[volume["volume_name"]] = docker.Volume(volume["volume_name"])
except Exception as e:
pulumi.log.error(f"Failed to create volume {volume.get('volume_name')}: {e}")
pulumi.export("volumes", {name: vol.name for name, vol in volumes.items()})
# Create containers
for container in containers_data.get("containers", []):
instances = container.get("instances", 1)
@ -25,13 +37,6 @@ for container in containers_data.get("containers", []):
container_name = f"{container['name']}-{i}" if instances > 1 else container["name"]
# Configure volumes
volumes = {}
for volume in container.get("volumes", []):
try:
if "volume_name" in volume and volume["volume_name"] not in volumes:
volumes[volume["volume_name"]] = docker.Volume(volume["volume_name"])
except Exception as e:
pulumi.log.error(f"Failed to create volume {volume.get('volume_name')}: {e}")
volumes_config = []
try:
if "volumes" in container:
@ -53,22 +58,37 @@ for container in containers_data.get("containers", []):
# Create the container
try:
container_resource = docker.Container(
container_name,
image=container["image"],
hostname=container_name,
envs=[
f"{key}={value}" for key, value in container.get("envs", {}).items()
] if "envs" in container else [],
ports=[
docker.ContainerPortArgs(
internal=port["internal"],
external=port["external"] + i
) for port in container.get("ports", [])
] if "ports" in container else [],
volumes=volumes_config,
network_mode=network.name if network else None,
)
if container["name"] == "odoo": # Special configuration for Odoo
container_resource = docker.Container(
container_name,
image="odoo-custom", # Replace with pre-built image name
envs=[f"{key}={value}" for key, value in container.get("envs", {}).items()],
ports=[
docker.ContainerPortArgs(
internal=port["internal"],
external=port["external"] + i
) for port in container.get("ports", [])
] if "ports" in container else [],
volumes=volumes_config,
network_mode=network.name if network else None,
)
else:
container_resource = docker.Container(
container_name,
image=container["image"],
envs=[
f"{key}={value}" for key, value in container.get("envs", {}).items()
] if "envs" in container else [],
ports=[
docker.ContainerPortArgs(
internal=port["internal"],
external=port["external"] + i
) for port in container.get("ports", [])
] if "ports" in container else [],
volumes=volumes_config,
network_mode=network.name if network else None,
command=container.get("command", []),
)
ports = container.get("ports", [])
if ports:
for port in ports:

View File

@ -1,14 +0,0 @@
# Use the Alpine image as the base
FROM alpine:latest as backup_custom
# Copy the entrypoint script into the container
COPY entrypoint_backup.sh /usr/local/bin/entrypoint_backup.sh
# Switch to root user for setup
USER root
# Make the entrypoint script executable
RUN chmod +x /usr/local/bin/entrypoint_backup.sh
# Set the new entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint_backup.sh"]

View File

@ -1,14 +1,13 @@
{
"containers": [
{
"name": "admin",
"name": "db",
"image": "postgres:latest",
"envs": {
"POSTGRES_DB": "admin",
"POSTGRES_USER": "admin",
"POSTGRES_PASSWORD": "admin"
},
"network_mode": "testNetwork",
"ports": [{"internal": 5432, "external": 5432}],
"volumes": [
{
@ -17,44 +16,18 @@
}
]
},
{
"name": "pgadmin",
"image": "dpage/pgadmin4:latest",
"envs": {
"PGADMIN_DEFAULT_EMAIL": "admin@admin.com",
"PGADMIN_DEFAULT_PASSWORD": "admin"
},
"network_mode": "testNetwork",
"ports": [{"internal": 80, "external": 5050}]
},
{
"name": "prometheus_exporter",
"image": "exporter_custom",
"network_mode": "testNetwork",
"ports": [{"internal": 8000, "external": 8000}]
},
{
"name": "odoo",
"image": "odoo_custom",
"image": "odoo:latest",
"envs": {
"HOST": "admin",
"HOST": "db",
"USER": "admin",
"PASSWORD": "admin",
"DATABASE": "admin",
"ODOO_PASSWORD": "admin"
},
"network_mode": "testNetwork",
"ports": [{"internal": 8069, "external": 8069}],
"volumes": [
{
"host_path": "./odoo.conf",
"container_path": "/etc/odoo/odoo.conf"
},
{
"container_path": "/var/log/odoo",
"host_path": "./logs/odoo"
}
]
"instances": 3
},
{
"name": "grafana",
@ -63,14 +36,12 @@
"GF_SECURITY_ADMIN_PASSWORD": "grafana_pwd",
"GF_DATASOURCES_PROMETHEUS_URL": "http://prometheus:9090"
},
"network_mode": "testNetwork",
"ports": [{"internal": 3000, "external": 3000}],
"instances": 2
},
{
"name": "prometheus",
"image": "prom/prometheus:latest",
"network_mode": "testNetwork",
"ports": [{"internal": 9090, "external": 9090}],
"volumes": [
{
@ -82,17 +53,12 @@
"host_path": "./prometheus.yml"
}
]
},
},
{
"name": "fluentd",
"image": "fluent/fluentd:v1.13-1",
"network_mode": "testNetwork",
"ports": [{"internal": 24224, "external": 24224}],
"volumes": [
{
"container_path": "/var/log/odoo",
"host_path": "./logs/odoo"
},
{
"container_path": "/fluentd/etc/fluent.conf",
"host_path": "./fluent.conf"
@ -101,14 +67,17 @@
},
{
"name": "backup",
"image": "backup_custom",
"image": "alpine:latest",
"envs": {
"POSTGRES_HOST": "admin",
"POSTGRES_HOST": "db",
"POSTGRES_DB": "admin",
"POSTGRES_USER": "admin",
"POSTGRES_PASSWORD": "admin"
},
"network_mode": "testNetwork",
"command": [
"/bin/sh", "-c",
"apk add --no-cache postgresql-client && sleep 10"
],
"volumes": [
{
"container_path": "/backup",

View File

@ -1,11 +1,6 @@
#!/bin/sh
#!/bin/bash
set -c
# Install PostgreSQL client
apk add --no-cache postgresql-client
# Wait until the PostgreSQL server is ready
until pg_isready -h admin -U admin; do
echo "Waiting for PostgreSQL..."
sleep 2
done
sleep 5

View File

@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash
sleep 20
sleep 10
odoo -i base

View File

@ -1,27 +0,0 @@
# Use a Python slim image as base
FROM python:3.9-slim as exporter_custom
# Set environment variables to avoid buffering
ENV PYTHONUNBUFFERED=1
# Create and set working directory
WORKDIR /app
# Install required dependencies
RUN apt-get update && \
apt-get install -y \
libpq-dev && \
pip install psycopg2-binary prometheus-client && \
apt-get clean
# Create a directory for logs
RUN mkdir /app/logs
# Copy the Python script into the container
COPY pg_metrics_exporter.py /app/
# Set permissions for log directory (if required)
RUN chmod 755 /app/logs
# Run the script and redirect logs to a file
CMD ["python", "/app/pg_metrics_exporter.py", ">", "/app/logs/exporter.log", "2>&1"]

View File

@ -1,13 +1,14 @@
<source>
@type tail
path "/var/log/odoo/odoo.log"
pos_file "/fluentd/logs/odoo.pos"
format none
tag "odoo.log"
@type forward
port 8069 # Odoo logs
</source>
<match odoo.log>
@type file
path "/fluentd/logs/collected-logs"
</match>
<source>
@type forward
port 3000 # Grafana logs
</source>
<match *>
@type file
path /fluentd/logs/collected-logs
</match>

View File

@ -1,6 +0,0 @@
2024-12-05 14:21:14,148 8 INFO ? odoo: Odoo version 18.0-20241108
2024-12-05 14:21:14,151 8 INFO ? odoo: Using configuration file at /etc/odoo/odoo.conf
2024-12-05 14:21:14,152 8 INFO ? odoo: addons paths: ['/usr/lib/python3/dist-packages/odoo/addons', '/root/.local/share/Odoo/addons/18.0']
2024-12-05 14:21:14,154 8 INFO ? odoo: database: admin@admin:5432
2024-12-05 14:21:14,793 8 INFO ? odoo.addons.base.models.ir_actions_report: Will use the Wkhtmltopdf binary at /usr/local/bin/wkhtmltopdf
2024-12-05 14:21:14,814 8 INFO ? odoo.addons.base.models.ir_actions_report: Will use the Wkhtmltoimage binary at /usr/local/bin/wkhtmltoimage

View File

@ -1,8 +0,0 @@
[options]
db_host = admin
db_port = 5432
db_user = admin
db_password = admin
default_productivity_apps = True
db_name = admin
logfile = /var/log/odoo/odoo.log

View File

@ -1,59 +0,0 @@
import psycopg2
from prometheus_client import start_http_server, Gauge
import time
# Connection parameters for the monitored PostgreSQL instance.
# NOTE(review): host "admin" must resolve to the postgres container's
# name on the shared Docker network — confirm against containers.json.
DB_PARAMS = {
"host": "admin",
"database": "admin",
"user": "admin",
"password": "admin"
}
# Prometheus gauges, labelled by the (escaped) query text; populated by fetch_metrics().
QUERY_CALLS = Gauge('postgresql_query_calls', 'Number of calls for each query', ['query'])
QUERY_TOTAL_TIME = Gauge('postgresql_query_total_time_ms', 'Total execution time for each query in ms', ['query'])
def fetch_metrics():
    """Query pg_stat_statements and publish per-query metrics as Prometheus gauges.

    Sets QUERY_CALLS and QUERY_TOTAL_TIME for every row, labelled by the
    escaped query text. Database errors are logged and swallowed so the
    exporter keeps running and retries on the next polling cycle.
    """
    try:
        # Log connection attempt
        print("Connecting to PostgreSQL database...")
        conn = psycopg2.connect(**DB_PARAMS)
        try:
            # Close the cursor/connection even when a query fails mid-loop;
            # the original closed them only on the success path and leaked
            # the connection on any exception.
            with conn.cursor() as cur:
                cur.execute("""
                    SELECT query, calls, total_exec_time
                    FROM pg_stat_statements
                    ORDER BY total_exec_time DESC;
                """)
                for row in cur:
                    # Escape backslashes and double quotes so the raw SQL
                    # text is safe to use as a Prometheus label value.
                    query = row[0].replace("\\", "\\\\").replace('"', '\\"')
                    calls = row[1]
                    # pg_stat_statements.total_exec_time is already reported
                    # in milliseconds (PostgreSQL docs); the previous *1000
                    # exported microseconds under a metric named "..._ms".
                    total_time = row[2]
                    QUERY_CALLS.labels(query=query).set(calls)
                    QUERY_TOTAL_TIME.labels(query=query).set(total_time)
                    # Log the metrics being set
                    print(f"Metrics set for query: {query} | Calls: {calls} | Total execution time: {total_time} ms")
        finally:
            conn.close()
    except psycopg2.Error as e:
        print(f"Error fetching data: {e}")
    except Exception as e:
        print(f"Unexpected error: {e}")
if __name__ == '__main__':
    # Serve the registered gauges to Prometheus scrapers on port 8000.
    start_http_server(8000)
    print("Exporter running on http://localhost:8000/metrics")
    # Refresh the metrics from pg_stat_statements once per minute, forever.
    poll_interval_seconds = 60
    while True:
        fetch_metrics()
        time.sleep(poll_interval_seconds)

View File

@ -1,5 +1,15 @@
global:
scrape_interval: 15s # How often to scrape metrics (every 15 seconds)
scrape_configs:
- job_name: 'pg_stat_statements'
- job_name: 'postgres' # For scraping PostgreSQL
static_configs:
- targets: ['prometheus_exporter:8000']
scrape_interval: 15s
- targets: ['postgres:5432']
- job_name: 'odoo' # For scraping Odoo
static_configs:
- targets: ['odoo:8069']
- job_name: 'grafana' # For scraping Grafana
static_configs:
- targets: ['grafana:3000']