Compare commits

...

No commits in common. "feature-branch" and "main" have entirely different histories.

14 changed files with 110 additions and 264 deletions

.env
View File

@@ -1,5 +0,0 @@
POSTGRES_PASSWORD=openpgpwd
POSTGRES_USER=openpg
POSTGRES_DB=pulumi
ODOO_PASSWORD=admin
GRAFANA_PASSWORD=grafana_pwd

View File

@@ -1,5 +1,5 @@
-name: projet
-description: projet pulumi docker
+name: pulumi4
+description: pulumi
 runtime:
   name: python
   options:

View File

@@ -1,81 +1,82 @@
 import pulumi
 import pulumi_docker as docker
 import json
-import os
-
-# Load the JSON configuration file
-try:
-    with open("config.json", "r") as f:
-        containers_data = json.load(f)
-except FileNotFoundError:
-    raise Exception("Error: 'config.json' file not found.")
-
-# Create the network
-try:
-    network = docker.Network("testNetwork")
-except Exception as e:
-    pulumi.log.error(f"Failed to create network: {e}")
-    network = None
-
-# Create containers
-for container in containers_data.get("containers", []):
-    instances = container.get("instances", 1)
-    for i in range(instances):
-        container_name = f"{container['name']}-{i}" if instances > 1 else container["name"]
-
-        # Configure volumes
-        volumes = {}
-        for volume in container.get("volumes", []):
-            try:
-                if "volume_name" in volume and volume["volume_name"] not in volumes:
-                    volumes[volume["volume_name"]] = docker.Volume(volume["volume_name"])
-            except Exception as e:
-                pulumi.log.error(f"Failed to create volume {volume.get('volume_name')}: {e}")
-
-        volumes_config = []
-        try:
-            if "volumes" in container:
-                volumes_config = [
-                    docker.ContainerVolumeArgs(
-                        container_path=volume["container_path"],
-                        volume_name=volumes[volume["volume_name"]].name
-                    ) if "volume_name" in volume else
-                    docker.ContainerVolumeArgs(
-                        container_path=volume["container_path"],
-                        host_path=os.path.abspath(volume["host_path"])
-                    )
-                    for volume in container["volumes"]
-                ]
-        except KeyError as e:
-            pulumi.log.warn(f"Missing key in volume configuration: {e}")
-        except Exception as e:
-            pulumi.log.error(f"Error configuring volumes for container {container_name}: {e}")
-
-        # Create the container
-        try:
-            container_resource = docker.Container(
-                container_name,
-                image=container["image"],
-                hostname=container_name,
-                envs=[
-                    f"{key}={value}" for key, value in container.get("envs", {}).items()
-                ] if "envs" in container else [],
-                ports=[
-                    docker.ContainerPortArgs(
-                        internal=port["internal"],
-                        external=port["external"] + i
-                    ) for port in container.get("ports", [])
-                ] if "ports" in container else [],
-                volumes=volumes_config,
-                network_mode=network.name if network else None,
-            )
-            ports = container.get("ports", [])
-            if ports:
-                for port in ports:
-                    external_port = port["external"] + i
-                    pulumi.export(
-                        f"{container_name}_url",
-                        f"http://localhost:{external_port}"
-                    )
-        except Exception as e:
-            pulumi.log.error(f"Failed to create container {container_name}: {e}")
+
+# Load the configuration from the JSON file
+with open('config.json') as f:
+    config_data = json.load(f)
+
+# Create a Docker network
+network = docker.Network("testNetwork")
+
+# Initialize the containers list
+containers = []
+
+# Initialize the URL dictionary for export
+urls = {}
+
+# Create containers based on the configuration
+for container in config_data["containers"]:
+    container_name = container["name"]
+    container_envs = [f"{key}={value}" for key, value in container.get("env", {}).items()]
+
+    # Create the container
+    docker_container = docker.Container(
+        container_name,
+        name=container_name,
+        image=container["image"],
+        envs=container_envs,
+        ports=[docker.ContainerPortArgs(
+            internal=port["internal"],
+            external=port["external"]
+        ) for port in container["ports"]],
+        network_mode=network.name,
+        command=container.get("command", None) or container.get("entrypoint", None),
+        ##container.get("entrypoint", "/usr/local/bin/entrypoint.sh")
+        volumes=[docker.ContainerVolumeArgs(
+            host_path=vol["host_path"],
+            container_path=vol["container_path"]
+        ) for vol in container.get("volumes", [])]
+    )
+    containers.append(docker_container)
+
+    # Add the URLs for the container
+    for port in container["ports"]:
+        urls[f"{container_name}_url"] = f"http://localhost:{port['external']}"
+
+# Scale Prometheus containers
+for i in range(config_data.get("prometheus_scale", 1)):  # Default to 1 if not specified
+    prometheus_instance = docker.Container(
+        f"prometheus-instance-{i}",
+        name=f"prometheus-{i}",
+        image="prom/prometheus:latest",
+        ports=[docker.ContainerPortArgs(internal=9090, external=9090 + i)],
+        network_mode=network.name
+    )
+    containers.append(prometheus_instance)
+    # Add the Prometheus URLs
+    urls[f"prometheus_{i}_url"] = f"http://localhost:{9090 + i}"
+
+# Scale Fluentd containers
+fluentd_scale = config_data.get("fluentd_scale", 1)  # Default to 1 if not specified
+for i in range(fluentd_scale):  # This will scale based on the value from config_data
+    fluentd_instance = docker.Container(
+        f"fluentd-instance-{i}",
+        name=f"fluentd-{i}",
+        image="fluent/fluentd:v1.14-1",  # Corrected image name
+        ports=[docker.ContainerPortArgs(internal=24224, external=24224 + i)],  # Assign unique external port for each container
+        network_mode=network.name
+    )
+    containers.append(fluentd_instance)
+    # Add the Fluentd URLs
+    urls[f"fluentd_{i}_url"] = f"http://localhost:{24224 + i}"
+
+# Export network and container details
+pulumi.export("network_name", network.name)
+pulumi.export("containers", [c.name for c in containers])
+pulumi.export("urls", urls)

View File

@@ -1,16 +0,0 @@
# Use the Alpine image as the base
FROM alpine:latest as backup_custom
# Copy the entrypoint script into the container
COPY entrypoint_backup.sh /usr/local/bin/entrypoint_backup.sh
# Switch to root user for setup
USER root
# Make the entrypoint script executable
RUN chmod +x /usr/local/bin/entrypoint_backup.sh
# Set the new entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint_backup.sh"]

View File

@@ -1,114 +1,51 @@
 {
   "containers": [
     {
-      "name": "admin",
-      "image": "postgres:latest",
-      "envs": {
-        "POSTGRES_DB": "admin",
-        "POSTGRES_USER": "admin",
-        "POSTGRES_PASSWORD": "admin"
-      },
-      "network_mode": "testNetwork",
-      "ports": [{"internal": 5432, "external": 5432}],
-      "volumes": [
-        {
-          "container_path": "/var/lib/postgresql/data",
-          "volume_name": "postgres-data"
-        },
-        {
-          "container_path": "/etc/postgresql/postgresql.conf",
-          "host_path": "./postgresql.conf"
-        }
-      ]
-    },
-    {
-      "name": "postgres_exporter",
-      "image": "wrouesnel/postgres_exporter:latest",
-      "envs": {
-        "DATA_SOURCE_NAME": "postgresql://admin:admin@admin:5432/admin?sslmode=disable"
-      },
-      "network_mode": "testNetwork",
-      "ports": [{"internal": 9187, "external": 9187}]
-    },
-    {
-      "name": "odoo",
-      "image": "odoo_custom",
-      "envs": {
-        "HOST": "admin",
-        "USER": "admin",
-        "PASSWORD": "admin",
-        "DATABASE": "admin",
-        "ODOO_PASSWORD": "admin"
-      },
-      "network_mode": "testNetwork",
-      "ports": [{"internal": 8069, "external": 8069}],
-      "instances": 3,
-      "volumes": [
-        {
-          "host_path": "./odoo.conf",
-          "container_path": "/etc/odoo/odoo.conf"
-        }
-      ]
-    },
-    {
-      "name": "grafana",
-      "image": "grafana/grafana:latest",
-      "envs": {
-        "GF_SECURITY_ADMIN_PASSWORD": "grafana_pwd",
-        "GF_DATASOURCES_PROMETHEUS_URL": "http://prometheus:9090"
-      },
-      "network_mode": "testNetwork",
-      "ports": [{"internal": 3000, "external": 3000}],
-      "instances": 2
-    },
-    {
-      "name": "prometheus",
-      "image": "prom/prometheus:latest",
-      "network_mode": "testNetwork",
-      "ports": [{"internal": 9090, "external": 9090}],
-      "volumes": [
-        {
-          "container_path": "/prometheus",
-          "volume_name": "prometheus-data"
-        },
-        {
-          "container_path": "/etc/prometheus/prometheus.yml",
-          "host_path": "./prometheus.yml"
-        }
-      ]
-    },
-    {
-      "name": "fluentd",
-      "image": "fluent/fluentd:v1.13-1",
-      "network_mode": "testNetwork",
-      "ports": [{"internal": 24224, "external": 24224}],
-      "volumes": [
-        {
-          "container_path": "/fluentd/etc/fluent.conf",
-          "host_path": "./fluent.conf"
-        }
-      ]
-    },
-    {
-      "name": "backup",
-      "image": "backup_custom",
-      "envs": {
-        "POSTGRES_HOST": "admin",
-        "POSTGRES_DB": "admin",
-        "POSTGRES_USER": "admin",
-        "POSTGRES_PASSWORD": "admin"
-      },
-      "network_mode": "testNetwork",
-      "volumes": [
-        {
-          "container_path": "/backup",
-          "volume_name": "backup-data"
-        }
-      ]
-    }
-  ]
-}
+      "name": "postgres",
+      "image": "postgres:16.5",
+      "env": {
+        "POSTGRES_USER": "odoo",
+        "POSTGRES_PASSWORD": "odoo",
+        "POSTGRES_DB": "postgres",
+        "POSTGRES_HOST_AUTH_METHOD": "trust"
+      },
+      "ports": [
+        {"internal": 5432, "external": 5432}
+      ],
+      "volumes": [
+        {"host_path": "/local/path/postgresql/data", "container_path": "/var/lib/postgresql/data"}
+      ]
+    },
+    {
+      "name": "odoo",
+      "image": "odoo:latest",
+      "env": {
+        "HOST": "postgres",
+        "USER": "odoo",
+        "PASSWORD": "odoo",
+        "DATABASE": "postgres",
+        "ODOO_PASSWORD": "admin"
+      },
+      "ports": [
+        {"internal": 8069, "external": 8069}
+      ],
+      "command": [
+        "/bin/bash", "-c", "until pg_isready -h postgres -U odoo; do echo 'Waiting for PostgreSQL...'; sleep 2; done; odoo -i base"
+      ]
+    },
+    {
+      "name": "grafana",
+      "image": "grafana/grafana:latest",
+      "env": {
+        "GF_SECURITY_ADMIN_PASSWORD": "grafana_pwd",
+        "GF_DATASOURCES_PROMETHEUS_URL": "http://prometheus:9090"
+      },
+      "ports": [
+        {"internal": 3000, "external": 3000}
+      ]
+    }
+  ],
+  "prometheus_scale": 2,
+  "fluentd_scale": 2
+}
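
Note: the trimmed config.json above has to keep the keys the rewritten __main__.py reads ("name", "image", "ports" are required; "env", "command", "entrypoint", and "volumes" are optional). The stand-alone check below is a hypothetical helper for illustration only, not part of this change set.

# minimal sketch: confirm each container entry carries the fields __main__.py expects
import json

with open("config.json") as f:
    cfg = json.load(f)

for c in cfg["containers"]:
    assert {"name", "image", "ports"} <= c.keys(), f"incomplete entry: {c}"
    print(c["name"], [f"{k}={v}" for k, v in c.get("env", {}).items()])
print("scale:", cfg.get("prometheus_scale", 1), cfg.get("fluentd_scale", 1))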

entrypoint.sh Normal file
View File

@@ -0,0 +1,6 @@
# Wait for PostgreSQL to be ready
until pg_isready -h $HOST -U $USER; do
echo "Waiting for PostgreSQL..."
sleep 2
done

View File

@@ -1,11 +0,0 @@
#!/bin/sh
# Install PostgreSQL client
apk add --no-cache postgresql-client
# Wait until the PostgreSQL server is ready
until pg_isready -h admin -U admin; do
echo "Waiting for PostgreSQL..."
sleep 2
done

View File

@@ -1,4 +0,0 @@
#!/bin/sh
sleep10
odoo -i base

View File

@@ -1,14 +0,0 @@
<source>
@type forward
port 8069 # Odoo logs
</source>
<source>
@type forward
port 3000 # Grafana logs
</source>
<match *>
@type file
path /fluentd/logs/collected-logs
</match>

View File

@@ -1,14 +0,0 @@
# Use the existing Odoo image as the base
FROM odoo:latest as odoo-custom
# Copy the entrypoint script into the container
COPY entrypoint_odoo.sh /usr/local/bin/entrypoint_odoo.sh
USER root
# Make the entrypoint script executable
RUN chmod +x /usr/local/bin/entrypoint_odoo.sh
# Set the new entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint_odoo.sh"]

View File

@@ -1,7 +0,0 @@
[options]
db_host = admin
db_port = 5432
db_user = admin
db_password = admin
default_productivity_apps = True
db_name = admin

View File

@@ -1,3 +0,0 @@
shared_preload_libraries = 'pg_stat_statements'
pg_stat_statements.max = 10000
pg_stat_statements.track = all

@@ -1 +0,0 @@
Subproject commit 2281e6eb41d8540e40e08a3f1f06ca9c179b86e0

View File

@@ -1,23 +0,0 @@
global:
scrape_interval: 15s # How often to scrape metrics (every 15 seconds)
scrape_configs:
- job_name: 'postgres' # For scraping PostgreSQL
static_configs:
- targets: ['postgres:5432']
- job_name: 'odoo' # For scraping Odoo
static_configs:
- targets: ['odoo:8069']
- job_name: 'grafana' # For scraping Grafana
static_configs:
- targets: ['grafana:3000']
- job_name: 'postgres_exporter'
static_configs:
- targets: ['postgres_exporter:9187']