Intento de migraciones con script generador

This commit is contained in:
2026-02-17 13:46:16 +01:00
parent 5d3465fd97
commit 8427613114
10 changed files with 388 additions and 107 deletions

7
.env
View File

@@ -16,9 +16,14 @@ RABBITMQ_VHOST=sim-vhost
# Hay cosas que unificar de varios servicios
POSTGRES_DB=postgres
POSTGRES_DATABASE=postgres
#POSTGRES_HOST=postgresql-sim
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
DEV_POSTGRES_PORT=5432
POSTGRES_USER=postgres
POSTGRES_PASSWORD=1234
# Para el postgres local que hace las migraciones
PGHOST=localhost
PGUSER=alvar
PGPASSWORD=alvar
PGPORT=5433

View File

@@ -17,7 +17,7 @@ CREATE TABLE if not exists objenious_operation (
operation TEXT NOT NULL,
start_date TIMESTAMP NOT NULL DEFAULT now(),
last_change_date TIMESTAMP NOT NULL DEFAULT now(),
finish_date TIMESTAMP,
end_date TIMESTAMP,
error TEXT,
status status_enum,
objenious_status TEXT
@@ -26,7 +26,7 @@ CREATE TABLE if not exists objenious_operation (
-- operaciones pendientes para revisar
CREATE INDEX IF NOT EXISTS pending_operations
ON objenious_operation(start_date)
WHERE end_date IS NULL;
WHERE end_date IS NULL;
CREATE TABLE if not exists objenious_operation_change (
id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,

View File

@@ -0,0 +1,177 @@
--
-- PostgreSQL database dump
--
-- NOTE(review): file generated by pg_dump (schema-only); do not hand-edit,
-- regenerate it from the migration build script instead.
-- \restrict / \unrestrict (below) are psql meta-commands emitted by patched
-- pg_dump releases; presumably they guard the restore session against
-- meta-command injection — leave them exactly as generated.
\restrict KSNzZnHZpEceNNT6ECpaur3DZ2vfo46zPOlHHzj65wcCLFVfh0bZaudbdKNNiIC
-- Dumped from database version 16.11 (Ubuntu 16.11-0ubuntu0.24.04.1)
-- Dumped by pg_dump version 16.11 (Ubuntu 16.11-0ubuntu0.24.04.1)
-- Session settings emitted by pg_dump so the dump restores the same way
-- regardless of the restoring session's configuration.
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
-- Empty search_path: every object below is schema-qualified with "public".
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;
--
-- Name: order_status; Type: TYPE; Schema: public; Owner: -
--
-- Lifecycle states of a RabbitMQ-tracked order ('dlx' = dead-lettered).
CREATE TYPE public.order_status AS ENUM (
'pending',
'running',
'finished',
'failed',
'dlx'
);
--
-- Name: order_types; Type: TYPE; Schema: public; Owner: -
--
-- Kind of SIM operation carried by an order message.
CREATE TYPE public.order_types AS ENUM (
'activate',
'preactivate',
'cancel',
'pause',
'reactivate',
'unknown'
);
--
-- Name: status_enum; Type: TYPE; Schema: public; Owner: -
--
-- Objenious-specific operation states; 'noRequestID'/'noMassID' mark
-- operations still waiting on identifiers from the Objenious platform.
CREATE TYPE public.status_enum AS ENUM (
'noRequestID',
'noMassID',
'running',
'finished',
'error',
'other'
);
SET default_tablespace = '';
SET default_table_access_method = heap;
--
-- Name: objenious_operation; Type: TABLE; Schema: public; Owner: -
--
-- One row per bulk operation launched against the Objenious platform.
CREATE TABLE public.objenious_operation (
id bigint NOT NULL,
retry_count integer DEFAULT 0,
max_retry integer DEFAULT 5,
max_date_retry timestamp without time zone,
iccids text,
request_id text,
mass_action_id text,
operation text NOT NULL,
-- NOTE(review): start_date defaults to UTC while last_change_date uses
-- local now(); a later migration aligns both to UTC — confirm which schema
-- version this dump captures.
start_date timestamp without time zone DEFAULT (now() AT TIME ZONE 'utc'::text) NOT NULL,
last_change_date timestamp without time zone DEFAULT now() NOT NULL,
end_date timestamp without time zone,
error text,
status public.status_enum,
objenious_status text
);
--
-- Name: objenious_operation_change; Type: TABLE; Schema: public; Owner: -
--
-- Audit trail: one row per status transition of an objenious_operation.
CREATE TABLE public.objenious_operation_change (
id bigint NOT NULL,
operation_id bigint,
creation_date timestamp without time zone DEFAULT now() NOT NULL,
error text,
new_status public.status_enum,
previous_status public.status_enum,
new_objenious_status text,
previous_objenious_status text,
new_request_id text,
new_mass_action_id text
);
--
-- Name: objenious_operation_change_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
ALTER TABLE public.objenious_operation_change ALTER COLUMN id ADD GENERATED ALWAYS AS IDENTITY (
SEQUENCE NAME public.objenious_operation_change_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1
);
--
-- Name: objenious_operation_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
ALTER TABLE public.objenious_operation ALTER COLUMN id ADD GENERATED ALWAYS AS IDENTITY (
SEQUENCE NAME public.objenious_operation_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1
);
--
-- Name: objenious_operation_change objenious_operation_change_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.objenious_operation_change
ADD CONSTRAINT objenious_operation_change_pkey PRIMARY KEY (id);
--
-- Name: objenious_operation objenious_operation_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.objenious_operation
ADD CONSTRAINT objenious_operation_pkey PRIMARY KEY (id);
--
-- Name: operation_change; Type: INDEX; Schema: public; Owner: -
--
-- Supports the FK lookup "all changes of a given operation".
CREATE INDEX operation_change ON public.objenious_operation_change USING btree (operation_id);
--
-- Name: pending_operations; Type: INDEX; Schema: public; Owner: -
--
-- Partial index: only rows still pending (end_date IS NULL), so the index
-- stays small as finished operations accumulate.
CREATE INDEX pending_operations ON public.objenious_operation USING btree (start_date) WHERE (end_date IS NULL);
--
-- Name: objenious_operation_change fk_operation_id; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.objenious_operation_change
ADD CONSTRAINT fk_operation_id FOREIGN KEY (operation_id) REFERENCES public.objenious_operation(id);
--
-- PostgreSQL database dump complete
--
\unrestrict KSNzZnHZpEceNNT6ECpaur3DZ2vfo46zPOlHHzj65wcCLFVfh0bZaudbdKNNiIC

View File

@@ -1,118 +1,79 @@
-- eliminar los drop para prod
drop domain if exists imei_type cascade;
CREATE DOMAIN imei_type as varchar(15);
drop domain if exists iccid_type cascade;
CREATE DOMAIN iccid_type as varchar(22);
drop domain if exists imsi_type cascade;
CREATE DOMAIN imsi_type as varchar(15);
-- Tablas para el seguimiento de las operaciones de SIM sin importar
-- la compañía.
CREATE table if not exists sim_cards (
id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
imei imei_type,
iccid iccid_type,
imsi imsi_type,
user_id BIGINT,
subscription_id BIGINT,
created_at TIMESTAMP,
last_update TIMESTAMP,
deleted_at TIMESTAMP
CREATE TYPE order_types AS ENUM ('activate','preactivate','cancel','pause','reactivate','unknown');
CREATE TYPE order_status AS ENUM (
'pending', -- Mensaje creado/enviado a RabbitMQ
'running', -- Consumidor ha cogido el mensaje (opcional)
'finished', -- Procesado correctamente
'failed', -- Falló, pero podría reintentarse (Pasar a delay?)
'dlx' -- Falló definitivamente y está en Dead Letter Exchange
);
CREATE TABLE IF NOT EXISTS order_tracking (
id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
correlation_id VARCHAR(255) NOT NULL, -- ID compartido con RabbitMQ (message_id)
exchange VARCHAR(100), -- Exchange al que se envia (de momento solo hay 1 principal sin contar delay y dlx)
routing_key VARCHAR(100), -- Routing key del mensaje
order_type order_types NOT NULL DEFAULT 'unknown',
CREATE TABLE if not exists sim_envio (
id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
codigo_origen TEXT,
codigo_distrito TEXT,
pedido_id BIGINT,
sim_id BIGINT,
fecha_envio TIMESTAMP,
fecha_email TIMESTAMP,
is_preactivado BOOLEAN,
fecha_devolucion TIMESTAMP,
created_at TIMESTAMP,
CONSTRAINT fk_sim_id
FOREIGN KEY(sim_id) REFERENCES sim_cards(id)
);
-- Mock, No es parte de SIMs
CREATE TABLE if not exists sf_subscription (
id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY
);
-- No habria que meterle las propiedades del tipo de subscripcion
CREATE TABLE if not exists sim_subscription_types (
id INT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
subscription TEXT NOT NULL,
created_at TIMESTAMP,
updated_at TIMESTAMP,
deleted_at TIMESTAMP
);
CREATE TABLE if not exists sim_company (
id INT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
name TEXT,
created_at TIMESTAMP,
updated_at TIMESTAMP,
deleted_at TIMESTAMP
);
CREATE TABLE sim_subscription (
id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
company_id INT,
subscription_type_id INT,
sim_id BIGINT,
order_id BIGINT,
created_at TIMESTAMP,
updated_at TIMESTAMP,
deleted_at TIMESTAMP,
CONSTRAINT fk_sim_id
FOREIGN KEY(sim_id) REFERENCES sim_cards(id),
CONSTRAINT fk_company_id
FOREIGN KEY(company_id) REFERENCES sim_company(id),
CONSTRAINT fk_subscription_type_id
FOREIGN KEY(subscription_type_id) REFERENCES sim_subscription_types(id)
);
CREATE TABLE if not exists sim_subscription_operations (
id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
sim_id BIGINT,
operation_type TEXT NOT NULL,
happened_at TIMESTAMP,
payload JSONB, -- Duda si es optimo guardar la copia, es útil en caso de fallo
CONSTRAINT valid_operations CHECK (
operation_type in ('free','preactivate','activate','pause','cancel')
),
-- Campos de reintentos?
status order_status NOT NULL DEFAULT 'pending',
retry_count INT DEFAULT 0,
error_message TEXT, -- Razón del fallo
error_stacktrace TEXT,
CONSTRAINT fk_subscription_id
FOREIGN KEY(sim_id)
REFERENCES sim_subscription(id)
start_date TIMESTAMP NOT NULL DEFAULT (now() at time zone 'utc'),
update_date TIMESTAMP NOT NULL DEFAULT (now() at time zone 'utc'),
finish_date TIMESTAMP
)
-- Busqueda según id de rabbit
CREATE INDEX IF NOT EXISTS idx_order_correlation
ON order_tracking(correlation_id);
-- Órdenes que todavía no han finalizado
CREATE INDEX IF NOT EXISTS pending_orders
ON order_tracking(start_date)
WHERE order_tracking.finish_date IS NULL;
CREATE TABLE IF NOT EXISTS order_history(
id SERIAL PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
order_id BIGINT NOT NULL,
previous_status order_status NOT NULL, -- Siempre hay un estado anterior, para casos excepcioneale "unknown"
new_status order_status NOT NULL,
change_reason TEXT,
change_date TIMESTAMP NOT NULL DEFAULT (now() at time zone 'utc'),
CONSTRAINT fk_order_id
FOREIGN KEY(order_id)
REFERENCES order_tracking(id)
ON DELETE CASCADE
);
-- Se supone que indica un cambio
CREATE TABLE sim_subscription_historic (
id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
subscription_id BIGINT,
iccid iccid_type,
company_id INT
);
-- fk de order
CREATE INDEX IF NOT EXISTS idx_order_id
ON order_history(order_id);
-- busquedas por fecha
CREATE INDEX IF NOT EXISTS idx_order_change_date
ON order_history(change_date);
CREATE TYPE status_enum AS ENUM ('noRequestID','noMassID','running','finished','error','other');
-- Tabla para gestionar las peticiones de cambio de objenious.
-- Para una o más líneas se pueden lanzar operaciones que no sabemos
-- con certeza cuándo van a terminar.
-- Estas tablas están fuertemente ligadas al sistema que usa la plataforma
-- de objenious y no deben usarse para otra compañía.
CREATE TABLE if not exists objenious_operation (
id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
retry_count INT DEFAULT 0,
max_retry INT DEFAULT 5,
max_date_retry TIMESTAMP DEFAULT NULL,
retry_count INT DEFAULT 0, -- No implementado en codigo
max_retry INT DEFAULT 5, -- No implementado en codigo
max_date_retry TIMESTAMP DEFAULT NULL, -- No implementado en codigo
iccids TEXT,
request_id TEXT,
mass_action_id TEXT,
@@ -128,7 +89,7 @@ CREATE TABLE if not exists objenious_operation (
-- operaciones pendientes para revisar
CREATE INDEX IF NOT EXISTS pending_operations
ON objenious_operation(start_date)
WHERE end_date IS NULL;
WHERE end_date IS NULL;
CREATE TABLE if not exists objenious_operation_change (
id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,

View File

@@ -0,0 +1,6 @@
-- Migration: make every timestamp default use UTC.
-- Before this, start_date already defaulted to UTC while last_change_date
-- and creation_date used the server-local now(); mixing the two skews
-- pending-operation queries that compare these columns.
-- NOTE(review): columns are TIMESTAMP WITHOUT TIME ZONE per the schema dump;
-- consider migrating to timestamptz eventually — TODO confirm.
ALTER TABLE objenious_operation
ALTER COLUMN start_date SET DEFAULT (now() at time zone 'utc'),
ALTER COLUMN last_change_date SET DEFAULT (now() at time zone 'utc');
ALTER TABLE objenious_operation_change
ALTER COLUMN creation_date SET DEFAULT (now() at time zone 'utc');

View File

@@ -0,0 +1,104 @@
#!/bin/bash
# Builds a flattened schema snapshot for a given semantic version:
#   1. creates a throwaway database,
#   2. replays the "base" scripts plus every migration <= target version,
#   3. dumps the resulting schema with pg_dump.
# Connection settings come from the standard PG* environment variables,
# optionally loaded from a .env file passed with -e.

# --- Always run from this script's directory so relative paths resolve ---
cd "$(dirname "$0")" || exit 1

# --- Default configuration ---
MIGRATIONS_DIR="./migrations"
OUTPUT_FILE_PREFIX="esquema_final"
# Timestamped name so a leftover database from a crashed run never clashes.
DB_NAME="temp_schema_build_$(date +%s)"

# --- Usage help ---
usage() {
    echo "Uso: $0 -v <version> [-e <ruta_env>]"
    echo "  -v   Versión semántica objetivo (ej: 1.2.0)"
    echo "  -e   (Opcional) Ruta al archivo .env para cargar variables"
    exit 1
}

# --- Parse flags: -v mandatory, -e optional ---
while getopts "v:e:" opt; do
    case $opt in
        v) TARGET_VERSION="$OPTARG" ;;
        e) ENV_PATH="$OPTARG" ;;
        *) usage ;;
    esac
done

# The target version is mandatory.
if [ -z "$TARGET_VERSION" ]; then
    echo "Error: La versión es obligatoria."
    usage
fi

# --- Load environment variables (PGHOST, PGUSER, ...) if requested ---
if [ -n "$ENV_PATH" ]; then
    if [ -f "$ENV_PATH" ]; then
        echo "~> Cargando configuración desde: $ENV_PATH"
        # allexport exports every variable assigned while sourcing the file.
        set -o allexport
        source "$ENV_PATH"
        set +o allexport
    else
        echo "Error: No se encontró el archivo .env en: $ENV_PATH"
        exit 1
    fi
else
    echo "!> No se especificó archivo .env, usando variables del sistema actual"
fi

# --- Safety net: always drop the temporary database on exit ---
cleanup() {
    echo "~> Limpiando: Eliminando base de datos temporal '$DB_NAME'"
    # Uses the same PG* connection variables as the rest of the script.
    dropdb --if-exists "$DB_NAME" 2>/dev/null
}
trap cleanup EXIT

# --- Build process ---
echo "~> Iniciando build para versión: $TARGET_VERSION"

# 1. Create the temporary database (on PGHOST if set, localhost otherwise).
#    Abort early: nothing below makes sense without it.
if ! createdb "$DB_NAME"; then
    echo "Error: no se pudo crear la base de datos temporal '$DB_NAME'"
    exit 1
fi

# 2. Concatenate the base scripts and run them.
rm -f init.sql                  # -f: a single file, never fail if absent
cat base/*.sql > init.sql
if [ -s "init.sql" ]; then      # -s: run only when something was collected
    echo "~> Ejecutando init.sql..."
    psql -v ON_ERROR_STOP=1 -d "$DB_NAME" -f init.sql >/dev/null || exit 1
fi

# 3. Apply every migration whose version is <= TARGET_VERSION.
echo "~> Aplicando migraciones hasta la versión $TARGET_VERSION..."
while IFS= read -r f; do
    [ -e "$f" ] || continue     # the glob matched nothing
    FILENAME=$(basename "$f")
    # Extract the version (accepts V1.0.0_desc.sql or 1.0.0_desc.sql).
    FILE_VER="${FILENAME#V}"
    FILE_VER="${FILE_VER%%_*}"
    # Semantic comparison: apply when FILE_VER sorts at or below the target.
    LOWEST=$(printf '%s\n%s\n' "$FILE_VER" "$TARGET_VERSION" | sort -V | head -n1)
    if [ "$LOWEST" = "$FILE_VER" ]; then
        echo "~> Aplicando: $FILENAME ($FILE_VER)"
        psql -v ON_ERROR_STOP=1 -d "$DB_NAME" -f "$f" >/dev/null || exit 1
    else
        echo "~> Saltando: $FILENAME ($FILE_VER) - Mayor que objetivo"
    fi
done < <(printf '%s\n' "$MIGRATIONS_DIR"/*.sql | sort -V)

# 4. Name the output after the target version.
OUTPUT_FILE="${OUTPUT_FILE_PREFIX}_v${TARGET_VERSION}.sql"

# 5. Dump the FINAL schema (structure only, no owners/privileges).
echo "~> Generando $OUTPUT_FILE ---"
pg_dump -d "$DB_NAME" -s --no-owner --no-privileges > "$OUTPUT_FILE"
echo "o> Esquema guardado en $OUTPUT_FILE"

View File

@@ -56,12 +56,37 @@ describe("Test OrderRepository", {}, () => {
assert(order.correlation_id == order1.correlation_id)
})
it("Get pending orders should return a list including the test order", async () => {
// Verifies getPendingOrders() returns every unfinished order and that the
// repository sorts them ascending (by start_date per the test's own comment).
it("Get pending orders should return all pending orders in ASC order", async () => {
// We already have 'testId' from before block
// Insert two more orders
const orderA = { ...order1, correlation_id: "pending-A" }
const orderB = { ...order1, correlation_id: "pending-B" }
const resA = await orderRepo.createOrder(orderA)
const resB = await orderRepo.createOrder(orderB)
// NOTE(review): assumes createOrder succeeded; resA.data would be undefined
// on error — consider asserting resA.error/resB.error first.
const idA = resA.data.id
const idB = resB.data.id
const result = await orderRepo.getPendingOrders()
assert(result.error == undefined)
assert(Array.isArray(result.data))
// NOTE(review): the find/found pair below duplicates the ids.includes(testId!)
// check further down — looks like leftover code from the previous version.
const found = result.data.find(o => o.id === testId)
assert(found != undefined)
// The list should contain at least our 3 orders (testId, idA, idB)
const ids = result.data.map(o => o.id)
assert(ids.includes(testId!))
assert(ids.includes(idA))
assert(ids.includes(idB))
// Verify ordering (ASC by start_date, which maps to ID order in this sequential test)
const indexTest = result.data.findIndex(o => o.id === testId)
const indexA = result.data.findIndex(o => o.id === idA)
const indexB = result.data.findIndex(o => o.id === idB)
assert(indexTest < indexA)
assert(indexA < indexB)
})
it("Update order status should change status and add history", async () => {

View File

@@ -1,4 +1,7 @@
#!/bin/bash
# Rebuilds deployment/database/init.sql from the "base" scripts only
# (migrations are intentionally excluded), then starts the local stack.
# Fixes: shebang was missing the '!'; rm now uses -f so a missing init.sql
# does not abort the first run; dropped the stale cat over database/*.sql
# which would have bundled non-base scripts as well.
rm -f deployment/database/init.sql
cat deployment/database/base/*.sql >deployment/database/init.sql
docker compose -f deployment/local/docker/docker-compose.yaml --project-directory ./ up --watch