Initial API implementation
This commit is contained in:
parent
ddb85373ac
commit
9138add35c
10
.gitignore
vendored
Normal file
10
.gitignore
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
node_modules/
|
||||
.next/
|
||||
dist/
|
||||
.env
|
||||
.env.*
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
.vscode/
|
||||
.idea/
|
||||
17
index.js
Normal file
17
index.js
Normal file
@ -0,0 +1,17 @@
|
||||
import express from 'express';
|
||||
import dotenv from 'dotenv';
|
||||
import { provisionQueue } from './src/queues/provisionQueue.js';
|
||||
|
||||
dotenv.config();
|
||||
|
||||
const app = express();
|
||||
app.use(express.json());
|
||||
|
||||
app.post('/provision', async (req, res) => {
|
||||
const job = await provisionQueue.add('provision', req.body);
|
||||
res.json({ jobId: job.id });
|
||||
});
|
||||
|
||||
app.listen(3000, () => {
|
||||
console.log('API running on http://localhost:3000');
|
||||
});
|
||||
7
keys/zlh-api
Normal file
7
keys/zlh-api
Normal file
@ -0,0 +1,7 @@
|
||||
-----BEGIN OPENSSH PRIVATE KEY-----
|
||||
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
|
||||
QyNTUxOQAAACD05B9qt4j0NO+BYQQpxXnffKNNm8R4c2LoNTty1FOB7AAAAJAm596GJufe
|
||||
hgAAAAtzc2gtZWQyNTUxOQAAACD05B9qt4j0NO+BYQQpxXnffKNNm8R4c2LoNTty1FOB7A
|
||||
AAAEDiTJo0HsU+BTZXvZZDLwmC5XryOXLKc33WhWmdt6ZY+fTkH2q3iPQ074FhBCnFed98
|
||||
o02bxHhzYug1O3LUU4HsAAAAC3psaC1hcGkga2V5AQI=
|
||||
-----END OPENSSH PRIVATE KEY-----
|
||||
1
keys/zlh-api.pub
Normal file
1
keys/zlh-api.pub
Normal file
@ -0,0 +1 @@
|
||||
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPTkH2q3iPQ074FhBCnFed98o02bxHhzYug1O3LUU4Hs zlh-api key
|
||||
2564
package-lock.json
generated
Normal file
2564
package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
36
package.json
Normal file
36
package.json
Normal file
@ -0,0 +1,36 @@
|
||||
{
|
||||
"name": "zpack-api",
|
||||
"version": "1.0.0",
|
||||
"type": "module",
|
||||
"main": "src/app.js",
|
||||
"scripts": {
|
||||
"dev": "nodemon src/app.js",
|
||||
"start": "node src/app.js",
|
||||
"worker": "node src/worker.js",
|
||||
"prisma:generate": "prisma generate",
|
||||
"prisma:migrate": "prisma migrate dev -n api_only_init",
|
||||
"migrate": "prisma migrate dev",
|
||||
"seed": "prisma db seed"
|
||||
},
|
||||
"prisma": {
|
||||
"seed": "node src/prisma/seed-all.js"
|
||||
},
|
||||
"dependencies": {
|
||||
"@prisma/client": "^6.13.0",
|
||||
"argon2": "^0.43.1",
|
||||
"axios": "^1.11.0",
|
||||
"bullmq": "^5.56.9",
|
||||
"compression": "^1.8.1",
|
||||
"cors": "^2.8.5",
|
||||
"dotenv": "^17.2.1",
|
||||
"express": "^5.1.0",
|
||||
"ioredis": "^5.7.0",
|
||||
"node-fetch": "^3.3.2",
|
||||
"ssh2": "^1.16.0",
|
||||
"ws": "^8.18.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"nodemon": "^3.1.10",
|
||||
"prisma": "^6.13.0"
|
||||
}
|
||||
}
|
||||
134
prisma/migrations/20251115193513_init/migration.sql
Normal file
134
prisma/migrations/20251115193513_init/migration.sql
Normal file
@ -0,0 +1,134 @@
|
||||
-- CreateTable
|
||||
CREATE TABLE `ContainerTemplate` (
|
||||
`id` INTEGER NOT NULL AUTO_INCREMENT,
|
||||
`slug` VARCHAR(191) NOT NULL,
|
||||
`game` VARCHAR(191) NULL,
|
||||
`variant` VARCHAR(191) NULL,
|
||||
`ctype` ENUM('game', 'dev') NOT NULL,
|
||||
`templateVmid` INTEGER NOT NULL,
|
||||
`resources` JSON NULL,
|
||||
`network` JSON NULL,
|
||||
`files` JSON NULL,
|
||||
`startup` JSON NULL,
|
||||
`storage` VARCHAR(191) NULL,
|
||||
`tags` VARCHAR(191) NULL,
|
||||
`features` JSON NULL,
|
||||
`createdAt` DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
|
||||
`updatedAt` DATETIME(3) NOT NULL,
|
||||
|
||||
UNIQUE INDEX `ContainerTemplate_slug_key`(`slug`),
|
||||
PRIMARY KEY (`id`)
|
||||
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE `ContainerInstance` (
|
||||
`vmid` INTEGER NOT NULL,
|
||||
`ctype` ENUM('game', 'dev') NOT NULL,
|
||||
`game` VARCHAR(191) NULL,
|
||||
`variant` VARCHAR(191) NULL,
|
||||
`bridge` VARCHAR(191) NOT NULL,
|
||||
`status` VARCHAR(191) NOT NULL,
|
||||
`tags` VARCHAR(191) NULL,
|
||||
`description` VARCHAR(191) NULL,
|
||||
`templateId` INTEGER NULL,
|
||||
`name` VARCHAR(191) NOT NULL,
|
||||
`hostname` VARCHAR(191) NULL,
|
||||
`ip` VARCHAR(191) NULL,
|
||||
`ingress` JSON NULL,
|
||||
`ports` JSON NULL,
|
||||
`createdAt` DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
|
||||
`updatedAt` DATETIME(3) NOT NULL,
|
||||
|
||||
UNIQUE INDEX `ContainerInstance_hostname_key`(`hostname`),
|
||||
INDEX `ContainerInstance_ctype_status_idx`(`ctype`, `status`),
|
||||
PRIMARY KEY (`vmid`)
|
||||
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE `PortPool` (
|
||||
`port` INTEGER NOT NULL,
|
||||
`protocol` VARCHAR(191) NOT NULL,
|
||||
`status` VARCHAR(191) NOT NULL,
|
||||
`portType` ENUM('GAME', 'DEV') NOT NULL,
|
||||
`vmid` INTEGER NULL,
|
||||
`createdAt` DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
|
||||
`updatedAt` DATETIME(3) NOT NULL,
|
||||
|
||||
INDEX `PortPool_status_portType_idx`(`status`, `portType`),
|
||||
PRIMARY KEY (`port`)
|
||||
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE `HostSlot` (
|
||||
`id` INTEGER NOT NULL AUTO_INCREMENT,
|
||||
`label` VARCHAR(191) NOT NULL,
|
||||
`slot` INTEGER NOT NULL,
|
||||
`hostname` VARCHAR(191) NOT NULL,
|
||||
`basePort` INTEGER NOT NULL,
|
||||
`status` VARCHAR(191) NOT NULL,
|
||||
`createdAt` DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
|
||||
`updatedAt` DATETIME(3) NOT NULL,
|
||||
|
||||
UNIQUE INDEX `HostSlot_hostname_key`(`hostname`),
|
||||
INDEX `HostSlot_label_status_idx`(`label`, `status`),
|
||||
INDEX `HostSlot_slot_idx`(`slot`),
|
||||
PRIMARY KEY (`id`)
|
||||
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE `DeletedInstance` (
|
||||
`id` INTEGER NOT NULL AUTO_INCREMENT,
|
||||
`vmid` INTEGER NOT NULL,
|
||||
`hostname` VARCHAR(191) NULL,
|
||||
`data` JSON NULL,
|
||||
`deletedAt` DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
|
||||
|
||||
PRIMARY KEY (`id`)
|
||||
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE `JobLog` (
|
||||
`id` INTEGER NOT NULL AUTO_INCREMENT,
|
||||
`jobType` VARCHAR(191) NOT NULL,
|
||||
`vmid` INTEGER NULL,
|
||||
`hostname` VARCHAR(191) NULL,
|
||||
`payload` JSON NULL,
|
||||
`result` VARCHAR(191) NULL,
|
||||
`createdAt` DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
|
||||
|
||||
PRIMARY KEY (`id`)
|
||||
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE `EdgeState` (
|
||||
`id` INTEGER NOT NULL AUTO_INCREMENT,
|
||||
`vmid` INTEGER NULL,
|
||||
`hostname` VARCHAR(191) NULL,
|
||||
`lastSync` DATETIME(3) NULL DEFAULT CURRENT_TIMESTAMP(3),
|
||||
`dnsState` JSON NULL,
|
||||
`proxyState` JSON NULL,
|
||||
`velocity` JSON NULL,
|
||||
|
||||
PRIMARY KEY (`id`)
|
||||
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE `SystemConfig` (
|
||||
`key` VARCHAR(191) NOT NULL,
|
||||
`value` VARCHAR(191) NULL,
|
||||
`updatedAt` DATETIME(3) NOT NULL,
|
||||
|
||||
PRIMARY KEY (`key`)
|
||||
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE `VmidCounter` (
|
||||
`key` VARCHAR(191) NOT NULL,
|
||||
`current` INTEGER NOT NULL,
|
||||
`updatedAt` DATETIME(3) NOT NULL,
|
||||
|
||||
PRIMARY KEY (`key`)
|
||||
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
|
||||
-- AddForeignKey
|
||||
ALTER TABLE `ContainerInstance` ADD CONSTRAINT `ContainerInstance_templateId_fkey` FOREIGN KEY (`templateId`) REFERENCES `ContainerTemplate`(`id`) ON DELETE SET NULL ON UPDATE CASCADE;
|
||||
@ -0,0 +1,142 @@
|
||||
/*
|
||||
Warnings:
|
||||
|
||||
- You are about to drop the column `bridge` on the `ContainerInstance` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `name` on the `ContainerInstance` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `files` on the `ContainerTemplate` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `network` on the `ContainerTemplate` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `startup` on the `ContainerTemplate` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `storage` on the `ContainerTemplate` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `tags` on the `ContainerTemplate` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `data` on the `DeletedInstance` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `dnsState` on the `EdgeState` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `lastSync` on the `EdgeState` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `proxyState` on the `EdgeState` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `velocity` on the `EdgeState` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `hostname` on the `JobLog` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `payload` on the `JobLog` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `result` on the `JobLog` table. All the data in the column will be lost.
|
||||
- The primary key for the `PortPool` table will be changed. If it partially fails, the table could be left without primary key constraint.
|
||||
- You are about to drop the column `protocol` on the `PortPool` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `vmid` on the `PortPool` table. All the data in the column will be lost.
|
||||
- You are about to alter the column `status` on the `PortPool` table. The data in that column could be lost. The data in that column will be cast from `VarChar(191)` to `Enum(EnumId(2))`.
|
||||
- You are about to alter the column `portType` on the `PortPool` table. The data in that column could be lost. The data in that column will be cast from `Enum(EnumId(1))` to `VarChar(191)`.
|
||||
- You are about to drop the column `updatedAt` on the `SystemConfig` table. All the data in the column will be lost.
|
||||
- You are about to alter the column `value` on the `SystemConfig` table. The data in that column could be lost. The data in that column will be cast from `VarChar(191)` to `Json`.
|
||||
- You are about to drop the `HostSlot` table. If the table is not empty, all the data it contains will be lost.
|
||||
- A unique constraint covering the columns `[game,variant,ctype]` on the table `ContainerTemplate` will be added. If there are existing duplicate values, this will fail.
|
||||
- A unique constraint covering the columns `[port]` on the table `PortPool` will be added. If there are existing duplicate values, this will fail.
|
||||
- Made the column `hostname` on table `ContainerInstance` required. This step will fail if there are existing NULL values in that column.
|
||||
- Added the required column `defBridge` to the `ContainerTemplate` table without a default value. This is not possible if the table is not empty.
|
||||
- Made the column `game` on table `ContainerTemplate` required. This step will fail if there are existing NULL values in that column.
|
||||
- Made the column `variant` on table `ContainerTemplate` required. This step will fail if there are existing NULL values in that column.
|
||||
- Made the column `hostname` on table `DeletedInstance` required. This step will fail if there are existing NULL values in that column.
|
||||
- Added the required column `updatedAt` to the `EdgeState` table without a default value. This is not possible if the table is not empty.
|
||||
- Made the column `vmid` on table `EdgeState` required. This step will fail if there are existing NULL values in that column.
|
||||
- Made the column `hostname` on table `EdgeState` required. This step will fail if there are existing NULL values in that column.
|
||||
- Added the required column `state` to the `JobLog` table without a default value. This is not possible if the table is not empty.
|
||||
- Added the required column `id` to the `PortPool` table without a default value. This is not possible if the table is not empty.
|
||||
|
||||
*/
|
||||
-- DropIndex
|
||||
DROP INDEX `ContainerInstance_ctype_status_idx` ON `ContainerInstance`;
|
||||
|
||||
-- DropIndex
|
||||
DROP INDEX `ContainerInstance_hostname_key` ON `ContainerInstance`;
|
||||
|
||||
-- AlterTable
|
||||
ALTER TABLE `ContainerInstance` DROP COLUMN `bridge`,
|
||||
DROP COLUMN `name`,
|
||||
ADD COLUMN `customerId` VARCHAR(191) NULL,
|
||||
MODIFY `hostname` VARCHAR(191) NOT NULL;
|
||||
|
||||
-- AlterTable
|
||||
ALTER TABLE `ContainerTemplate` DROP COLUMN `files`,
|
||||
DROP COLUMN `network`,
|
||||
DROP COLUMN `startup`,
|
||||
DROP COLUMN `storage`,
|
||||
DROP COLUMN `tags`,
|
||||
ADD COLUMN `defBridge` VARCHAR(191) NOT NULL,
|
||||
MODIFY `game` VARCHAR(191) NOT NULL,
|
||||
MODIFY `variant` VARCHAR(191) NOT NULL;
|
||||
|
||||
-- AlterTable
|
||||
ALTER TABLE `DeletedInstance` DROP COLUMN `data`,
|
||||
ADD COLUMN `customerId` VARCHAR(191) NULL,
|
||||
ADD COLUMN `game` VARCHAR(191) NULL,
|
||||
ADD COLUMN `ip` VARCHAR(191) NULL,
|
||||
ADD COLUMN `notes` VARCHAR(191) NULL,
|
||||
ADD COLUMN `ports` JSON NULL,
|
||||
ADD COLUMN `reason` VARCHAR(191) NULL,
|
||||
ADD COLUMN `variant` VARCHAR(191) NULL,
|
||||
MODIFY `hostname` VARCHAR(191) NOT NULL;
|
||||
|
||||
-- AlterTable
|
||||
ALTER TABLE `EdgeState` DROP COLUMN `dnsState`,
|
||||
DROP COLUMN `lastSync`,
|
||||
DROP COLUMN `proxyState`,
|
||||
DROP COLUMN `velocity`,
|
||||
ADD COLUMN `createdAt` DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
|
||||
ADD COLUMN `edgeIp` VARCHAR(191) NULL,
|
||||
ADD COLUMN `ip` VARCHAR(191) NULL,
|
||||
ADD COLUMN `updatedAt` DATETIME(3) NOT NULL,
|
||||
MODIFY `vmid` INTEGER NOT NULL,
|
||||
MODIFY `hostname` VARCHAR(191) NOT NULL;
|
||||
|
||||
-- AlterTable
|
||||
ALTER TABLE `JobLog` DROP COLUMN `hostname`,
|
||||
DROP COLUMN `payload`,
|
||||
DROP COLUMN `result`,
|
||||
ADD COLUMN `message` VARCHAR(191) NULL,
|
||||
ADD COLUMN `state` VARCHAR(191) NOT NULL;
|
||||
|
||||
-- AlterTable
|
||||
ALTER TABLE `PortPool` DROP PRIMARY KEY,
|
||||
DROP COLUMN `protocol`,
|
||||
DROP COLUMN `vmid`,
|
||||
ADD COLUMN `allocatedTo` INTEGER NULL,
|
||||
ADD COLUMN `id` INTEGER NOT NULL AUTO_INCREMENT,
|
||||
MODIFY `status` ENUM('free', 'allocated') NOT NULL DEFAULT 'free',
|
||||
MODIFY `portType` VARCHAR(191) NOT NULL,
|
||||
ADD PRIMARY KEY (`id`);
|
||||
|
||||
-- AlterTable
|
||||
ALTER TABLE `SystemConfig` DROP COLUMN `updatedAt`,
|
||||
MODIFY `value` JSON NULL;
|
||||
|
||||
-- DropTable
|
||||
DROP TABLE `HostSlot`;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE `AuditLog` (
|
||||
`id` INTEGER NOT NULL AUTO_INCREMENT,
|
||||
`action` VARCHAR(191) NOT NULL,
|
||||
`actor` VARCHAR(191) NULL,
|
||||
`payload` JSON NULL,
|
||||
`createdAt` DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
|
||||
|
||||
PRIMARY KEY (`id`)
|
||||
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE `Customer` (
|
||||
`id` VARCHAR(191) NOT NULL,
|
||||
`email` VARCHAR(191) NULL,
|
||||
`name` VARCHAR(191) NULL,
|
||||
`createdAt` DATETIME(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
|
||||
`updatedAt` DATETIME(3) NOT NULL,
|
||||
|
||||
PRIMARY KEY (`id`)
|
||||
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
|
||||
-- CreateIndex
|
||||
CREATE INDEX `ContainerInstance_hostname_idx` ON `ContainerInstance`(`hostname`);
|
||||
|
||||
-- CreateIndex
|
||||
CREATE INDEX `ContainerInstance_customerId_idx` ON `ContainerInstance`(`customerId`);
|
||||
|
||||
-- CreateIndex
|
||||
CREATE UNIQUE INDEX `ContainerTemplate_game_variant_ctype_key` ON `ContainerTemplate`(`game`, `variant`, `ctype`);
|
||||
|
||||
-- CreateIndex
|
||||
CREATE UNIQUE INDEX `PortPool_port_key` ON `PortPool`(`port`);
|
||||
@ -0,0 +1,40 @@
|
||||
/*
|
||||
Warnings:
|
||||
|
||||
- You are about to drop the column `description` on the `ContainerInstance` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `game` on the `ContainerInstance` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `ingress` on the `ContainerInstance` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `ports` on the `ContainerInstance` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `status` on the `ContainerInstance` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `tags` on the `ContainerInstance` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `templateId` on the `ContainerInstance` table. All the data in the column will be lost.
|
||||
- You are about to drop the column `variant` on the `ContainerInstance` table. All the data in the column will be lost.
|
||||
- You are about to drop the `ContainerTemplate` table. If the table is not empty, all the data it contains will be lost.
|
||||
- Added the required column `agentState` to the `ContainerInstance` table without a default value. This is not possible if the table is not empty.
|
||||
- Added the required column `payload` to the `ContainerInstance` table without a default value. This is not possible if the table is not empty.
|
||||
|
||||
*/
|
||||
-- DropForeignKey
|
||||
ALTER TABLE `ContainerInstance` DROP FOREIGN KEY `ContainerInstance_templateId_fkey`;
|
||||
|
||||
-- DropIndex
|
||||
DROP INDEX `ContainerInstance_templateId_fkey` ON `ContainerInstance`;
|
||||
|
||||
-- AlterTable
|
||||
ALTER TABLE `ContainerInstance` DROP COLUMN `description`,
|
||||
DROP COLUMN `game`,
|
||||
DROP COLUMN `ingress`,
|
||||
DROP COLUMN `ports`,
|
||||
DROP COLUMN `status`,
|
||||
DROP COLUMN `tags`,
|
||||
DROP COLUMN `templateId`,
|
||||
DROP COLUMN `variant`,
|
||||
ADD COLUMN `agentLastSeen` DATETIME(3) NULL,
|
||||
ADD COLUMN `agentState` VARCHAR(191) NOT NULL,
|
||||
ADD COLUMN `allocatedPorts` JSON NULL,
|
||||
ADD COLUMN `crashCount` INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN `lastCrashAt` DATETIME(3) NULL,
|
||||
ADD COLUMN `payload` JSON NOT NULL;
|
||||
|
||||
-- DropTable
|
||||
DROP TABLE `ContainerTemplate`;
|
||||
3
prisma/migrations/migration_lock.toml
Normal file
3
prisma/migrations/migration_lock.toml
Normal file
@ -0,0 +1,3 @@
|
||||
# Please do not edit this file manually
|
||||
# It should be added in your version-control system (e.g., Git)
|
||||
provider = "mysql"
|
||||
148
prisma/schema.prisma
Normal file
148
prisma/schema.prisma
Normal file
@ -0,0 +1,148 @@
|
||||
// prisma/schema.prisma
|
||||
|
||||
generator client {
|
||||
provider = "prisma-client-js"
|
||||
}
|
||||
|
||||
datasource db {
|
||||
provider = "mysql"
|
||||
url = env("DATABASE_URL")
|
||||
shadowDatabaseUrl = env("SHADOW_DATABASE_URL")
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////
|
||||
// ENUMS
|
||||
//////////////////////////////////////////////////////////
|
||||
|
||||
enum CType {
|
||||
game
|
||||
dev
|
||||
}
|
||||
|
||||
enum PortStatus {
|
||||
free
|
||||
allocated
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////
|
||||
// ACTIVE CONTAINERS (Agent-Driven v2)
|
||||
//////////////////////////////////////////////////////////
|
||||
|
||||
model ContainerInstance {
|
||||
vmid Int @id
|
||||
customerId String?
|
||||
ctype CType
|
||||
hostname String
|
||||
ip String?
|
||||
|
||||
// Ports allocated to this VMID (e.g. { "game": [50000], "rcon": [50001] })
|
||||
allocatedPorts Json?
|
||||
|
||||
// Exact payload.json written into /opt/zlh-agent/config/payload.json
|
||||
payload Json
|
||||
|
||||
// Agent-reported state: idle | installing | running | crashed | error
|
||||
agentState String
|
||||
|
||||
// Last time we successfully talked to the agent (/status)
|
||||
agentLastSeen DateTime?
|
||||
|
||||
// Crash tracking
|
||||
crashCount Int @default(0)
|
||||
lastCrashAt DateTime?
|
||||
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
|
||||
@@index([hostname])
|
||||
@@index([customerId])
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////
|
||||
// DELETED INSTANCES (AUDIT TRAIL)
|
||||
//////////////////////////////////////////////////////////
|
||||
|
||||
model DeletedInstance {
|
||||
id Int @id @default(autoincrement())
|
||||
vmid Int
|
||||
customerId String?
|
||||
hostname String
|
||||
game String?
|
||||
variant String?
|
||||
ports Json?
|
||||
ip String?
|
||||
reason String?
|
||||
notes String?
|
||||
deletedAt DateTime @default(now())
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////
|
||||
// PORT ALLOCATION POOL
|
||||
//////////////////////////////////////////////////////////
|
||||
|
||||
model PortPool {
|
||||
id Int @id @default(autoincrement())
|
||||
port Int
|
||||
portType String // "game" | "dev" | custom
|
||||
status PortStatus @default(free)
|
||||
allocatedTo Int? // vmid
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
|
||||
@@unique([port])
|
||||
@@index([status, portType])
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////
|
||||
// VMID COUNTERS
|
||||
//////////////////////////////////////////////////////////
|
||||
|
||||
model VmidCounter {
|
||||
key String @id // "game" | "dev"
|
||||
current Int
|
||||
updatedAt DateTime @updatedAt
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////
|
||||
// SYSTEM & EDGE STATE
|
||||
//////////////////////////////////////////////////////////
|
||||
|
||||
model SystemConfig {
|
||||
key String @id
|
||||
value Json?
|
||||
}
|
||||
|
||||
model EdgeState {
|
||||
id Int @id @default(autoincrement())
|
||||
vmid Int
|
||||
hostname String
|
||||
ip String?
|
||||
edgeIp String?
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
}
|
||||
|
||||
model JobLog {
|
||||
id Int @id @default(autoincrement())
|
||||
vmid Int?
|
||||
jobType String
|
||||
state String
|
||||
message String?
|
||||
createdAt DateTime @default(now())
|
||||
}
|
||||
|
||||
model AuditLog {
|
||||
id Int @id @default(autoincrement())
|
||||
action String
|
||||
actor String?
|
||||
payload Json?
|
||||
createdAt DateTime @default(now())
|
||||
}
|
||||
|
||||
model Customer {
|
||||
id String @id
|
||||
email String?
|
||||
name String?
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
}
|
||||
1
prom_sd_token.txt
Normal file
1
prom_sd_token.txt
Normal file
@ -0,0 +1 @@
|
||||
oQYsYJANvj2v_0shf_h_rpcD_J536DB5utgq29pVapmd5h9WXN_zONLI14zqt76s
|
||||
12
src/ansible/runProvision.js
Normal file
12
src/ansible/runProvision.js
Normal file
@ -0,0 +1,12 @@
|
||||
import { exec } from 'child_process';
|
||||
import util from 'util';
|
||||
|
||||
const execAsync = util.promisify(exec);
|
||||
|
||||
export async function runProvisionPlaybook(data) {
|
||||
const { user_id, game, ports, mode } = data;
|
||||
const cmd = `ansible-playbook provision.yml --extra-vars 'user_id=${user_id} game=${game} ports=${ports} mode=${mode}'`;
|
||||
const { stdout, stderr } = await execAsync(cmd);
|
||||
if (stderr) throw new Error(stderr);
|
||||
console.log(stdout);
|
||||
}
|
||||
305
src/api/provision.js
Normal file
305
src/api/provision.js
Normal file
@ -0,0 +1,305 @@
|
||||
// src/api/provision.js
|
||||
// Orchestrates: clone → config → start → hook → IP → DB → enqueue (commit in worker).
|
||||
|
||||
import 'dotenv/config';
|
||||
import crypto from 'node:crypto';
|
||||
|
||||
import { getTemplateOrThrow } from '../services/templateResolver.js';
|
||||
import proxmox from '../services/proxmoxClient.js';
|
||||
import prisma from '../services/prisma.js';
|
||||
import { PortAllocationService } from '../services/portAllocator.js';
|
||||
import { allocateVmid, confirmVmidAllocated, releaseVmid } from '../services/vmidAllocator.js';
|
||||
import { enqueuePublishEdge } from '../queues/postProvision.js';
|
||||
import { writeSlotEnv } from '../services/envFileWriter.js';
|
||||
import { getCtIpWithRetry } from '../services/getCtIp.js';
|
||||
|
||||
const SLEEP_AFTER_START_MS = Number(process.env.CT_EXEC_GRACE_MS || 8000); // default 8s
|
||||
const STEP_DELAY_MS = 2500; // pause between steps
|
||||
const sleep = (ms) => new Promise((r) => setTimeout(r, ms));
|
||||
|
||||
function sanitizeHostname(s) {
|
||||
return String(s || '')
|
||||
.trim()
|
||||
.toLowerCase()
|
||||
.replace(/[^a-z0-9-]/g, '-')
|
||||
.replace(/-+/g, '-')
|
||||
.replace(/^-|-$/g, '');
|
||||
}
|
||||
|
||||
function mergeResources(template, override) {
|
||||
const t = template?.resources || {};
|
||||
const o = override || {};
|
||||
let cpu = o.cpu ?? t.cpu ?? 2;
|
||||
cpu = Math.max(1, Math.min(cpu, 3));
|
||||
return {
|
||||
cpu,
|
||||
memory: o.memory ?? t.memory ?? 1024,
|
||||
disk: o.disk ?? t.disk ?? 0,
|
||||
};
|
||||
}
|
||||
|
||||
function pickBridge(ctype, template) {
|
||||
return template?.network?.bridge || (ctype === 'dev' ? 'vmbr2' : 'vmbr3');
|
||||
}
|
||||
|
||||
// --- UPID helpers ---
|
||||
function isUpidError(error) {
|
||||
const msg = (error?.message || error?.response?.data?.errors?.upid || '').toLowerCase();
|
||||
return msg.includes('unable to parse worker upid') || (msg.includes('upid') && msg.includes('parse'));
|
||||
}
|
||||
|
||||
async function waitForTask(node, upid, timeoutMs = 180000, everyMs = 2000) {
|
||||
if (!upid) throw new Error('No UPID provided');
|
||||
const deadline = Date.now() + timeoutMs;
|
||||
let backoffMs = everyMs;
|
||||
|
||||
while (Date.now() < deadline) {
|
||||
try {
|
||||
const st = await proxmox.getTaskStatus(upid);
|
||||
if (st.status === 'stopped') {
|
||||
if (st.exitstatus === 'OK') return true;
|
||||
throw new Error(`task ${upid} failed: ${st.exitstatus}`);
|
||||
}
|
||||
await sleep(backoffMs);
|
||||
} catch (err) {
|
||||
if (isUpidError(err)) {
|
||||
console.warn(`[provision] UPID error detected: ${err.message}`);
|
||||
throw err;
|
||||
}
|
||||
backoffMs = Math.min(backoffMs * 1.5, 10000);
|
||||
await sleep(backoffMs);
|
||||
}
|
||||
}
|
||||
throw new Error(`task ${upid} timed out`);
|
||||
}
|
||||
|
||||
async function executeTaskSafely(taskPromise, vmid, expectedStatus, operation) {
|
||||
try {
|
||||
const task = await taskPromise;
|
||||
if (!task || !task.upid) {
|
||||
console.warn(`[provision] No UPID for ${operation}, falling back to status polling`);
|
||||
if (expectedStatus) {
|
||||
return await proxmox.waitForStatus(vmid, expectedStatus, { timeoutMs: 180000 });
|
||||
}
|
||||
return true;
|
||||
}
|
||||
await waitForTask('zlh-prod1', task.upid);
|
||||
return true;
|
||||
} catch (err) {
|
||||
if (isUpidError(err) && expectedStatus) {
|
||||
console.warn(`[provision] UPID error in ${operation}, falling back to status polling`);
|
||||
return await proxmox.waitForStatus(vmid, expectedStatus, { timeoutMs: 180000 });
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Post-start hook ---
|
||||
async function runGamePostStartHook({ game, vmid, ports }) {
|
||||
if (String(game || '').toLowerCase() !== 'minecraft') return;
|
||||
console.log(`[hook] Minecraft env injection scheduled for vmid=${vmid}`);
|
||||
}
|
||||
|
||||
// === Main ===
|
||||
export async function createContainer(body) {
|
||||
const {
|
||||
templateSlug,
|
||||
game,
|
||||
variant,
|
||||
ctype: ctypeReq,
|
||||
name,
|
||||
customerId,
|
||||
resources: resourcesOverride,
|
||||
portsNeeded = 0,
|
||||
storage,
|
||||
} = body || {};
|
||||
|
||||
console.log('[provision] STEP 0: Starting container creation request');
|
||||
if (!templateSlug && !(game && variant)) throw new Error('templateSlug required');
|
||||
if (!customerId) throw new Error('customerId required');
|
||||
|
||||
console.log('[provision] STEP 1: Resolving template');
|
||||
const template = await getTemplateOrThrow({ templateSlug, game, variant });
|
||||
await sleep(STEP_DELAY_MS);
|
||||
|
||||
const ctype = String(ctypeReq || template?.ctype || 'game');
|
||||
const gameFinal = game || template?.game || null;
|
||||
const variantFin = variant || template?.variant || null;
|
||||
|
||||
let vmid, allocatedPorts = [], txnId, slotHostname, instanceHostname;
|
||||
|
||||
try {
|
||||
console.log('[provision] STEP 2: Allocating VMID');
|
||||
vmid = await allocateVmid(ctype);
|
||||
await sleep(STEP_DELAY_MS);
|
||||
|
||||
if (portsNeeded > 0) {
|
||||
console.log('[provision] STEP 3: Reserving ports');
|
||||
txnId = crypto.randomUUID();
|
||||
let ports = await PortAllocationService.reserve({
|
||||
game: gameFinal,
|
||||
variant: variantFin,
|
||||
customerId,
|
||||
vmid,
|
||||
purpose: ctype === 'game' ? 'game_main' : 'dev',
|
||||
txnId,
|
||||
count: portsNeeded,
|
||||
});
|
||||
if (Array.isArray(ports) && typeof ports[0] === 'object') ports = ports.map((p) => p.port);
|
||||
allocatedPorts = ports;
|
||||
await sleep(STEP_DELAY_MS);
|
||||
}
|
||||
|
||||
// --- PREPARE CONFIG VALUES ---
|
||||
const res = mergeResources(template, resourcesOverride);
|
||||
const bridge = pickBridge(ctype, template);
|
||||
instanceHostname = sanitizeHostname(name || `${template.slug}-${vmid}`);
|
||||
const ZONE = process.env.TECHNITIUM_ZONE || 'zerolaghub.quest';
|
||||
slotHostname = `${instanceHostname}.${ZONE}`; // FQDN for DNS/Traefik
|
||||
const store = storage || template.storage || process.env.PROXMOX_STORAGE;
|
||||
|
||||
const tagsStr = [
|
||||
`cust-${customerId}`,
|
||||
`type-${ctype}`,
|
||||
gameFinal ? `game-${gameFinal}` : null,
|
||||
variantFin ? `var-${variantFin}` : null,
|
||||
txnId ? `txn-${txnId}` : null,
|
||||
].filter(Boolean).join(',');
|
||||
|
||||
const description = `customer=${customerId}; template=${template.slug}; vmid=${vmid}; txn=${txnId || 'n/a'}`;
|
||||
|
||||
console.log('[provision] STEP 4: Writing env file');
|
||||
await writeSlotEnv(vmid, {
|
||||
GAME: gameFinal,
|
||||
PORT: allocatedPorts[0],
|
||||
HOSTNAME: instanceHostname, // ✅ short hostname inside container
|
||||
MAX_PLAYERS: 20,
|
||||
MOTD: `ZeroLagHub ${gameFinal || 'Game'}`,
|
||||
});
|
||||
await sleep(STEP_DELAY_MS);
|
||||
|
||||
console.log('[provision] STEP 5: Cloning container');
|
||||
await executeTaskSafely(
|
||||
proxmox.cloneContainer({ templateVmid: template.templateVmid, vmid, name: instanceHostname, storage: store, full: 1 }),
|
||||
vmid,
|
||||
'stopped',
|
||||
'clone'
|
||||
);
|
||||
await sleep(STEP_DELAY_MS);
|
||||
|
||||
console.log('[provision] STEP 6: Configuring container');
|
||||
await executeTaskSafely(
|
||||
proxmox.configureContainer({ vmid, cpu: res.cpu, memory: res.memory, bridge, description, tags: tagsStr }),
|
||||
vmid,
|
||||
null,
|
||||
'configure'
|
||||
);
|
||||
await sleep(STEP_DELAY_MS);
|
||||
|
||||
if (process.env.PVE_ALLOW_RESIZE === '1' && res.disk) {
|
||||
console.log('[provision] STEP 7: Resizing container');
|
||||
const resizeTask = await proxmox.resizeContainer(vmid, { disk: 'rootfs', addGiB: Number(res.disk) });
|
||||
if (resizeTask?.upid) await waitForTask('zlh-prod1', resizeTask.upid);
|
||||
const resizeGrace = Number(process.env.RESIZE_GRACE_MS || 45000);
|
||||
console.log(`[provision] waiting ${resizeGrace}ms after resize before start`);
|
||||
await sleep(resizeGrace);
|
||||
}
|
||||
|
||||
console.log('[provision] STEP 8: Starting container');
|
||||
let started = false;
|
||||
for (let attempt = 1; attempt <= 3; attempt++) {
|
||||
try {
|
||||
await executeTaskSafely(proxmox.startContainer(vmid), vmid, 'running', 'start');
|
||||
started = true;
|
||||
break;
|
||||
} catch (err) {
|
||||
console.warn(`[provision] Start attempt ${attempt} failed: ${err.message}`);
|
||||
if (attempt < 3) {
|
||||
const backoff = attempt * 15000;
|
||||
console.log(`[provision] Retrying start in ${backoff / 1000}s...`);
|
||||
await sleep(backoff);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!started) throw new Error(`Container ${vmid} did not start after retries`);
|
||||
if (SLEEP_AFTER_START_MS > 0) await sleep(SLEEP_AFTER_START_MS);
|
||||
|
||||
console.log('[provision] STEP 9: Running post-start hook');
|
||||
await runGamePostStartHook({ game: gameFinal, vmid, ports: allocatedPorts });
|
||||
await sleep(STEP_DELAY_MS);
|
||||
|
||||
console.log('[provision] STEP 10: Detecting container IP');
|
||||
const ctIp = await getCtIpWithRetry(vmid, process.env.PROXMOX_NODE, 12, 10000);
|
||||
await sleep(STEP_DELAY_MS);
|
||||
|
||||
console.log('[provision] STEP 11: Inserting DB record');
|
||||
const instance = await prisma.containerInstance.create({
|
||||
data: {
|
||||
vmid,
|
||||
customerId,
|
||||
ctype,
|
||||
game: gameFinal,
|
||||
variant: variantFin,
|
||||
ip: ctIp,
|
||||
ports: allocatedPorts,
|
||||
status: 'running',
|
||||
description,
|
||||
hostname: instanceHostname,
|
||||
|
||||
// ⭐ CORRECT RELATION
|
||||
template: {
|
||||
connect: { id: template.id },
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
await sleep(STEP_DELAY_MS);
|
||||
|
||||
if (allocatedPorts.length > 0) {
|
||||
console.log('[provision] STEP 12: Enqueuing edge publish');
|
||||
try {
|
||||
await enqueuePublishEdge({
|
||||
vmid,
|
||||
slotHostname, // ✅ full FQDN for DNS/Traefik
|
||||
game: gameFinal,
|
||||
instanceHostname, // short
|
||||
ports: allocatedPorts,
|
||||
ctIp,
|
||||
txnId
|
||||
});
|
||||
await sleep(STEP_DELAY_MS);
|
||||
|
||||
// Mark ports committed
|
||||
await PortAllocationService.commit({ vmid, ports: allocatedPorts });
|
||||
} catch (err) {
|
||||
console.error(`[provision] STEP 12 failed for vmid=${vmid}:`, err.message || err);
|
||||
throw err; // bubble up to outer catch
|
||||
}
|
||||
}
|
||||
|
||||
// Confirm VMID committed
|
||||
await confirmVmidAllocated(vmid);
|
||||
|
||||
console.log('[provision] COMPLETE: success');
|
||||
return { vmid, instance, ports: allocatedPorts, slotHostname, instanceHostname };
|
||||
} catch (err) {
|
||||
console.error('[provision] ERROR:', err.message || err);
|
||||
|
||||
try {
|
||||
if (vmid) await PortAllocationService.releaseByVmid(vmid);
|
||||
} catch (e) {
|
||||
console.warn('[provision] rollback ports failed:', e.message || e);
|
||||
}
|
||||
|
||||
if (!process.env.DEBUG_KEEP_FAILED) {
|
||||
try { if (vmid) await proxmox.deleteContainer(vmid); } catch {}
|
||||
try { if (vmid) await releaseVmid(vmid); } catch {}
|
||||
} else {
|
||||
console.warn(`[provision] DEBUG_KEEP_FAILED=1 → leaving container ${vmid} for inspection`);
|
||||
}
|
||||
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
export default { createContainer };
|
||||
452
src/api/provisionAgent.js
Normal file
452
src/api/provisionAgent.js
Normal file
@ -0,0 +1,452 @@
|
||||
// src/api/provisionAgent.js
|
||||
// FINAL AGENT-DRIVEN PROVISIONING PIPELINE
|
||||
// Supports: paper, vanilla, purpur, forge, fabric, neoforge + Steam creds passthrough
|
||||
|
||||
import "dotenv/config";
|
||||
import fetch from "node-fetch";
|
||||
import crypto from "crypto";
|
||||
|
||||
import prisma from "../services/prisma.js";
|
||||
import proxmox, {
|
||||
cloneContainer,
|
||||
configureContainer,
|
||||
startWithRetry,
|
||||
deleteContainer,
|
||||
} from "../services/proxmoxClient.js";
|
||||
|
||||
import { getCtIpWithRetry } from "../services/getCtIp.js";
|
||||
import { PortAllocationService } from "../services/portAllocator.js";
|
||||
import {
|
||||
allocateVmid,
|
||||
confirmVmidAllocated,
|
||||
releaseVmid,
|
||||
} from "../services/vmidAllocator.js";
|
||||
|
||||
import { enqueuePublishEdge } from "../queues/postProvision.js";
|
||||
|
||||
// Promise-based delay helper used between polling attempts.
const sleep = (ms) => new Promise((r) => setTimeout(r, ms));

// VMID of the Proxmox CT template that ships the in-container agent.
// Several env names are accepted for backward compatibility; falls back
// to 900 when none is set.
const AGENT_TEMPLATE_VMID = Number(
  process.env.AGENT_TEMPLATE_VMID ||
    process.env.BASE_TEMPLATE_VMID ||
    process.env.PROXMOX_AGENT_TEMPLATE_VMID ||
    900
);

// TCP port the agent's HTTP API (/config, /status) listens on inside the CT.
const AGENT_PORT = Number(process.env.ZLH_AGENT_PORT || 18888);
// Optional bearer token; when set it is sent on every agent request.
const AGENT_TOKEN = process.env.ZLH_AGENT_TOKEN || null;
|
||||
|
||||
/* -------------------------------------------------------------
|
||||
VERSION PARSER
|
||||
------------------------------------------------------------- */
|
||||
/**
 * Split a Minecraft version string (e.g. "1.20.5") into numeric parts.
 * Missing or non-numeric components default to 0.
 *
 * @param {string|null|undefined} ver - dotted version string
 * @returns {{major: number, minor: number, patch: number}}
 */
function parseMcVersion(ver) {
  if (!ver) return { major: 0, minor: 0, patch: 0 };
  const [maj, min, pat] = String(ver).split(".");
  return {
    major: Number(maj) || 0,
    minor: Number(min) || 0,
    patch: Number(pat) || 0,
  };
}
|
||||
|
||||
/* -------------------------------------------------------------
|
||||
JAVA RUNTIME SELECTOR
|
||||
------------------------------------------------------------- */
|
||||
/**
 * Choose the Java major version (17 or 21) needed to run a given
 * Minecraft version.
 *
 * Minecraft 1.20.5+, every 1.21+ release, and any hypothetical 2.x
 * release require Java 21; everything older (and unparseable input)
 * runs on Java 17.
 *
 * Fixes vs. previous version:
 *  - the `if (minor > 20) return 21;` branch was unreachable (already
 *    covered by `minor >= 21`) and has been removed;
 *  - version parsing is inlined so the function is self-contained.
 *
 * @param {string|null|undefined} version - e.g. "1.20.5"; missing or
 *   non-numeric components count as 0.
 * @returns {number} 17 or 21
 */
function pickJavaRuntimeForMc(version) {
  const parts = version ? String(version).split(".") : [];
  const major = Number(parts[0]) || 0;
  const minor = Number(parts[1]) || 0;
  const patch = Number(parts[2]) || 0;

  if (major > 1) return 21;
  if (major === 1) {
    if (minor >= 21) return 21;
    if (minor === 20 && patch >= 5) return 21;
  }
  return 17;
}
|
||||
|
||||
/* -------------------------------------------------------------
|
||||
HOSTNAME GENERATION
|
||||
------------------------------------------------------------- */
|
||||
/**
 * Build a deterministic short hostname for a container instance, e.g.
 * "mc-paper-400", "rust-512", or "game-700" for unknown games.
 * Only Minecraft encodes its variant into the name, and only when the
 * variant is one of the recognised server flavours.
 *
 * @param {{game?: string, variant?: string, vmid: number}} opts
 * @returns {string} short hostname (no zone suffix)
 */
function generateSystemHostname({ game, variant, vmid }) {
  const gameLc = (game || "").toLowerCase();
  const variantLc = (variant || "").toLowerCase();

  // Map well-known games onto short prefixes; anything else → "game".
  let prefix = "game";
  if (gameLc.includes("minecraft")) {
    prefix = "mc";
  } else if (gameLc.includes("terraria")) {
    prefix = "terraria";
  } else if (gameLc.includes("valheim")) {
    prefix = "valheim";
  } else if (gameLc.includes("rust")) {
    prefix = "rust";
  }

  // Variant segment is used only for recognised Minecraft flavours.
  const knownMcVariants = ["paper", "forge", "fabric", "vanilla", "purpur", "neoforge"];
  const variantPart =
    gameLc.includes("minecraft") && knownMcVariants.includes(variantLc)
      ? variantLc
      : "";

  return variantPart ? `${prefix}-${variantPart}-${vmid}` : `${prefix}-${vmid}`;
}
|
||||
|
||||
/* -------------------------------------------------------------
|
||||
ADMIN PASSWORD GENERATOR
|
||||
------------------------------------------------------------- */
|
||||
/**
 * Generate a random admin password: 12 cryptographically random bytes
 * rendered as a 16-character base64url string (URL-safe, no padding).
 *
 * @returns {string} 16-char password
 */
function generateAdminPassword() {
  const raw = crypto.randomBytes(12);
  return raw.toString("base64url");
}
|
||||
|
||||
/* -------------------------------------------------------------
|
||||
BUILD AGENT PAYLOAD
|
||||
------------------------------------------------------------- */
|
||||
/**
 * Assemble the JSON payload POSTed to the in-container agent's /config
 * endpoint.
 *
 * Resolves sensible defaults: artifact path per Minecraft variant, Java
 * runtime per Minecraft version, memory per loader (modded loaders get
 * 4 GiB), Steam credentials (anonymous), and admin credentials (random
 * password when none supplied).
 *
 * @throws when variant is empty, or the Minecraft variant is unknown.
 * @returns {object} snake_case payload for the agent
 */
function buildAgentPayload({
  vmid,
  game,
  variant,
  version,
  world,
  ports,
  artifactPath,
  javaPath,
  memoryMiB,
  steamUser,
  steamPass,
  steamAuth,
  adminUser,
  adminPass,
}) {
  const g = (game || "minecraft").toLowerCase();
  const v = (variant || "").toLowerCase();
  const ver = version || "1.20.1";
  const w = world || "world";

  if (!v) throw new Error("variant is required (paper, forge, fabric, vanilla, purpur)");

  // --------- VARIANT → ARTIFACT PATH ---------
  // Only computed for Minecraft and only when the caller didn't supply one.
  let art = artifactPath;
  if (!art && g === "minecraft") {
    if (v === "paper" || v === "vanilla" || v === "purpur") {
      art = `minecraft/${v}/${ver}/server.jar`;
    } else if (v === "forge") {
      art = `minecraft/forge/${ver}/forge-installer.jar`;
    } else if (v === "fabric") {
      art = `minecraft/fabric/${ver}/fabric-server.jar`;
    } else if (v === "neoforge") {
      art = `minecraft/neoforge/${ver}/neoforge-installer.jar`;
    } else {
      throw new Error(`Unsupported Minecraft variant: ${v}`);
    }
  }

  // --------- JAVA RUNTIME SELECTOR ----------
  let jpath = javaPath;
  if (!jpath && g === "minecraft") {
    jpath =
      pickJavaRuntimeForMc(ver) === 21
        ? "java/21/OpenJDK21.tar.gz"
        : "java/17/OpenJDK17.tar.gz";
  }

  // --------- MEMORY DEFAULTS ----------
  // Modded loaders (forge/neoforge) need more headroom than plugin servers.
  let mem = Number(memoryMiB) || 0;
  if (mem <= 0) {
    mem = v === "forge" || v === "neoforge" ? 4096 : 2048;
  }

  return {
    vmid,
    game: g,
    variant: v,
    version: ver,
    world: w,
    // Always deliver an array; drop falsy single values.
    ports: Array.isArray(ports) ? ports : [ports].filter(Boolean),
    artifact_path: art,
    java_path: jpath,
    memory_mb: mem,

    // Steam credentials pass through; anonymous login by default.
    steam_user: steamUser || "anonymous",
    steam_pass: steamPass || "",
    steam_auth: steamAuth || "",

    // Admin credentials; random password generated when not supplied.
    admin_user: adminUser || "admin",
    admin_pass: adminPass || generateAdminPassword(),
  };
}
|
||||
|
||||
/* -------------------------------------------------------------
|
||||
SEND CONFIG → triggers async provision+start in agent
|
||||
------------------------------------------------------------- */
|
||||
/**
 * POST the provisioning payload to the in-container agent's /config
 * endpoint. The agent kicks off install+start asynchronously; callers
 * should poll /status afterwards.
 *
 * @param {{ip: string, payload: object}} opts
 * @throws on any non-2xx response (response body included when readable)
 */
async function sendAgentConfig({ ip, payload }) {
  const headers = { "Content-Type": "application/json" };
  if (AGENT_TOKEN) headers["Authorization"] = `Bearer ${AGENT_TOKEN}`;

  const resp = await fetch(`http://${ip}:${AGENT_PORT}/config`, {
    method: "POST",
    headers,
    body: JSON.stringify(payload),
  });

  if (resp.ok) return;

  const text = await resp.text().catch(() => "");
  throw new Error(`/config failed (${resp.status}): ${text}`);
}
|
||||
|
||||
/* -------------------------------------------------------------
|
||||
WAIT FOR AGENT READY (poll /status)
|
||||
------------------------------------------------------------- */
|
||||
/**
 * Poll the agent's /status endpoint until it reports state "running".
 *
 * Agent state machine: idle → installing → verifying → starting → running.
 *
 * Fix vs. previous version: a fatal agent state ("error"/"crashed")
 * used to be thrown inside the try block, swallowed by the surrounding
 * catch (which treats throws as transient poll failures), and polling
 * continued until the timeout. Fatal states now abort immediately.
 *
 * @param {{ip: string, timeoutMs?: number}} opts - defaults to a 10 min deadline
 * @returns {Promise<{state: string, raw: object}>} once the agent runs
 * @throws immediately on a fatal agent state; otherwise the last
 *         observed error when the deadline passes
 */
async function waitForAgentRunning({ ip, timeoutMs = 10 * 60_000 }) {
  const url = `http://${ip}:${AGENT_PORT}/status`;
  const headers = {};
  if (AGENT_TOKEN) headers["Authorization"] = `Bearer ${AGENT_TOKEN}`;

  const deadline = Date.now() + timeoutMs;
  let last;

  while (Date.now() < deadline) {
    try {
      const resp = await fetch(url, { headers });
      if (!resp.ok) {
        last = new Error(`/status HTTP ${resp.status}`);
      } else {
        const data = await resp.json().catch(() => ({}));
        const state = (data.state || data.status || "").toLowerCase();

        if (state === "running") return { state: "running", raw: data };

        if (state === "error" || state === "crashed") {
          const msg = data.error || "";
          const fatal = new Error(`agent state=${state} ${msg ? `(${msg})` : ""}`);
          fatal.fatal = true; // marker: rethrow below instead of retrying
          throw fatal;
        }

        last = new Error(`agent state=${state || "unknown"}`);
      }
    } catch (err) {
      if (err.fatal) throw err; // unrecoverable agent state — stop polling
      last = err; // transient (network / bad response) — keep polling
    }

    await sleep(3000);
  }

  throw last || new Error("Agent did not reach running state");
}
|
||||
|
||||
/* -------------------------------------------------------------
|
||||
MAIN PROVISION ENTRYPOINT
|
||||
------------------------------------------------------------- */
|
||||
/**
 * End-to-end agent-driven provisioning pipeline:
 *   allocate VMID → reserve ports → clone agent template → configure CT →
 *   start → detect IP → POST config to the in-container agent → wait for
 *   the agent to reach "running" → persist to DB → commit ports →
 *   enqueue edge publish → confirm VMID.
 *
 * On any failure: best-effort rollback (release ports, delete container,
 * release VMID), then rethrow.
 *
 * @param {object} body - request fields; customerId, game and variant are required.
 * @returns {Promise<{vmid, ip, hostname, ports, instance}>}
 */
export async function provisionAgentInstance(body = {}) {
  const {
    customerId,
    game,
    variant,
    version,
    world,
    ctype: rawCtype,
    name,
    cpuCores,
    memoryMiB,
    diskGiB,
    portsNeeded,
    artifactPath,
    javaPath,

    // NEW optional fields (Steam + admin credential passthrough)
    steamUser,
    steamPass,
    steamAuth,
    adminUser,
    adminPass,
  } = body;

  if (!customerId) throw new Error("customerId required");
  if (!game) throw new Error("game required");
  if (!variant) throw new Error("variant required");

  const ctype = rawCtype || "game";
  const isMinecraft = game.toLowerCase().includes("minecraft");

  // Declared up-front so the catch block can roll back partial progress.
  let vmid;
  let allocatedPortsMap = null;
  let gamePorts = [];
  let ctIp;
  let instanceHostname;

  try {
    console.log("[agentProvision] STEP 1: allocate VMID");
    vmid = await allocateVmid(ctype);

    instanceHostname = generateSystemHostname({ game, variant, vmid });

    console.log("[agentProvision] STEP 2: port allocation");
    // Non-Minecraft games reserve ports from the pool; Minecraft always
    // uses the default 25565.
    // NOTE(review): multiple Minecraft instances all report port 25565 —
    // presumably disambiguated downstream by SRV/edge routing; confirm.
    if (!isMinecraft && (portsNeeded ?? 0) > 0) {
      gamePorts = await PortAllocationService.reserve({
        vmid,
        count: portsNeeded,
        portType: "game",
      });
      allocatedPortsMap = { game: gamePorts };
    } else {
      gamePorts = [25565];
      allocatedPortsMap = { game: gamePorts };
    }

    const node = process.env.PROXMOX_NODE || "zlh-prod1";
    // Dev containers go on vmbr2, everything else on the game bridge vmbr3.
    const bridge = ctype === "dev" ? "vmbr2" : "vmbr3";
    const cpu = cpuCores ? Number(cpuCores) : 2;
    const memory = memoryMiB ? Number(memoryMiB) : 2048;

    const description = name
      ? `${name} (customer=${customerId}; vmid=${vmid}; agent=v1)`
      : `customer=${customerId}; vmid=${vmid}; agent=v1`;

    // Proxmox tags used for filtering/auditing in the UI.
    const tags = [
      `cust-${customerId}`,
      `type-${ctype}`,
      `game-${game}`,
      variant ? `var-${variant}` : null,
    ]
      .filter(Boolean)
      .join(",");

    console.log(
      `[agentProvision] STEP 3: clone template ${AGENT_TEMPLATE_VMID} → vmid=${vmid}`
    );

    await cloneContainer({
      templateVmid: AGENT_TEMPLATE_VMID,
      vmid,
      name: instanceHostname,
      full: 1, // full clone, not a linked clone
    });

    console.log("[agentProvision] STEP 4: configure CPU/mem/bridge/tags");
    await configureContainer({
      vmid,
      cpu,
      memory,
      bridge,
      description,
      tags,
    });

    console.log("[agentProvision] STEP 5: start container");
    await startWithRetry(vmid);

    console.log("[agentProvision] STEP 6: detect container IP");
    // Up to 12 attempts, 10s apart.
    const ip = await getCtIpWithRetry(vmid, node, 12, 10_000);
    if (!ip) throw new Error("Failed to detect container IP");
    ctIp = ip;

    console.log(`[agentProvision] ctIp=${ctIp}`);

    console.log("[agentProvision] STEP 7: build agent payload");
    const payload = buildAgentPayload({
      vmid,
      game,
      variant,
      version,
      world,
      ports: gamePorts,
      artifactPath,
      javaPath,
      memoryMiB,

      steamUser,
      steamPass,
      steamAuth,
      adminUser,
      adminPass,
    });

    console.log("[agentProvision] STEP 8: POST /config to agent (async provision+start)");
    await sendAgentConfig({ ip: ctIp, payload });

    console.log("[agentProvision] STEP 9: wait for agent to be running via /status");
    const agentResult = await waitForAgentRunning({ ip: ctIp });

    console.log("[agentProvision] STEP 10: DB save");
    // NOTE(review): stores allocatedPorts/payload/agentState fields —
    // these differ from the legacy pipeline's ports/status columns;
    // confirm the ContainerInstance schema carries both shapes.
    const instance = await prisma.containerInstance.create({
      data: {
        vmid,
        customerId,
        ctype,
        hostname: instanceHostname,
        ip: ctIp,
        allocatedPorts: allocatedPortsMap,
        payload,
        agentState: agentResult.state,
        agentLastSeen: new Date(),
      },
    });

    console.log("[agentProvision] STEP 11: commit ports");
    // Only pool-reserved ports need committing; Minecraft's fixed 25565
    // was never reserved.
    if (!isMinecraft && gamePorts.length) {
      await PortAllocationService.commit({
        vmid,
        ports: gamePorts,
        portType: "game",
      });
    }

    console.log("[agentProvision] STEP 12: publish edge");
    // NOTE(review): slotHostname is the SHORT name here, whereas the
    // legacy pipeline publishes the zone-qualified FQDN — confirm which
    // form enqueuePublishEdge expects.
    await enqueuePublishEdge({
      vmid,
      slotHostname: instanceHostname,
      instanceHostname,
      ports: gamePorts,
      ctIp,
      game,
    });

    // Mark the VMID as permanently taken only after everything succeeded.
    await confirmVmidAllocated(vmid);

    console.log("[agentProvision] COMPLETE");

    return {
      vmid,
      ip: ctIp,
      hostname: instanceHostname,
      ports: gamePorts,
      instance,
    };
  } catch (err) {
    console.error("[agentProvision] ERROR:", err.message);

    // Best-effort rollback — each step swallows its own failure so the
    // original error is what propagates.
    try {
      if (vmid) await PortAllocationService.releaseByVmid(vmid);
    } catch {}

    try {
      if (vmid) await deleteContainer(vmid);
    } catch {}

    try {
      if (vmid) await releaseVmid(vmid);
    } catch {}

    throw err;
  }
}
|
||||
|
||||
export default { provisionAgentInstance };
|
||||
61
src/app.js
Normal file
61
src/app.js
Normal file
@ -0,0 +1,61 @@
|
||||
// /opt/zpack-api/src/app.js
// Express entrypoint: mounts all API routers, a dev error handler, a
// health check, and starts the HTTP listener.
import 'dotenv/config'
import express from 'express'
import portRoutes from './routes/ports.js'
import containers from './routes/containers.js'
import promSd from './routes/promSd.js'
import proxRoute from './routes/proxmox.js'
import instances from './routes/instances.js';
import templatesRouter from './routes/templates.js';
import edgeRoutes from './routes/edge.js';
import debugRoutes from './routes/debug.js';

// Testing route
import edgeTest from './routes/edge.test.js';

const app = express()

// NOTE(review): mounted BEFORE express.json(), so /api/debug handlers
// receive an unparsed request body — confirm this is intentional.
app.use('/api/debug', debugRoutes);
app.use(express.json())
app.use('/api/v2/ports', portRoutes)
app.use('/api/containers', containers)
app.use('/sd', promSd)
app.use('/api/proxmox', proxRoute)
app.use('/api/instances', instances);
app.use(templatesRouter);
app.use('/api/edge', edgeRoutes);

// testing route
app.use('/api/test', edgeTest);

// --- DEV ERROR HANDLER (temporary) ---
// NOTE(review): Express error middleware is normally registered after
// ALL routes; /health below is mounted after this handler and would not
// be covered — confirm ordering.
app.use((err, req, res, next) => {
  const status = err.httpCode || 500;
  const payload = {
    ok: false,
    error: err.message || String(err),
  };
  // Include a truncated stack trace outside production for debugging.
  if (process.env.NODE_ENV !== 'production' && err && err.stack) {
    payload.stack = err.stack.split('\n').slice(0, 12); // first lines only
  }
  console.error('[ERR]', err);
  res.status(status).json(payload);
});


// Health check
app.get('/health', (_req, res) => {
  res.json({ status: 'ok', timestamp: new Date().toISOString() })
})

// ---- add this block ----
const PORT = Number(process.env.PORT || 3000)
const HOST = process.env.HOST || '0.0.0.0'
app.listen(PORT, HOST, () => {
  console.log(`ZeroLagHub API listening on http://${HOST}:${PORT}`)
})
// ------------------------



export default app
|
||||
149
src/audit/dnsReconcile.js
Normal file
149
src/audit/dnsReconcile.js
Normal file
@ -0,0 +1,149 @@
|
||||
/**
|
||||
* ZeroLagHub – DNS Reconciliation Utility (Final)
|
||||
* ------------------------------------------------
|
||||
* Compares DB + Proxmox + DNS (Technitium + Cloudflare)
|
||||
* Produces a 3-way sync summary, optional cleanup, and optional JSON output.
|
||||
*/
|
||||
|
||||
import prisma from "../services/prisma.js";
|
||||
import * as technitium from "../services/technitiumClient.js";
|
||||
import * as cloudflare from "../services/cloudflareClient.js";
|
||||
import proxmox from "../services/proxmoxClient.js";
|
||||
import { unpublish } from "../services/dePublisher.js";
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Helpers */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
// Managed DNS zone; all short labels are assumed to live here.
const ZONE = process.env.TECHNITIUM_ZONE || "zerolaghub.quest";
const zoneDot = `.${ZONE}`;

/**
 * Normalize a DNS record / container hostname for comparison:
 * lowercased, SRV service prefix stripped, trailing dot(s) removed, and
 * bare labels qualified with the managed zone.
 *
 * @param {*} name - hostname-ish value; anything falsy yields null
 * @returns {string|null} normalized hostname, or null for empty input
 */
function normalizeHost(name) {
  if (!name) return null;

  let host = name.toString().trim().toLowerCase();

  // Strip SRV prefixes (_minecraft._tcp. or __minecraft.__tcp.)
  host = host.replace(/^_+minecraft\._+tcp\./, "");

  // Remove any trailing dots (absolute-name notation).
  host = host.replace(/\.*$/, "");

  // A bare label (no dots at all) is qualified with the managed zone.
  if (!host.endsWith(zoneDot) && !host.includes(".")) {
    host = `${host}${zoneDot}`;
  }

  return host;
}

/**
 * Return both spellings of a host: the normalized FQDN and the short
 * zone-stripped label.
 *
 * @param {*} host
 * @returns {string[]} [fqdn, short], or [] for empty input
 */
function variants(host) {
  const fqdn = normalizeHost(host);
  if (!fqdn) return [];

  const short = fqdn.endsWith(zoneDot)
    ? fqdn.slice(0, -zoneDot.length)
    : fqdn;

  return [fqdn, short];
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Main reconciliation */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/**
 * Three-way reconciliation between the DB (ContainerInstance), Proxmox
 * (live containers) and DNS (Technitium + Cloudflare A/SRV records).
 *
 * Reports:
 *  - dbOnly:   hosts in the DB but missing from Proxmox
 *  - proxOnly: hosts in Proxmox but missing from the DB
 *  - orphans:  DNS records pointing at nothing in DB or Proxmox
 *
 * @param {{apply?: boolean, json?: boolean}} opts
 *   apply - when true, unpublish orphaned DNS records and log them;
 *           default is a read-only dry run.
 *   json  - when true, print machine-readable JSON and return the summary.
 * @returns {Promise<object|undefined>} summary object in json mode;
 *          undefined in pretty-print mode.
 */
export async function reconcileDNS({ apply = false, json = false } = {}) {
  console.log(`🔍 Starting DNS reconciliation (${apply ? "apply" : "dry run"})...`);

  /* ---------- 1️⃣ Database ---------- */
  const dbInstances = await prisma.containerInstance.findMany({
    select: { hostname: true },
  });
  // Track both FQDN and short spellings for each DB hostname.
  const dbHosts = new Set();
  for (const i of dbInstances) variants(i.hostname).forEach(v => dbHosts.add(v));

  /* ---------- 2️⃣ Proxmox ---------- */
  // Proxmox unavailability is non-fatal: reconcile with an empty set.
  let containers = [];
  try {
    containers = await proxmox.listContainers();
  } catch (err) {
    console.warn(`[API] ⚠️ Could not fetch Proxmox containers: ${err.message}`);
  }
  const proxHosts = new Set();
  for (const c of containers) variants(c.hostname).forEach(v => proxHosts.add(v));

  /* ---------- 3️⃣ DNS ---------- */
  const techRecords = await technitium.listRecords();
  const cfRecords = await cloudflare.listAllRecords();
  const dnsHosts = new Set();
  // Only A and SRV records are managed by this system.
  for (const r of [...techRecords, ...cfRecords]) {
    if (!["A", "SRV"].includes(r.type)) continue;
    const normalized = normalizeHost(r.name);
    if (normalized) dnsHosts.add(normalized);
  }

  /* ---------- 4️⃣ Comparison ---------- */
  // Infrastructure records that must never be flagged as orphans.
  const IGNORE = new Set([
    normalizeHost("zerolaghub.quest"),
    normalizeHost("ns1.zerolaghub.quest"),
  ]);

  // A DNS host is an orphan when neither its FQDN nor its short form
  // appears in the DB or in Proxmox.
  const orphans = [];
  for (const fq of dnsHosts) {
    const [fqdn, short] = variants(fq);
    if (IGNORE.has(fqdn) || IGNORE.has(short)) continue;
    if (!dbHosts.has(fqdn) && !proxHosts.has(fqdn) && !dbHosts.has(short) && !proxHosts.has(short)) {
      orphans.push(fqdn);
    }
  }

  const dbOnly = [...dbHosts].filter(h => !proxHosts.has(h));
  const proxOnly = [...proxHosts].filter(h => !dbHosts.has(h));

  /* ---------- 5️⃣ JSON or Pretty Output ---------- */
  const summary = {
    timestamp: new Date().toISOString(),
    counts: {
      db: dbInstances.length,
      proxmox: containers.length,
      technitium: techRecords.length,
      cloudflare: cfRecords.length,
      dnsHosts: dnsHosts.size,
      dbOnly: dbOnly.length,
      proxOnly: proxOnly.length,
      orphans: orphans.length,
    },
    dbOnly,
    proxOnly,
    orphans,
  };

  if (json) {
    console.log(JSON.stringify(summary, null, 2));
    return summary;
  }

  console.log(`\n🧾 ===== Environment Sync Summary =====`);
  console.log(`📘 DB-only hosts (not in Proxmox): ${dbOnly.length}`);
  if (dbOnly.length) dbOnly.forEach(h => console.log(`   - ${h}`));

  console.log(`\n🖥️ Proxmox-only hosts (not in DB): ${proxOnly.length}`);
  if (proxOnly.length) proxOnly.forEach(h => console.log(`   - ${h}`));

  console.log(`\n☁️ DNS-only (orphans): ${orphans.length}`);
  if (orphans.length) orphans.forEach(h => console.log(`   - ${h}`));

  console.log(`\nCounts → DB:${summary.counts.db} | Proxmox:${summary.counts.proxmox} | DNS:${summary.counts.dnsHosts}`);

  /* ---------- 6️⃣ Optional Cleanup ---------- */
  // Unpublish each orphan and log it to deletedInstance for audit;
  // individual failures are warned but do not stop the loop.
  if (apply && orphans.length) {
    console.log("\n🧹 Cleaning up orphaned records...");
    for (const hostname of orphans) {
      try {
        await unpublish({ hostname, game: "minecraft", ports: [25565] });
        await prisma.deletedInstance.create({
          data: { hostname, origin: "reconcile" },
        });
        console.log(`   ✓ Unpublished and logged ${hostname}`);
      } catch (err) {
        console.warn(`   ⚠️ Failed to unpublish ${hostname}: ${err.message}`);
      }
    }
    console.log("\n✅ Cleanup complete.");
  } else if (!apply) {
    console.log("\n(dry run — no changes made)");
  }
}
|
||||
157
src/jobs/provisionProcessor.js
Normal file
157
src/jobs/provisionProcessor.js
Normal file
@ -0,0 +1,157 @@
|
||||
import { Worker } from 'bullmq'
|
||||
import prisma from '../services/prisma.js';
|
||||
import { runProvisionPlaybook } from '../ansible/runProvision.js'
|
||||
import { redisOptions } from '../utils/redis.js'
|
||||
|
||||
|
||||
|
||||
/**
 * BullMQ worker for the 'zpack-provision' queue.
 *
 * For each job: marks the ProvisioningJob RUNNING, resolves the
 * customer's port allocation and the next free VMID, runs the Ansible
 * provisioning playbook, then records a ServerInstance plus its
 * ServerPort rows. Progress is reported at 10/25/50/75/100 and the job
 * status row is updated at each transition; failures mark it FAILED and
 * rethrow so BullMQ records the failure.
 */
export const provisionWorker = new Worker('zpack-provision', async (job) => {
  const { customerId, gameType, memory, cores, template } = job.data

  try {
    // Update job status to running
    await updateJobStatus(job.data.jobId, 'RUNNING', 'Starting container provisioning')
    await job.updateProgress(10)

    // Get customer port allocation (must pre-exist; provisioning does not create one)
    const portAllocation = await getCustomerPorts(customerId)
    if (!portAllocation) {
      throw new Error('No port allocation found for customer')
    }

    await job.updateProgress(25)
    await updateJobStatus(job.data.jobId, 'RUNNING', 'Running Ansible playbook')

    // Get next available VMID
    const vmid = await getNextVMID()

    // Prepare data for your Ansible runner
    const ansibleData = {
      user_id: customerId,
      game: gameType,
      ports: portAllocation.ports.length,
      mode: 'survival', // or from job data
      vmid: vmid,
      base_port: portAllocation.basePort,
      allocated_ports: portAllocation.ports.join(','),
      // dev workloads ride the dev bridge; everything else the game bridge
      bridge: gameType.includes('dev') ? 'vmbr2' : 'vmbr3',
      memory: memory || 4096,
      cores: cores || 2,
      template: template || `base-${gameType}-v1.0`
    }

    await job.updateProgress(50)

    // Run your Ansible playbook (long-running; output parsed below for the IP)
    const ansibleOutput = await runProvisionPlaybook(ansibleData)

    await job.updateProgress(75)

    // Create server instance record (extract IP from Ansible output if needed)
    const serverInstance = await prisma.serverInstance.create({
      data: {
        customerId,
        vmid: vmid,
        hostname: `${gameType}-${customerId}`,
        ip: extractIPFromOutput(ansibleOutput) || null, // Parse from Ansible output
        node: 'zlh-prod1', // or extract from output
        status: 'RUNNING',
        game: gameType,
        template: template,
        memory: memory,
        cores: cores
      }
    })

    // Create server ports (first port is the game port, the rest secondary)
    await Promise.all(
      portAllocation.ports.map((port, index) =>
        prisma.serverPort.create({
          data: {
            serverId: serverInstance.id,
            port: port,
            purpose: index === 0 ? 'game' : `secondary-${index}`
          }
        })
      )
    )

    await job.updateProgress(100)
    await updateJobStatus(job.data.jobId, 'COMPLETED', `Container ${vmid} provisioned successfully`)

    return {
      success: true,
      serverId: serverInstance.id,
      vmid: vmid,
      ip: extractIPFromOutput(ansibleOutput),
      ports: portAllocation.ports,
      customerId: customerId
    }

  } catch (error) {
    console.error('Provision job failed:', error)
    await updateJobStatus(job.data.jobId, 'FAILED', error.message)
    throw error
  }
}, {
  connection: redisOptions // Use your standardized Redis config
})
|
||||
|
||||
// Helper functions
|
||||
/**
 * Persist a ProvisioningJob status transition plus a human-readable log
 * line. Stamps completedAt on COMPLETED, and startedAt only on the
 * first transition to RUNNING.
 *
 * @param {string} jobId  - ProvisioningJob primary key
 * @param {string} status - 'RUNNING' | 'COMPLETED' | 'FAILED' | ...
 * @param {string} logs   - log text stored on the row
 */
async function updateJobStatus(jobId, status, logs) {
  const data = {
    status,
    logs,
    updatedAt: new Date(),
  };

  if (status === 'COMPLETED') {
    data.completedAt = new Date();
  }
  if (status === 'RUNNING' && !(await jobHasStartTime(jobId))) {
    data.startedAt = new Date();
  }

  await prisma.provisioningJob.update({
    where: { id: jobId },
    data,
  });
}
|
||||
|
||||
/**
 * Return true only when the ProvisioningJob exists AND already has a
 * startedAt timestamp.
 *
 * Fix: the previous `job?.startedAt !== null` evaluated to true for a
 * MISSING job, because optional chaining yields undefined and
 * `undefined !== null` is true. Loose `!= null` treats both null and
 * undefined as "no start time".
 *
 * @param {string} jobId - ProvisioningJob primary key
 * @returns {Promise<boolean>}
 */
async function jobHasStartTime(jobId) {
  const job = await prisma.provisioningJob.findUnique({
    where: { id: jobId },
    select: { startedAt: true },
  });
  return job?.startedAt != null;
}
|
||||
|
||||
/**
 * Look up the customer's port allocation and expand it to the concrete
 * list of contiguous ports.
 *
 * @param {string} customerId
 * @returns {Promise<{basePort: number, ports: number[]}|null>} null when
 *          the customer has no allocation row.
 */
async function getCustomerPorts(customerId) {
  const allocation = await prisma.portAllocation.findFirst({
    where: { customerId },
  });
  if (!allocation) return null;

  const { basePort, count } = allocation;

  // Expand [basePort, basePort + count) into an explicit list.
  const ports = [];
  for (let i = 0; i < count; i++) {
    ports.push(basePort + i);
  }

  return { basePort, ports };
}
|
||||
|
||||
/**
 * Find the lowest unused VMID in the game-server range 400-699 by
 * scanning existing ServerInstance rows.
 *
 * @returns {Promise<number>} first free VMID
 * @throws when every VMID in 400-699 is taken
 */
async function getNextVMID() {
  const rows = await prisma.serverInstance.findMany({
    select: { vmid: true },
    where: { vmid: { not: null } }
  })

  const taken = new Set(rows.map((row) => row.vmid))

  // Linear scan of the game-server range; first gap wins.
  for (let candidate = 400; candidate < 700; candidate++) {
    if (!taken.has(candidate)) {
      return candidate
    }
  }

  throw new Error('No available VMIDs in range 400-699')
}
|
||||
|
||||
/**
 * Pull the container's IPv4 address out of Ansible stdout. Matches
 * `container_ip` or `assigned_ip` followed by a dotted quad.
 *
 * @param {string|null|undefined} output - raw playbook output
 * @returns {string|null} the first matched address, or null
 */
function extractIPFromOutput(output) {
  if (output == null) return null

  // Adjust this regex based on your Ansible output format
  const pattern = /(?:container_ip|assigned_ip)["\s:]*([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})/
  const match = output.match(pattern)

  return match?.[1] || null
}
|
||||
24
src/jobs/reconcileEdges.js
Normal file
24
src/jobs/reconcileEdges.js
Normal file
@ -0,0 +1,24 @@
|
||||
// src/jobs/reconcileEdges.js
|
||||
// Simple, idempotent reconciler: re-apply publishEdge for all running instances
|
||||
// Later you can diff against actual OPNsense/Technitium state to do true healing.
|
||||
|
||||
import prisma from '../services/prisma.js';
|
||||
import { publishEdge } from '../services/edgePublisher.js';
|
||||
|
||||
/**
 * Re-apply edge publication (DNS/firewall/proxy) for every running
 * container instance. Idempotent: publishEdge is safe to re-run.
 * Rows that cannot be published yet (no IP or no ports) are skipped;
 * per-row failures are logged and do not abort the sweep.
 *
 * Fix: the query selected a non-existent `name` field — ContainerInstance
 * stores its DNS label in `hostname` (as written by the provisioning
 * pipelines and read by the DNS reconciler), so `publishEdge` was being
 * handed an undefined hostname. Both the select and the call now use
 * `hostname`.
 */
export async function reconcileEdgesOnce() {
  const running = await prisma.containerInstance.findMany({
    where: { status: 'running' },
    select: { vmid: true, ip: true, hostname: true, ports: true },
  });

  for (const r of running) {
    try {
      if (!r.ip || !r.ports || r.ports.length === 0) continue;
      await publishEdge({ vmid: r.vmid, ctIp: r.ip, hostname: r.hostname, ports: r.ports });
    } catch (e) {
      console.warn('reconcileEdges failed', { vmid: r.vmid, msg: e?.message });
    }
  }
}
|
||||
|
||||
export default { reconcileEdgesOnce };
|
||||
15
src/jobs/reconcilePorts.js
Normal file
15
src/jobs/reconcilePorts.js
Normal file
@ -0,0 +1,15 @@
|
||||
// src/jobs/reconcilePorts.js
|
||||
import prisma from '../services/prisma.js';
|
||||
import { PortPool } from '../services/portPool.js'
|
||||
|
||||
|
||||
|
||||
/**
 * Release PortPool entries whose owning container no longer exists.
 * Compares every "allocated" pool row against the set of live vmids and
 * asks PortPool to release orphans (releaseByVmid is assumed idempotent).
 */
export async function reconcilePorts() {
  const allocatedRows = await prisma.portPool.findMany({ where: { status: 'allocated' } })

  const liveInstances = await prisma.containerInstance.findMany({ select: { vmid: true } })
  const liveVmids = new Set(liveInstances.map((row) => row.vmid))

  for (const row of allocatedRows) {
    // A null vmid can never be "live"; the -1 sentinel keeps Set.has honest.
    const owner = row.vmid ?? -1
    if (liveVmids.has(owner)) continue
    await PortPool.releaseByVmid(row.vmid) // idempotent if you code it so
  }
}
|
||||
103
src/prisma/seed-all.js
Normal file
103
src/prisma/seed-all.js
Normal file
@ -0,0 +1,103 @@
|
||||
// src/prisma/seed-all.js
|
||||
import { PrismaClient } from "@prisma/client";
|
||||
|
||||
const prisma = new PrismaClient();
|
||||
|
||||
/**
 * Upsert the catalog of ContainerTemplate rows (template VMIDs 900–912).
 * Existing rows are left untouched (update: {}); new rows get the shared
 * defaults: ctype "game", bridge vmbr3, 2 vCPU / 2048 MiB RAM / 20 GiB disk.
 */
async function seedTemplates() {
  console.log("▶ Seeding ContainerTemplate…");

  // [slug, game, variant, templateVmid]
  const catalog = [
    ["mc-vanilla", "minecraft", "vanilla", 900],
    ["mc-paper", "minecraft", "paper", 901],
    ["mc-forge", "minecraft", "forge", 902],
    ["mc-fabric", "minecraft", "fabric", 903],
    ["mc-bedrock", "minecraft", "bedrock", 904],
    ["mc-pocketmine", "minecraft", "pocketmine", 905],
    ["rust", "rust", "vanilla", 906],
    ["pz", "pz", "vanilla", 907],
    ["valheim", "valheim", "vanilla", 908],
    ["valheim-plus", "valheim", "plus", 909],
    ["valheim-bepinex", "valheim", "bepinex", 910],
    ["terraria-tmod", "terraria", "tmod", 911],
    ["terraria-tshock", "terraria", "tshock", 912],
  ];

  for (const [slug, game, variant, templateVmid] of catalog) {
    await prisma.containerTemplate.upsert({
      where: { slug },
      update: {},
      create: {
        slug,
        game,
        variant,
        ctype: "game",
        templateVmid,
        defBridge: "vmbr3",
        resources: { memory: 2048, disk: 20, cpu: 2 },
      },
    });
  }

  console.log("✔ ContainerTemplate seeding complete.");
}
|
||||
|
||||
/**
 * Ensure the per-type VMID counters exist (game → 5000, dev → 6000).
 * Existing counters are never reset (update: {}).
 */
async function seedVmidCounters() {
  console.log("▶ Seeding VmidCounter…");

  const counters = { game: 5000, dev: 6000 };

  for (const [key, current] of Object.entries(counters)) {
    await prisma.vmidCounter.upsert({
      where: { key },
      update: {},
      create: { key, current },
    });
  }

  console.log("✔ VmidCounter seeded.");
}
|
||||
|
||||
/**
 * Seed the game PortPool with ports 50000–50999, all marked "free".
 * createMany + skipDuplicates makes re-running this seeder safe.
 */
async function seedPortPool() {
  console.log("▶ Seeding PortPool (Game Ports)…");

  const START = 50000;
  const COUNT = 1000; // 50000–50999

  const entries = Array.from({ length: COUNT }, (_, offset) => ({
    port: START + offset,
    portType: "game",
    status: "free",
  }));

  await prisma.portPool.createMany({
    data: entries,
    skipDuplicates: true,
  });

  console.log(`✔ PortPool seeded (${COUNT} ports).`);
}
|
||||
|
||||
/** Run every seeder in order; any failure propagates to the top-level catch. */
async function main() {
  console.log("== ZeroLagHub schema seed-all starting ==");

  const steps = [seedTemplates, seedVmidCounters, seedPortPool];
  for (const step of steps) {
    await step();
  }

  console.log("== Seed complete ==");
}
|
||||
|
||||
// Entry point. On failure we log and set a non-zero exit code instead of
// calling process.exit() directly: process.exit() terminates immediately,
// which would prevent the .finally() below from closing the Prisma
// connection before the process dies.
main()
  .catch((err) => {
    console.error("❌ SEED FAILED");
    console.error(err);
    process.exitCode = 1; // let cleanup run, then exit non-zero
  })
  .finally(async () => {
    await prisma.$disconnect();
  });
|
||||
88
src/prisma/seed.js
Normal file
88
src/prisma/seed.js
Normal file
@ -0,0 +1,88 @@
|
||||
// prisma/seed.js
|
||||
// Seeds ContainerTemplate + HostSlot (slots for Minecraft variants)
|
||||
import prisma from '../services/prisma.js';
|
||||
|
||||
|
||||
/** Left-pad a number with zeros to at least 4 digits, e.g. 7 -> "0007". */
function pad4(n) {
  return `${n}`.padStart(4, '0');
}
|
||||
|
||||
/**
 * Upsert the ContainerTemplate catalog (template VMIDs 200–212).
 * Unlike the seed-all variant, this one also refreshes existing rows:
 * templateVmid, game/variant/ctype, resources, and network are re-applied
 * on every run so template metadata stays current.
 */
async function upsertTemplates() {
  // [slug, game, variant, templateVmid] — all game-type CTs on vmbr3
  const catalog = [
    ['mc-vanilla', 'minecraft', 'vanilla', 200],
    ['mc-paper', 'minecraft', 'paper', 201],
    ['mc-forge', 'minecraft', 'forge', 202],
    ['mc-fabric', 'minecraft', 'fabric', 203],
    ['mc-bedrock', 'minecraft', 'bedrock', 204],
    ['mc-pocketmine', 'minecraft', 'pocketmine', 205],
    ['rust', 'rust', 'vanilla', 206],
    ['pz', 'pz', 'vanilla', 207],
    ['valheim', 'valheim', 'vanilla', 208],
    ['valheim-plus', 'valheim', 'plus', 209],
    ['valheim-bepinex', 'valheim', 'bepinex', 210],
    ['terraria-tmod', 'terraria', 'tmod', 211],
    ['terraria-tshock', 'terraria', 'tshock', 212],
  ]

  for (const [slug, game, variant, templateVmid] of catalog) {
    // Fields shared by both the update and create branches.
    const shared = {
      game,
      variant,
      ctype: 'game',
      templateVmid,
      resources: { memory: 2048, disk: 20, cpu: 2 },
      network: { bridge: 'vmbr3' },
    }
    await prisma.containerTemplate.upsert({
      where: { slug },
      update: { ...shared },
      create: { slug, ...shared },
    })
  }
  console.log('ContainerTemplate upsert complete.')
}
|
||||
|
||||
/**
 * Create `count` HostSlot rows for one game/variant: ports base..base+count-1,
 * hostnames `${label}-NNNN.zpack.zerolaghub.com`, all status "free".
 * Rows are inserted in chunks of 1000 to stay under DB packet-size limits;
 * skipDuplicates makes re-seeding safe.
 */
async function seedHostSlots({ game, variant, base, count, label, edgeIp = null }) {
  const rows = Array.from({ length: count }, (_, slot) => ({
    game,
    variant,
    slot,
    basePort: base,
    port: base + slot,
    hostname: `${label}-${pad4(slot)}.zpack.zerolaghub.com`,
    edgeIp,
    status: 'free',
  }))

  // Insert in chunks to avoid packet size limits
  const CHUNK = 1000
  for (let offset = 0; offset < rows.length; offset += CHUNK) {
    const batch = rows.slice(offset, offset + CHUNK)
    await prisma.hostSlot.createMany({ data: batch, skipDuplicates: true })
  }
}
|
||||
|
||||
/**
 * Seed the currently-enabled HostSlot ranges:
 *   minecraft/vanilla → 1000 slots, ports 50000–50999, label "mcv"
 *   minecraft/paper   → 1000 slots, ports 51000–51999, label "mcp"
 * Other games (rust 52000+, pz 53000+, valheim 54000+, terraria 55000+)
 * are intentionally not seeded yet to keep the IP/port budget simple;
 * add further seedHostSlots() calls here when those templates go live.
 */
async function upsertHostSlots() {
  await seedHostSlots({ game: 'minecraft', variant: 'vanilla', base: 50000, count: 1000, label: 'mcv' })
  await seedHostSlots({ game: 'minecraft', variant: 'paper', base: 51000, count: 1000, label: 'mcp' })

  console.log('HostSlot seed complete.')
}
|
||||
|
||||
/** Seed templates first, then host slots, in order. */
async function main() {
  for (const step of [upsertTemplates, upsertHostSlots]) {
    await step()
  }
}
|
||||
|
||||
// Entry point. Record failure via process.exitCode rather than
// process.exit(1): process.exit() terminates immediately and would skip
// the .finally() Prisma disconnect below.
main()
  .then(() => console.log('Seed complete.'))
  .catch((e) => {
    console.error(e)
    process.exitCode = 1
  })
  .finally(() => prisma.$disconnect())
|
||||
27
src/prisma/seedports.js
Normal file
27
src/prisma/seedports.js
Normal file
@ -0,0 +1,27 @@
|
||||
// prisma/seedports.js
|
||||
import prisma from '../services/prisma.js';
|
||||
|
||||
|
||||
/**
 * Seed PortPool rows for every port in [start, end] (inclusive) with the
 * given protocol, all status "free". Inserts in batches of 1000;
 * skipDuplicates keeps re-runs idempotent.
 */
async function seedPortPool(start, end, proto = 'tcp') {
  const BATCH = 1000
  for (let batchStart = start; batchStart <= end; batchStart += BATCH) {
    const batchEnd = Math.min(batchStart + BATCH - 1, end)
    const rows = Array.from({ length: batchEnd - batchStart + 1 }, (_, i) => ({
      port: batchStart + i,
      protocol: proto,
      status: 'free',
    }))
    await prisma.portPool.createMany({ data: rows, skipDuplicates: true })
  }
}
|
||||
|
||||
/** Seed the public TCP gameplay port range; UDP stays disabled until needed. */
async function main() {
  // TCP range used for public gameplay ports
  await seedPortPool(50000, 59999, 'tcp')

  // To enable UDP later: await seedPortPool(50000, 59999, 'udp')
}
|
||||
|
||||
// Entry point. Use process.exitCode (not process.exit) so the .finally()
// below can disconnect Prisma before the process terminates; process.exit()
// would kill the process before cleanup runs.
main()
  .then(() => console.log('PortPool seeded.'))
  .catch((e) => {
    console.error(e)
    process.exitCode = 1
  })
  .finally(() => prisma.$disconnect())
|
||||
167
src/queues/postProvision.js
Normal file
167
src/queues/postProvision.js
Normal file
@ -0,0 +1,167 @@
|
||||
// src/queues/postProvision.js
|
||||
// Post-provision edge publish queue (BullMQ v4+). Worker commits/rolls back.
|
||||
// Self-heals missing ctIp/ports by querying DB/Proxmox.
|
||||
// Server startup is now handled by the agent (not here).
|
||||
|
||||
import pkg from 'bullmq';
|
||||
import IORedis from 'ioredis';
|
||||
import prisma from '../services/prisma.js';
|
||||
import proxmox from '../services/proxmoxClient.js';
|
||||
import { PortAllocationService } from '../services/portAllocator.js';
|
||||
import edgePublisher from '../services/edgePublisher.js';
|
||||
|
||||
const { Queue, Worker, QueueEvents } = pkg;
|
||||
|
||||
// Shared Redis connection for the queue, worker, and event listener.
// maxRetriesPerRequest: null is the setting BullMQ requires for its
// blocking connections.
const REDIS_URL = process.env.REDIS_URL || 'redis://127.0.0.1:6379';
const connection = new IORedis(REDIS_URL, { maxRetriesPerRequest: null });

// Single queue carrying all post-provision edge-publish jobs.
const QUEUE_NAME = 'post-provision';
const queue = new Queue(QUEUE_NAME, { connection });
|
||||
|
||||
// ---------------------------------------------------------------------------
// Queue events — observability only; no job state is changed here
// ---------------------------------------------------------------------------
const events = new QueueEvents(QUEUE_NAME, { connection });

events.on('completed', ({ jobId, returnvalue }) => {
  // Best-effort logging: a malformed JSON return value must never crash us.
  try {
    const parsed =
      typeof returnvalue === 'string' ? JSON.parse(returnvalue) : returnvalue;
    console.log(`[postProvision] job ${jobId} completed`, parsed);
  } catch {
    // ignore — logging only
  }
});

events.on('failed', ({ jobId, failedReason }) => {
  console.warn(`[postProvision] job ${jobId} failed:`, failedReason);
});
|
||||
|
||||
// ---------------------------------------------------------------------------
// Enqueue
// ---------------------------------------------------------------------------

/**
 * Enqueue an edge-publish job.
 * @param {{vmid:number, slotHostname?:string, instanceHostname?:string,
 *          ports?:number[], ctIp?:string, game?:string}} payload
 * @returns {Promise<object>} the queued BullMQ job
 */
export async function enqueuePublishEdge(payload) {
  // 5 attempts with exponential backoff; keep a bounded history of
  // completed/failed jobs for debugging.
  return queue.add('publish', payload, {
    attempts: 5,
    backoff: { type: 'exponential', delay: 2000 },
    removeOnComplete: 1000,
    removeOnFail: 1000,
  });
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * Best-effort resolution of a container's IPv4 address.
 * Order: explicit hint → DB record → live `ip addr` probe inside the CT.
 * Returns null when every source comes up empty.
 */
async function resolveCtIp({ vmid, hintIp }) {
  if (hintIp) return hintIp;

  // 1) DB record written at provision time.
  const record = await prisma.containerInstance.findUnique({
    where: { vmid: Number(vmid) },
    select: { ip: true },
  });
  if (record?.ip) return record.ip;

  // 2) Ask the container itself (best effort — CT may be stopped or gone).
  try {
    const cmd = `ip -4 -o addr show dev eth0 | awk '{print $4}' | cut -d/ -f1 | head -n1`;
    const raw = await proxmox.execInContainer(vmid, cmd);
    const candidate = String(raw || '').trim();
    if (candidate) return candidate;
  } catch {
    // swallow — fall through to null
  }

  return null;
}
|
||||
|
||||
/**
 * Resolve the public game-port list for a container.
 * Order: ports supplied on the job → allocatedPorts.game JSON in the DB →
 * empty array (the caller treats [] as a hard failure).
 */
async function resolvePublicPorts({ vmid, hintPorts }) {
  // Trust ports already carried by the job payload.
  if (Array.isArray(hintPorts) && hintPorts.length) return hintPorts;

  // New schema keeps allocations as JSON: { game: [port, ...] }.
  const record = await prisma.containerInstance.findUnique({
    where: { vmid: Number(vmid) },
    select: { allocatedPorts: true },
  });

  const allocated = record?.allocatedPorts;
  if (allocated && typeof allocated === 'object') {
    const gamePorts = allocated.game;
    if (Array.isArray(gamePorts) && gamePorts.length) {
      return gamePorts;
    }
  }

  // Last resort: nothing found.
  return [];
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Worker: self-heals missing ports/IP, publishes the edge, commits ports
// ---------------------------------------------------------------------------

const worker = new Worker(
  QUEUE_NAME,
  async (job) => {
    const { vmid, ports = [], ctIp, slotHostname, game } = job.data || {};
    if (!vmid) throw new Error('invalid job payload: missing vmid');

    // 1) Resolve public ports and the container IP, falling back to the
    //    DB / Proxmox when the producer did not supply them.
    const resolvedPorts = await resolvePublicPorts({ vmid, hintPorts: ports });
    const ip = await resolveCtIp({ vmid, hintIp: ctIp });

    if (!resolvedPorts.length)
      throw new Error('invalid job payload: cannot resolve port(s)');
    if (!ip)
      throw new Error('invalid job payload: cannot resolve CT IP');

    // 2) Publish DNS + Velocity for this instance.
    await edgePublisher.publishEdge({
      vmid,
      ports: resolvedPorts,
      ip,
      slotHostname,
      game,
    });

    // 3) Commit the port allocations.
    // NOTE(review): the ports route calls PortAllocationService.commit with
    // { txnId, vmid } — confirm this { vmid, ports, portType } shape is also
    // supported by the service.
    await PortAllocationService.commit({
      vmid,
      ports: resolvedPorts,
      portType: 'game',
    });

    // 4) Worker result (logged by the QueueEvents 'completed' handler).
    return { vmid, ports: resolvedPorts, ip, dns: true };
  },
  { connection }
);
|
||||
|
||||
// ---------------------------------------------------------------------------
// Failure handler — logging only; retries are governed by the job's backoff
// ---------------------------------------------------------------------------
worker.on('failed', async (job, err) => {
  const reason = err?.message || err;
  console.warn(`[postProvision] job ${job?.id} failed:`, reason);
});

export default { enqueuePublishEdge };
|
||||
15
src/queues/provisionQueue.js
Normal file
15
src/queues/provisionQueue.js
Normal file
@ -0,0 +1,15 @@
|
||||
import { Queue } from 'bullmq'
|
||||
import { redisOptions } from '../utils/redis.js'
|
||||
|
||||
// Retry/cleanup policy applied to every provisioning job.
const defaultJobOptions = {
  attempts: 3,
  backoff: {
    type: 'exponential',
    delay: 2000,
  },
  removeOnComplete: 10,
  removeOnFail: 5,
}

/** Queue that feeds the provisioning worker (jobs added via POST /provision). */
export const provisionQueue = new Queue('zpack-provision', {
  connection: redisOptions,
  defaultJobOptions,
})
|
||||
272
src/routes/containers.controls.js
Normal file
272
src/routes/containers.controls.js
Normal file
@ -0,0 +1,272 @@
|
||||
// src/routes/containers.controls.js
|
||||
// Container lifecycle controls. DELETE wires full rollback with dePublisher.
|
||||
// SAFE orphan handling: never guess hostnames; require DB, Proxmox, or explicit hostname.
|
||||
|
||||
import express from "express";
|
||||
import prisma from "../services/prisma.js";
|
||||
import proxmoxClient from "../services/proxmoxClient.js";
|
||||
import dePublisher from "../services/dePublisher.js";
|
||||
import * as technitium from "../services/technitiumClient.js";
|
||||
|
||||
const router = express.Router();
|
||||
|
||||
/**
 * Best-effort lookup of a CT's hostname straight from its Proxmox config
 * (only works while the CT still exists). Prefers `hostname`, falls back
 * to `name`; returns null on any error or when neither field is set.
 */
async function getProxmoxHostname(vmid) {
  try {
    const cfg = await proxmoxClient.getContainerConfig(vmid);
    return cfg.hostname || cfg.name || null;
  } catch {
    return null; // best-effort only
  }
}
|
||||
|
||||
/**
 * DELETE /api/containers/:vmid
 *
 * Safe teardown:
 * 1) Archive DB record in DeletedInstance (no duplicates).
 * 2) Free ports in PortPool.
 * 3) Delete Proxmox container (if it exists and is not running).
 * 4) Call dePublisher.unpublish() with hostname/ip/ports so:
 *    - Technitium A + SRV are removed
 *    - Cloudflare A + SRV are removed
 *    - Velocity backend is unregistered
 * 5) Delete ContainerInstance row (if present).
 *
 * Three paths, depending on what the DB still knows about the vmid:
 *   CASE A: ContainerInstance row exists   → full teardown as above.
 *   CASE B: only a DeletedInstance archive → repeat teardown from archive data.
 *   CASE C: nothing in the DB (true orphan)→ free ports + delete CT only;
 *           no DNS teardown because we never guess hostnames.
 *
 * NOTE(review): this handler filters PortPool on an `allocatedTo` column,
 * while reconcilePorts and the ports routes key on `vmid`/`customerId` —
 * confirm the PortPool schema actually carries both fields.
 */
router.delete("/:vmid", async (req, res) => {
  const vmid = parseInt(req.params.vmid, 10);

  if (Number.isNaN(vmid)) {
    return res.status(400).json({ ok: false, error: "Invalid VMID" });
  }

  console.log(`[API] DELETE request received for VMID ${vmid}`);

  try {
    // 1) Primary lookup
    const instance = await prisma.containerInstance.findUnique({
      where: { vmid },
    });

    /* -------------------------------------------------------------------
     * CASE A: ContainerInstance exists (normal delete)
     * ----------------------------------------------------------------- */
    if (instance) {
      // Check Proxmox status to avoid deleting a running CT
      let containerStatus = null;
      try {
        containerStatus = await proxmoxClient.getContainerStatus(vmid);
      } catch {
        // CT may already be gone from Proxmox; continue with DB-side cleanup.
        console.log(`[API] VMID ${vmid} not found in Proxmox (status check).`);
      }

      if (containerStatus && containerStatus.status === "running") {
        console.log(
          `[API] ⚠️ VMID ${vmid} is running — refusing deletion until stopped.`
        );
        // 409 Conflict: the caller must stop the CT first.
        return res.status(409).json({
          ok: false,
          message: `Container ${vmid} is currently running. Stop it before deletion.`,
        });
      }

      // Archive into DeletedInstance (idempotent)
      const existingDeleted = await prisma.deletedInstance.findFirst({
        where: { vmid },
      });

      let archivedId = null;

      if (!existingDeleted) {
        const deleted = await prisma.deletedInstance.create({
          data: {
            vmid: instance.vmid,
            customerId: instance.customerId,
            hostname: instance.hostname,
            game: instance.game,
            variant: instance.variant,
            ports: instance.ports,
            ip: instance.ip,
            reason: "api_delete",
          },
        });
        archivedId = deleted.id;
        console.log(
          `[API] Archived vmid=${vmid} into DeletedInstance (id=${deleted.id})`
        );
      } else {
        archivedId = existingDeleted.id;
        console.log(
          `[API] DeletedInstance already exists for vmid=${vmid}; skipping duplicate archive.`
        );
      }

      // Free ports from PortPool
      const portsToFree = instance.ports || [];
      if (portsToFree.length) {
        console.log(
          `[API] Freeing ports for vmid=${vmid}:`,
          portsToFree
        );
        await prisma.portPool.updateMany({
          where: { port: { in: portsToFree }, allocatedTo: vmid },
          data: { allocatedTo: null, status: "free" },
        });
      }

      // Delete Proxmox CT (best effort)
      try {
        await proxmoxClient.deleteContainer(vmid);
        console.log(`[API] Deleted Proxmox container vmid=${vmid}`);
      } catch (err) {
        // Non-fatal: the CT may already be gone; teardown continues.
        console.warn(
          `[API] ⚠️ Error deleting Proxmox container vmid=${vmid}: ${err.message}`
        );
      }

      // DNS + Velocity teardown
      try {
        // Never guess hostnames: DB value first, then live Proxmox config.
        const hostname =
          instance.hostname || (await getProxmoxHostname(vmid)) || null;

        console.log(
          `[API] Calling dePublisher.unpublish() for vmid=${vmid}, hostname=${hostname}, ports=${portsToFree}`
        );

        await dePublisher.unpublish({
          vmid,
          hostname,
          ip: instance.ip || null,
          ports: portsToFree,
          game: instance.game,
          customerId: instance.customerId,
        });
      } catch (err) {
        // Non-fatal: the DB row removal below still proceeds.
        console.error(`[API] Error during dePublisher.unpublish():`, err.message);
      }

      // Finally, delete ContainerInstance row
      await prisma.containerInstance.delete({
        where: { vmid },
      });

      return res.json({
        ok: true,
        vmid,
        archived: archivedId,
      });
    }

    /* -------------------------------------------------------------------
     * CASE B: ContainerInstance missing → use DeletedInstance fallback
     * ----------------------------------------------------------------- */
    const archived = await prisma.deletedInstance.findFirst({
      where: { vmid },
    });

    if (archived) {
      console.log(
        `[API] Using DeletedInstance fallback for vmid=${vmid} (hostname=${archived.hostname})`
      );

      // Free ports from PortPool using archived ports
      if (Array.isArray(archived.ports) && archived.ports.length) {
        console.log(
          `[API] Freeing ports from DeletedInstance for vmid=${vmid}:`,
          archived.ports
        );
        await prisma.portPool.updateMany({
          where: { port: { in: archived.ports }, allocatedTo: vmid },
          data: { allocatedTo: null, status: "free" },
        });
      }

      // Delete Proxmox CT if present (best effort)
      try {
        await proxmoxClient.deleteContainer(vmid);
        console.log(`[API] Deleted Proxmox container vmid=${vmid} (fallback)`);
      } catch (err) {
        console.warn(
          `[API] ⚠️ Error deleting Proxmox container vmid=${vmid} (fallback): ${err.message}`
        );
      }

      // Full teardown via dePublisher, using the archived hostname/ip/ports
      try {
        await dePublisher.unpublish({
          vmid,
          hostname: archived.hostname,
          ip: archived.ip,
          ports: archived.ports || [],
          game: archived.game,
          customerId: archived.customerId,
        });
      } catch (err) {
        console.error(
          `[API] Error in dePublisher.unpublish() for archived vmid=${vmid}:`,
          err.message
        );
      }

      return res.json({
        ok: true,
        vmid,
        used: "DeletedInstance",
        note:
          "ContainerInstance missing; teardown completed using archived DeletedInstance data.",
      });
    }

    /* -------------------------------------------------------------------
     * CASE C: True orphan – no DB in either table
     * ----------------------------------------------------------------- */
    console.warn(
      `[API] VMID ${vmid} not found in ContainerInstance or DeletedInstance – performing partial teardown.`
    );

    // Free any orphan ports
    const orphanPorts = await prisma.portPool.findMany({
      where: { allocatedTo: vmid },
    });

    if (orphanPorts.length) {
      const portNumbers = orphanPorts.map((p) => p.port);
      console.log(
        `[API] Freeing orphan ports for vmid=${vmid}:`,
        portNumbers
      );
      await prisma.portPool.updateMany({
        where: { allocatedTo: vmid },
        data: { allocatedTo: null, status: "free" },
      });
    }

    // Delete Proxmox CT if present
    try {
      await proxmoxClient.deleteContainer(vmid);
      console.log(`[API] Deleted Proxmox container vmid=${vmid} (orphan path)`);
    } catch (err) {
      console.warn(
        `[API] ⚠️ Error deleting orphan Proxmox container vmid=${vmid}: ${err.message}`
      );
    }

    // No DNS/Velocity teardown here: without DB data we cannot safely
    // determine which hostname records belong to this vmid.
    return res.json({
      ok: true,
      vmid,
      warning:
        "Instance not found in DB; ports freed and Proxmox container deleted where possible.",
    });
  } catch (err) {
    console.error("[API] Error in DELETE /containers/:vmid:", err);
    return res
      .status(500)
      .json({ ok: false, error: "Failed to delete container" });
  }
});

export default router;
|
||||
16
src/routes/containers.create.js
Normal file
16
src/routes/containers.create.js
Normal file
@ -0,0 +1,16 @@
|
||||
import { Router } from 'express';
|
||||
import { createContainer } from '../api/provision.js';
|
||||
|
||||
const router = Router();

/**
 * POST /api/containers/create (mounted at /create; relative path here is "/").
 * Delegates to createContainer; a thrown err.httpCode maps to the HTTP status,
 * defaulting to 500.
 */
router.post('/', async (req, res) => {
  try {
    const created = await createContainer(req.body, req.user);
    res.status(201).json({ ok: true, data: created });
  } catch (err) {
    const status = err.httpCode || 500;
    res.status(status).json({ ok: false, error: err.message });
  }
});

export default router;
|
||||
12
src/routes/containers.js
Normal file
12
src/routes/containers.js
Normal file
@ -0,0 +1,12 @@
|
||||
// src/routes/containers.js
|
||||
import express from 'express';
|
||||
import createRouter from './containers.create.js';
|
||||
import controlsRouter from './containers.controls.js';
|
||||
|
||||
// Aggregate container router; mounted paths are unchanged.
const router = express.Router();

router.use('/create', createRouter);   // creation → containers.create.js
router.use(controlsRouter);            // lifecycle (DELETE /:vmid) → containers.controls.js

export default router;
|
||||
19
src/routes/debug.js
Normal file
19
src/routes/debug.js
Normal file
@ -0,0 +1,19 @@
|
||||
// src/routes/debug.js
|
||||
import { Router } from 'express';
|
||||
import { getTemplateOrThrow } from '../services/templateResolver.js';
|
||||
|
||||
const r = Router();

/**
 * GET /template?slug=…  or  ?game=…&variant=…
 * Debug endpoint: resolves a ContainerTemplate and echoes its slug + VMID.
 * Resolution errors are forwarded to the app-level error handler.
 */
r.get('/template', async (req, res, next) => {
  try {
    const query = req.query;
    const lookup = {
      templateSlug: query.slug || undefined,
      game: query.game || undefined,
      variant: query.variant || undefined,
    };
    const tpl = await getTemplateOrThrow(lookup);
    res.json({ ok: true, slug: tpl.slug, templateVmid: tpl.templateVmid });
  } catch (err) {
    next(err);
  }
});

export default r;
|
||||
42
src/routes/edge.js
Normal file
42
src/routes/edge.js
Normal file
@ -0,0 +1,42 @@
|
||||
import express from 'express';
|
||||
import { publishEdge, unpublishEdge } from '../services/edgePublisher.js';
|
||||
import opnsenseClient from '../services/opnsenseClient.js';
|
||||
import technitiumClient from '../services/technitiumClient.js';
|
||||
|
||||
const router = express.Router();

/**
 * GET /health — probe OPNsense and Technitium; overall ok only when both
 * report healthy. A probe throwing maps to HTTP 500.
 */
router.get('/health', async (_req, res) => {
  try {
    const opn = await opnsenseClient.health();
    const dns = await technitiumClient.healthDiag();

    res.json({
      ok: opn.ok && dns.ok,
      opnsense: opn.ok ? 'reachable' : 'unreachable',
      technitium: dns.ok ? 'reachable' : 'unreachable',
    });
  } catch (err) {
    res.status(500).json({ ok: false, error: err.message });
  }
});
|
||||
|
||||
|
||||
/** POST /publish — forward the body straight to publishEdge; errors → 500. */
router.post('/publish', async (req, res) => {
  try {
    res.json(await publishEdge(req.body));
  } catch (err) {
    res.status(500).json({ ok: false, error: err.message });
  }
});
|
||||
|
||||
/** POST /unpublish — forward the body straight to unpublishEdge; errors → 500. */
router.post('/unpublish', async (req, res) => {
  try {
    res.json(await unpublishEdge(req.body));
  } catch (err) {
    res.status(500).json({ ok: false, error: err.message });
  }
});

export default router;
|
||||
29
src/routes/edge.test.js
Normal file
29
src/routes/edge.test.js
Normal file
@ -0,0 +1,29 @@
|
||||
import express from 'express';
|
||||
import { publishEdge, unpublishEdge, edgeHealth } from '../services/edgePublisher.js';
|
||||
|
||||
const r = express.Router();

/**
 * GET /edge/health — 200 when healthy, 503 when degraded, 500 if the probe
 * itself throws. The try/catch is required: Express 4 does not catch async
 * handler rejections, so without it a rejected edgeHealth() would leave the
 * request hanging with an unhandled rejection.
 */
r.get('/edge/health', async (_req, res) => {
  try {
    const out = await edgeHealth();
    res.status(out.ok ? 200 : 503).json(out);
  } catch (e) {
    res.status(500).json({ ok: false, error: e?.message || String(e) });
  }
});
|
||||
|
||||
/** POST /edge/publish — proxy to publishEdge; any failure maps to HTTP 500. */
r.post('/edge/publish', async (req, res) => {
  try {
    const out = await publishEdge(req.body);
    res.json(out);
  } catch (err) {
    res.status(500).json({ ok: false, error: err?.message || String(err) });
  }
});
|
||||
|
||||
/** POST /edge/unpublish — proxy to unpublishEdge; any failure maps to HTTP 500. */
r.post('/edge/unpublish', async (req, res) => {
  try {
    const out = await unpublishEdge(req.body);
    res.json(out);
  } catch (err) {
    res.status(500).json({ ok: false, error: err?.message || String(err) });
  }
});

export default r;
|
||||
36
src/routes/instances.js
Normal file
36
src/routes/instances.js
Normal file
@ -0,0 +1,36 @@
|
||||
// src/routes/instances.js
|
||||
import express from 'express';
|
||||
import { provisionAgentInstance } from '../api/provisionAgent.js';
|
||||
|
||||
const router = express.Router();

/**
 * POST /api/instances — provision a new agent-driven instance (v1).
 *
 * Expected body fields:
 *   customerId, game, variant, version, world,
 *   ctype ("game" | "dev"), name,
 *   cpuCores, memoryMiB, diskGiB,
 *   portsNeeded (non-MC games only),
 *   artifactPath? , javaPath?   (both optional)
 */
router.post('/', async (req, res, next) => {
  try {
    const provisioned = await provisionAgentInstance(req.body);
    return res.json({ ok: true, ...provisioned });
  } catch (err) {
    return next(err); // delegate to the app-level error handler
  }
});

export default router;
|
||||
68
src/routes/ports.js
Normal file
68
src/routes/ports.js
Normal file
@ -0,0 +1,68 @@
|
||||
// src/routes/ports.js
|
||||
// Slot/port management for FE/ops.
|
||||
|
||||
import express from 'express';
|
||||
import crypto from 'node:crypto';
|
||||
import prisma from '../services/prisma.js';
|
||||
import { PortAllocationService } from '../services/portAllocator.js';
|
||||
|
||||
const router = express.Router();

/**
 * POST /reserve-slot — reserve a hostname slot + public port for a customer.
 * Requires customerId, game, variant; vmid and purpose are optional.
 * Returns a txnId the caller must later /commit or /rollback.
 */
router.post('/reserve-slot', async (req, res) => {
  try {
    const { customerId, game, variant, vmid = null, purpose = 'game_main' } = req.body || {};
    if (!customerId || !game || !variant) {
      return res.status(400).json({ ok: false, error: 'customerId, game, variant are required' });
    }

    // txnId ties the pending reservation to a later commit/rollback.
    const txnId = crypto.randomUUID();
    const reservation = await PortAllocationService.reserveSlotAndPort({
      game, variant, customerId, vmid, purpose, txnId,
    });
    const { slotId, port, hostname } = reservation;

    return res.json({ ok: true, txnId, slotId, port, hostname });
  } catch (err) {
    return res.status(err.httpCode || 500).json({ ok: false, error: err.message });
  }
});
|
||||
|
||||
// POST /ports/commit — finalize a pending reservation onto a concrete vmid.
router.post('/commit', async (req, res) => {
  try {
    const { txnId, vmid } = req.body || {};
    if (!txnId || !vmid) {
      return res.status(400).json({ ok: false, error: 'txnId and vmid are required' });
    }
    const committed = await PortAllocationService.commit({ txnId, vmid: Number(vmid) });
    return res.json({ ok: true, ...committed });
  } catch (err) {
    return res.status(err.httpCode || 500).json({ ok: false, error: err.message });
  }
});
|
||||
|
||||
// POST /ports/rollback — release a pending reservation that will not be used.
router.post('/rollback', async (req, res) => {
  try {
    const { txnId } = req.body || {};
    if (!txnId) {
      return res.status(400).json({ ok: false, error: 'txnId is required' });
    }
    const rolledBack = await PortAllocationService.rollbackPending({ txnId });
    return res.json({ ok: true, ...rolledBack });
  } catch (err) {
    return res.status(err.httpCode || 500).json({ ok: false, error: err.message });
  }
});
|
||||
|
||||
// GET /ports/customer/:customerId — list a customer's allocated public ports
// and host slots. The two lookups are independent, so they run in parallel
// (the original awaited them sequentially).
router.get('/customer/:customerId', async (req, res) => {
  try {
    const { customerId } = req.params;
    const [ports, slots] = await Promise.all([
      prisma.portPool.findMany({
        where: { customerId, status: 'allocated' },
        orderBy: { port: 'asc' },
        select: { port: true, protocol: true, vmid: true, purpose: true },
      }),
      prisma.hostSlot.findMany({
        where: { customerId, status: 'allocated' },
        orderBy: { port: 'asc' },
        select: { hostname: true, port: true, vmid: true, purpose: true, game: true, variant: true },
      }),
    ]);
    return res.json({ ok: true, ports, slots });
  } catch (err) {
    return res.status(500).json({ ok: false, error: err.message });
  }
});

export default router;
|
||||
52
src/routes/promSd.js
Normal file
52
src/routes/promSd.js
Normal file
@ -0,0 +1,52 @@
|
||||
// /src/routes/promSd.js
|
||||
import { Router } from 'express'
|
||||
import prisma from '../services/prisma.js';
|
||||
|
||||
const router = Router()
|
||||
|
||||
// Bearer-token gate for the Prometheus service-discovery endpoints.
// The presented token must exactly match PROM_SD_TOKEN; anything else
// (missing header, wrong scheme, empty or mismatched token) gets a 401.
function auth(req, res, next) {
  const header = req.headers.authorization || ''
  let token = null
  if (header.startsWith('Bearer ')) token = header.slice(7)
  const expected = process.env.PROM_SD_TOKEN
  if (!token || token !== expected) {
    return res.status(401).json({ error: 'unauthorized' })
  }
  next()
}
|
||||
|
||||
// Bridge-driven exporter targets
|
||||
// Bridge-driven exporter targets for Prometheus HTTP service discovery.
// Emits one target group per (running instance, exporter) pair; labels carry
// instance metadata plus __meta_* hints consumed by the SD relabel config.
router.get('/exporters', auth, async (_req, res) => {
  try {
    const rows = await prisma.containerInstance.findMany({
      where: { status: { in: ['running'] }, ip: { not: null }, bridge: { not: null } }
    })

    // Which exporters to scrape per bridge; unknown bridges fall back to vmbr3.
    const BRIDGE_EXPORTERS = {
      vmbr2: [{ name: 'node', port: 9100, path: '/metrics' }],
      vmbr3: [{ name: 'node', port: 9100, path: '/metrics' }]
    }

    const groups = []
    for (const r of rows) {
      const exporters = BRIDGE_EXPORTERS[r.bridge] || BRIDGE_EXPORTERS.vmbr3
      for (const ex of exporters) {
        groups.push({
          targets: [`${r.ip}:${ex.port}`],
          labels: {
            job: `zlh_${ex.name}`,
            exporter: ex.name,
            vmid: String(r.vmid),
            customerId: r.customerId,
            ctype: r.ctype,
            game: r.game,
            variant: r.variant,
            bridge: r.bridge,
            __meta_metrics_path: ex.path,
            __meta_scheme: ex.scheme || 'http'
          }
        })
      }
    }

    res.json(groups)
  } catch (err) {
    // Fix: the original had no error handling, so a DB failure became an
    // unhandled rejection and the request hung. Respond 500 like the other
    // routes in this service.
    res.status(500).json({ error: err.message })
  }
})

export default router // <-- important
|
||||
15
src/routes/proxmox.js
Normal file
15
src/routes/proxmox.js
Normal file
@ -0,0 +1,15 @@
|
||||
// /src/routes/proxmox.js
|
||||
import { Router } from 'express'
|
||||
import proxmox from '../services/proxmoxClient.js'
|
||||
const router = Router()
|
||||
|
||||
// GET /proxmox/ping — connectivity probe against the Proxmox cluster API.
router.get('/ping', async (_req, res) => {
  try {
    const data = await proxmox.ping()
    res.json({ ok: true, nodes: data })
  } catch (err) {
    res.status(500).json({ ok: false, error: err.message })
  }
})

export default router
|
||||
47
src/routes/templates.js
Normal file
47
src/routes/templates.js
Normal file
@ -0,0 +1,47 @@
|
||||
// src/routes/templates.js
|
||||
import { Router } from 'express';
|
||||
import prisma from '../services/prisma.js';
|
||||
|
||||
const router = Router();
|
||||
|
||||
/**
 * GET /api/containers/templates
 * Read-only list of container templates for FE selection.
 */
router.get('/api/containers/templates', async (req, res, next) => {
  try {
    const rows = await prisma.containerTemplate.findMany({
      orderBy: [{ game: 'asc' }, { variant: 'asc' }],
      select: {
        id: true,
        slug: true,
        ctype: true,
        game: true,
        variant: true,
        templateVmid: true,
        resources: true,
        network: true,
        storage: true,
        tags: true,
      },
    });

    // Shape DB rows into the FE payload (drops `id`, renames `resources`).
    const payload = rows.map((row) => ({
      slug: row.slug,
      ctype: row.ctype,
      game: row.game,
      variant: row.variant,
      templateVmid: row.templateVmid,
      defaultResources: row.resources,
      network: row.network,
      storage: row.storage,
      tags: row.tags,
    }));

    res.json(payload);
  } catch (err) {
    next(err);
  }
});

export default router;
|
||||
36
src/scripts/fixCustomerAndPorts.js
Normal file
36
src/scripts/fixCustomerAndPorts.js
Normal file
@ -0,0 +1,36 @@
|
||||
// scripts/fixCustomerAndPorts.js
|
||||
import 'dotenv/config'
|
||||
import prisma from '../services/prisma.js';
|
||||
|
||||
|
||||
// Dev bootstrap helper: make sure a customer row and a default port block
// exist. Usage: node fixCustomerAndPorts.js [customerId] [email]
async function main() {
  const [, , argCustomerId, argEmail] = process.argv
  const customerId = argCustomerId || 'u001'
  const email = argEmail || 'dev@zerolaghub.local'

  // 1) Ensure customer exists (no-op update keeps an existing row untouched)
  const customer = await prisma.customer.upsert({
    where: { id: customerId },
    update: {},
    create: { id: customerId, email }
  })
  console.log('Customer ready:', customer.id)

  // 2) Ensure a port block exists (adjust basePort/count if you want different)
  const basePort = 50000
  const count = 10

  const pa = await prisma.portAllocation.upsert({
    where: { customerId: customerId },
    update: {}, // keep existing if present
    create: { customerId: customerId, basePort, count }
  })
  console.log('PortAllocation ready:', pa.customerId, pa.basePort, pa.count)

  await prisma.$disconnect()
}

main().catch(async (e) => {
  console.error(e)
  await prisma.$disconnect()
  process.exit(1)
})
|
||||
8
src/scripts/runDnsReconcile.js
Executable file
8
src/scripts/runDnsReconcile.js
Executable file
@ -0,0 +1,8 @@
|
||||
#!/usr/bin/env node
// CLI entry point: run a single DNS reconcile pass, then close the DB handle.
// Usage: runDnsReconcile.js [--apply]
import { reconcileDNS } from "../audit/dnsReconcile.js";
import prisma from "../services/prisma.js";

// --apply is forwarded to reconcileDNS; presumably it toggles write mode vs.
// dry-run — confirm against dnsReconcile.js.
const apply = process.argv.includes("--apply");

// Top-level await is fine: this file is an ES module executed directly by node.
await reconcileDNS({ apply });
await prisma.$disconnect();
|
||||
74
src/scripts/testDnsSync.js
Normal file
74
src/scripts/testDnsSync.js
Normal file
@ -0,0 +1,74 @@
|
||||
/**
|
||||
* ZeroLagHub – DNS Sync Audit v2
|
||||
* Compares DB, Technitium, and Cloudflare for divergence.
|
||||
* Returns which hostnames are missing, duplicated, or orphaned.
|
||||
*/
|
||||
|
||||
import prisma from "../services/prisma.js";
|
||||
import * as technitium from "../services/technitiumClient.js";
|
||||
import * as cloudflare from "../services/cloudflareClient.js";
|
||||
|
||||
/**
 * Cross-checks hostnames across three sources — the DB (containerInstance),
 * Technitium, and Cloudflare — and reports which are missing, duplicated,
 * or orphaned. Read-only: only queries and console output.
 *
 * Fix: membership tests previously used Array.includes inside filters
 * (O(n²) over the record sets); a Set makes them O(n).
 */
async function testDnsSync() {
  console.log("🔍 Running DNS sync test...");

  // --- 1️⃣ Get hostnames from DB ---
  const dbInstances = await prisma.containerInstance.findMany({
    select: { hostname: true },
  });
  const dbHostnames = dbInstances.map((i) => i.hostname);
  // Set for O(1) membership tests below.
  const dbHostnameSet = new Set(dbHostnames);
  console.log(`🗃️ DB hostnames: ${dbHostnames.length}`);

  // Strip the SRV service prefix so SRV records compare as plain hostnames.
  const stripSrvPrefix = (r) =>
    r.type === "SRV" ? r.name.replace(/^_minecraft\._tcp\./, "") : r.name;

  // --- 2️⃣ Get Technitium ---
  const techRecords = await technitium.listRecords();
  const techHosts = new Set(
    techRecords.filter((r) => ["A", "SRV"].includes(r.type)).map(stripSrvPrefix)
  );
  console.log(`🧩 Technitium records: ${techHosts.size}`);

  // --- 3️⃣ Get Cloudflare ---
  const cfRecords = await cloudflare.listAllRecords();
  const cfHosts = new Set(
    cfRecords.filter((r) => ["A", "SRV"].includes(r.type)).map(stripSrvPrefix)
  );
  console.log(`☁️ Cloudflare records: ${cfHosts.size}`);

  // --- 4️⃣ Compute sets ---
  const techOnly = [...techHosts].filter((h) => !dbHostnameSet.has(h));
  const cfOnly = [...cfHosts].filter((h) => !dbHostnameSet.has(h));
  const inBoth = [...techHosts].filter((h) => cfHosts.has(h));
  const dbMissing = dbHostnames.filter(
    (h) => !techHosts.has(h) && !cfHosts.has(h)
  );

  // --- 5️⃣ Display results ---
  console.log("\n🧾 ===== DNS Audit Summary =====");
  console.log(`Technitium-only records (${techOnly.length}):`);
  techOnly.forEach((h) => console.log(`  - ${h}`));

  console.log(`\nCloudflare-only records (${cfOnly.length}):`);
  cfOnly.forEach((h) => console.log(`  - ${h}`));

  console.log(`\nIn both (${inBoth.length}):`);
  inBoth.forEach((h) => console.log(`  - ${h}`));

  console.log(`\nMissing from both (${dbMissing.length}):`);
  dbMissing.forEach((h) => console.log(`  - ${h}`));

  console.log("\n✅ Done.\n");
}

testDnsSync()
  .catch((err) => console.error("❌ DNS sync test failed:", err))
  .finally(async () => await prisma.$disconnect());
|
||||
43
src/scripts/test_edge.sh
Executable file
43
src/scripts/test_edge.sh
Executable file
@ -0,0 +1,43 @@
|
||||
#!/bin/bash
# End-to-end smoke test for the /api/edge publish/unpublish flow.
# Requires: a local API on :3000, jq, and env vars OPNSENSE_API_KEY,
# OPNSENSE_API_SECRET, OPNSENSE_API_URL, TECHNITIUM_API_URL,
# TECHNITIUM_API_TOKEN, DNS_ZONE. Aborts on the first failing command.
set -euo pipefail

API="http://localhost:3000/api/edge"

echo "=== 1) Health check ==="
curl -s "$API/health" | jq .

echo -e "\n=== 2) Publish ==="
# Publish a fixed test instance; response is kept for inspection.
curl -s -X POST "$API/publish" \
  -H "Content-Type: application/json" \
  --data-binary '{
  "vmid": 5001,
  "ctype": "game",
  "hostname": "mc-0001.zpack.zerolaghub.com",
  "publicPort": 25565,
  "privateIp": "10.200.0.50",
  "privatePort": 25565
  }' | tee /tmp/publish.json | jq .

echo -e "\n=== 3) Verify OPNsense Relayd rule ==="
curl -sk -u "$OPNSENSE_API_KEY:$OPNSENSE_API_SECRET" \
  "$OPNSENSE_API_URL/relayd/service/search" | jq .

echo -e "\n=== 4) Verify Technitium DNS A record ==="
curl -s "$TECHNITIUM_API_URL/dns/records/search?zone=$DNS_ZONE&type=A&token=$TECHNITIUM_API_TOKEN" | jq .

echo -e "\n=== 5) Verify Technitium DNS SRV record ==="
curl -s "$TECHNITIUM_API_URL/dns/records/search?zone=$DNS_ZONE&type=SRV&token=$TECHNITIUM_API_TOKEN" | jq .

echo -e "\n=== 6) Unpublish ==="
curl -s -X POST "$API/unpublish" \
  -H "Content-Type: application/json" \
  --data-binary '{
  "vmid": 5001,
  "hostname": "mc-0001.zpack.zerolaghub.com",
  "publicPort": 25565
  }' | jq .

echo -e "\n=== 7) Verify cleanup ==="
# Both Relayd and Technitium should no longer list the test instance.
curl -sk -u "$OPNSENSE_API_KEY:$OPNSENSE_API_SECRET" \
  "$OPNSENSE_API_URL/relayd/service/search" | jq .
curl -s "$TECHNITIUM_API_URL/dns/records/search?zone=$DNS_ZONE&token=$TECHNITIUM_API_TOKEN" | jq .
|
||||
195
src/services/cloudflareClient.js
Normal file
195
src/services/cloudflareClient.js
Normal file
@ -0,0 +1,195 @@
|
||||
// src/services/cloudflareClient.js
|
||||
// FINAL, CLEAN, BULLETPROOF CLOUDFLARE CLIENT
|
||||
|
||||
// src/services/cloudflareClient.js
// Thin Cloudflare DNS client used by edgePublisher/dePublisher.

import axios from "axios";

const CF_API_BASE = "https://api.cloudflare.com/client/v4";
const CF_ZONE_ID = process.env.CLOUDFLARE_ZONE_ID;
const CF_API_TOKEN = process.env.CLOUDFLARE_API_TOKEN;
// Zone name used to normalize short hostnames into FQDNs.
const CF_ZONE = process.env.CLOUDFLARE_ZONE_NAME || "zerolaghub.quest";

// Warn (don't throw) so the module can still be imported in dev without creds;
// API calls will simply fail later.
if (!CF_API_TOKEN || !CF_ZONE_ID) {
  console.warn("[cloudflareClient] ⚠ Missing API token or zone ID");
}

// Shared axios instance; every request carries the bearer token.
const cf = axios.create({
  baseURL: CF_API_BASE,
  headers: {
    Authorization: `Bearer ${CF_API_TOKEN}`,
    "Content-Type": "application/json",
  },
});
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Helpers */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
 * Normalize a hostname to a lowercase FQDN within CF_ZONE.
 * Nullish/empty input yields "".
 */
function normalizeHostname(hostname) {
  if (!hostname) return "";

  const lowered = hostname.trim().toLowerCase();
  return lowered.endsWith(`.${CF_ZONE}`) ? lowered : `${lowered}.${CF_ZONE}`;
}
|
||||
|
||||
/**
 * Inverse of normalizeHostname: strip a trailing ".<CF_ZONE>" suffix,
 * returning the bare label. Already-bare names pass through unchanged.
 */
function extractBase(hostname) {
  const lowered = hostname.trim().toLowerCase();
  const suffix = `.${CF_ZONE}`;
  return lowered.endsWith(suffix)
    ? lowered.slice(0, -suffix.length)
    : lowered;
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Create A */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
 * Create an unproxied A record (<hostname>.<zone> → ip, TTL 60).
 * Returns true on success, false on API failure (errors are logged, not thrown).
 */
export async function createARecord({ hostname, ip }) {
  const fqdn = normalizeHostname(hostname);

  const payload = {
    type: "A",
    name: fqdn,
    content: ip,
    ttl: 60,
    proxied: false,
  };

  try {
    await cf.post(`/zones/${CF_ZONE_ID}/dns_records`, payload);
    console.log(`[cloudflareClient] ➕ A: ${fqdn} -> ${ip}`);
    return true;
  } catch (err) {
    console.error(
      `[cloudflareClient] ❌ A create failed for ${fqdn}`,
      err.response?.data || err.message
    );
    return false;
  }
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Create SRV (_minecraft._tcp) */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
 * Create an SRV record pointing at <hostname>.<zone>:<port>.
 *
 * Fix/generalization: callers (edgePublisher) pass `service`/`protocol` for
 * non-Minecraft games, but the original ignored them and always published
 * `_minecraft._tcp`. They now default to the Minecraft values, so existing
 * Minecraft call sites behave identically.
 *
 * @param {object} opts
 * @param {string} opts.hostname              short name or FQDN
 * @param {number} opts.port                  public SRV port
 * @param {string} [opts.service="minecraft"] service label (leading "_" optional)
 * @param {string} [opts.protocol="tcp"]      "tcp"/"udp" (leading "_" optional)
 * @returns {Promise<boolean>} true on success, false on API failure (logged)
 */
export async function createSRVRecord({
  hostname,
  port,
  service = "minecraft",
  protocol = "tcp",
}) {
  const fqdn = normalizeHostname(hostname);
  const base = extractBase(hostname);

  // Accept both "minecraft" and "_minecraft" spellings.
  const svc = service.startsWith("_") ? service : `_${service}`;
  const proto = protocol.startsWith("_") ? protocol : `_${protocol}`;

  // Cloudflare stores SRV as: _<service>._<proto>.<hostname>.<zone>
  const srvName = `${svc}.${proto}.${fqdn}`;

  try {
    await cf.post(`/zones/${CF_ZONE_ID}/dns_records`, {
      type: "SRV",
      name: srvName,
      data: {
        service: svc,
        proto,
        name: base, // Not full FQDN
        target: fqdn,
        port,
        priority: 0,
        weight: 0,
      },
      ttl: 60,
    });

    console.log(
      `[cloudflareClient] ➕ SRV: ${srvName} -> ${fqdn}:${port}`
    );
    return true;
  } catch (err) {
    console.error(
      `[cloudflareClient] ❌ SRV create failed for ${srvName}`,
      err.response?.data || err.message
    );
    return false;
  }
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Delete ANY matching A + SRV records */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
 * Delete every Cloudflare record associated with a hostname: the A record
 * plus any SRV variants (_minecraft._tcp.<fqdn> and rarer spellings).
 * Best-effort: per-name query/delete failures are logged and skipped.
 *
 * @param {string} hostname  short name or FQDN
 * @returns {Promise<boolean>} true if at least one record was deleted
 */
export async function deleteRecordByName(hostname) {
  const fqdn = normalizeHostname(hostname);
  const base = extractBase(hostname);

  // SRV stored format:
  const srvExact = `_minecraft._tcp.${fqdn}`;

  // All candidate names to search for
  const patterns = [
    fqdn, // A record
    base, // Rare case (not used)
    srvExact, // Correct SRV
    `_minecraft._tcp.${base}`, // Rare CF variations
  ];

  let deleted = 0;
  // Dedupe: fqdn === base when the input was already a bare label.
  const tried = new Set();

  console.log(`[cloudflareClient] 🧹 BEGIN delete for base=${hostname}`);

  for (const name of patterns) {
    if (!name || tried.has(name)) continue;
    tried.add(name);

    console.log(`[cloudflareClient] 🔍 Searching name=${name}`);

    let res;
    try {
      res = await cf.get(
        `/zones/${CF_ZONE_ID}/dns_records?name=${encodeURIComponent(name)}`
      );
    } catch (err) {
      // A failed lookup for one candidate must not abort the other patterns.
      console.warn(
        `[cloudflareClient] ⚠ Query failed for name=${name}:`,
        err.response?.data || err.message
      );
      continue;
    }

    const matches = res.data?.result || [];
    if (!matches.length) {
      console.log(`[cloudflareClient] (CF) No match for: ${name}`);
      continue;
    }

    // Delete each match individually; count successes only.
    for (const rec of matches) {
      try {
        await cf.delete(`/zones/${CF_ZONE_ID}/dns_records/${rec.id}`);
        console.log(
          `[cloudflareClient] 🗑️ Deleted ${rec.type} ${rec.name}`
        );
        deleted++;
      } catch (err) {
        console.error(
          `[cloudflareClient] ❌ Failed deleting ${rec.type} ${rec.name}`,
          err.response?.data || err.message
        );
      }
    }
  }

  if (deleted === 0) {
    console.log(
      `[cloudflareClient] ⚠️ No Cloudflare records deleted for ${hostname}`
    );
    return false;
  }

  console.log(
    `[cloudflareClient] ✅ Cloudflare cleanup completed for ${hostname}`
  );
  return true;
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Export */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
// Named exports are also bundled into a default object so callers can use
// either `import cf from ...` or `import { createARecord } from ...`.
export default {
  createARecord,
  createSRVRecord,
  deleteRecordByName,
};
|
||||
84
src/services/dePublisher.js
Normal file
84
src/services/dePublisher.js
Normal file
@ -0,0 +1,84 @@
|
||||
import proxyClient from "./proxyClient.js";
|
||||
import dns from "./technitiumClient.js";
|
||||
import cloudflareClient from "./cloudflareClient.js";
|
||||
import velocityClient from "./velocityClient.js";
|
||||
|
||||
// Lowercase + trim; nullish/empty input becomes the empty string.
function normalizeHostname(hostname) {
  return hostname ? hostname.trim().toLowerCase() : "";
}
|
||||
|
||||
// Append the public zone to bare labels; names that already contain a dot
// are assumed to be FQDNs and pass through untouched.
function toFqdn(hostname) {
  const zone = process.env.CF_ZONE_NAME || "zerolaghub.quest";
  return hostname.includes(".") ? hostname : `${hostname}.${zone}`;
}
|
||||
|
||||
/**
 * Tear down all edge publishing for an instance: Traefik route, Velocity
 * registration, Technitium A/SRV, and Cloudflare A/SRV.
 *
 * Best-effort by design: each stage has its own try/catch so one failing
 * subsystem never blocks cleanup of the others. Always resolves to true.
 *
 * @param {object} opts
 * @param {string} opts.hostname            short hostname or FQDN
 * @param {number} opts.vmid                container VMID (log context only)
 * @param {string} [opts.game="minecraft"]  currently unused in this body
 * @param {number[]} [opts.ports=[]]        currently unused in this body
 * @param {boolean} [opts.dryRun=false]     log steps without mutating anything
 */
export async function unpublish({
  hostname,
  vmid,
  game = "minecraft",
  ports = [],
  dryRun = false,
}) {
  hostname = normalizeHostname(hostname);
  const fqdn = toFqdn(hostname);

  console.log(`[dePublisher] BEGIN teardown for vmid=${vmid} (${hostname})`);

  /* ---------------------- 1️⃣ Traefik cleanup ---------------------- */
  try {
    console.log(`[dePublisher] Removing Traefik config for ${hostname}`);
    if (!dryRun) {
      const removed = await proxyClient.removeProxyConfig({ hostname });
      if (!removed) console.log(`[dePublisher] No Traefik config found`);
    }
  } catch (err) {
    console.warn(`[dePublisher] ⚠️ Traefik cleanup failed: ${err.message}`);
  }

  /* ---------------------- 2️⃣ Velocity cleanup ---------------------- */
  try {
    if (!dryRun) {
      console.log(`[dePublisher] Unregistering from Velocity using FQDN: ${fqdn}`);
      const res = await velocityClient.unregisterServer(fqdn);
      console.log(`[dePublisher] ✓ Velocity unregistered ${fqdn}: ${res}`);
    }
  } catch (err) {
    console.warn(`[dePublisher] ⚠️ Velocity cleanup failed: ${err.message}`);
  }

  /* ---------------------- 3️⃣ Technitium ---------------------- */
  try {
    console.log(`[dePublisher] Deleting Technitium records for ${hostname}`);
    if (!dryRun) {
      await dns.delARecord({ hostname });
      await dns.delSRVRecord({ hostname });
    }
    console.log(`[dePublisher] ✓ Technitium cleanup OK`);
  } catch (err) {
    console.warn(`[dePublisher] ⚠️ Technitium cleanup failed: ${err.message}`);
  }

  /* ---------------------- 5️⃣ Cloudflare ---------------------- */
  // NOTE(review): step 4️⃣ is skipped in the numbering — presumably a stage
  // was removed at some point; renumber when convenient.
  // NOTE(review): the second call looks redundant — deleteRecordByName already
  // probes the _minecraft._tcp.* variants internally. Confirm and drop.
  try {
    console.log(`[dePublisher] Removing Cloudflare A + SRV for ${hostname}`);
    if (!dryRun) {
      await cloudflareClient.deleteRecordByName(hostname);
      await cloudflareClient.deleteRecordByName(`_minecraft._tcp.${hostname}`);
    }
    console.log(`[dePublisher] ✓ Cloudflare cleanup OK`);
  } catch (err) {
    console.warn(
      `[dePublisher] ⚠️ Cloudflare cleanup failed: ${err.message}`
    );
  }

  console.log(`[dePublisher] ✅ Teardown complete for ${hostname}`);
  return true;
}

export default { unpublish };
|
||||
325
src/services/edgePublisher.js
Normal file
325
src/services/edgePublisher.js
Normal file
@ -0,0 +1,325 @@
|
||||
// src/services/edgePublisher.js
|
||||
// Publishes Traefik/Velocity backend routing + DNS (Technitium + Cloudflare)
|
||||
// and handles multi-game support. Minecraft uses Velocity, other games use Traefik.
|
||||
//
|
||||
// Relies on env:
|
||||
// VELOCITY_EDGE_IP (e.g. 10.70.0.241)
|
||||
// TRAEFIK_EDGE_IP (e.g. 10.60.0.242)
|
||||
// CLOUDFLARE_EDGE_IP (e.g. 139.64.165.248) // public ZPACK OPNsense
|
||||
// EDGE_PUBLIC_IP (legacy fallback for public IP)
|
||||
// DNS_ZONE or TECHNITIUM_ZONE (e.g. zerolaghub.quest)
|
||||
|
||||
import proxyClient from "./proxyClient.js";
|
||||
import dns from "./technitiumClient.js";
|
||||
import cloudflareClient from "./cloudflareClient.js";
|
||||
import velocityClient from "./velocityClient.js";
|
||||
import { unpublish } from "./dePublisher.js";
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Game metadata */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
// SRV metadata per supported game: DNS service label, transport protocol,
// and the game's conventional default port. `mc` is an alias for minecraft;
// `generic` is the fallback for unknown games.
const GAME_SRV = {
  minecraft: { service: "minecraft", protocol: "tcp", defaultPort: 25565 },
  mc: { service: "minecraft", protocol: "tcp", defaultPort: 25565 },
  rust: { service: "rust", protocol: "udp", defaultPort: 28015 },
  terraria: { service: "terraria", protocol: "tcp", defaultPort: 7777 },
  projectzomboid: { service: "projectzomboid", protocol: "udp", defaultPort: 16261 },
  valheim: { service: "valheim", protocol: "udp", defaultPort: 2456 },
  palworld: { service: "palworld", protocol: "udp", defaultPort: 8211 },
  generic: { service: "game", protocol: "tcp", defaultPort: 25565 },
};
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Helpers */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
// True for "mc" or any value containing "minecraft" (case-insensitive).
function isMinecraftGame(game) {
  const normalized = String(game || "").toLowerCase();
  if (normalized === "mc") return true;
  return normalized.includes("minecraft");
}
|
||||
|
||||
/**
 * Decide which internal edge IP Technitium should point to.
 * - Minecraft → Velocity (VELOCITY_EDGE_IP)
 * - Other     → Traefik  (TRAEFIK_EDGE_IP)
 * Each path falls back to the other env var, then to a hard-coded default.
 */
function pickInternalEdgeIp(game) {
  const { VELOCITY_EDGE_IP, TRAEFIK_EDGE_IP } = process.env;

  if (isMinecraftGame(game)) {
    // Velocity edge (10.70.0.241); Traefik only if misconfigured.
    return VELOCITY_EDGE_IP || TRAEFIK_EDGE_IP || "10.70.0.241";
  }

  // Non-Minecraft: Traefik edge (10.60.0.242); Velocity as last resort.
  return TRAEFIK_EDGE_IP || VELOCITY_EDGE_IP || "10.60.0.242";
}
|
||||
|
||||
/**
 * Public edge IP for Cloudflare A/SRV records — the ZPACK OPNsense WAN.
 * Reads CLOUDFLARE_EDGE_IP, then the legacy EDGE_PUBLIC_IP, then a default.
 */
function pickPublicEdgeIp() {
  const configured =
    process.env.CLOUDFLARE_EDGE_IP || process.env.EDGE_PUBLIC_IP; // legacy name
  return configured || "139.64.165.248";
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Primary publisher */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
 * Publish edge routing + DNS + Velocity registration for one instance.
 *
 * Stages (each wrapped in its own try/catch — downstream failures are
 * logged and swallowed, never rethrown):
 *   1) Traefik TCP routing     — non-Minecraft only
 *   2) Technitium internal DNS — A (+ SRV when a port is known)
 *   3) Cloudflare public DNS   — A (+ SRV when a port is known)
 *   4) Velocity registration   — Minecraft only
 *
 * Called from postProvision:
 *   edgePublisher.publishEdge({
 *     vmid,
 *     ports,        // external/public ports OR [25565] for MC
 *     ip,           // container IP (ctIp)
 *     slotHostname, // short hostname (mc-paper-5013)
 *     game
 *   })
 *
 * Throws only on missing vmid / backend IP / slotHostname.
 */
export async function publishEdge({
  vmid,
  ports = [],
  ip,
  ctIp, // older callers may pass ctIp instead of ip
  slotHostname,
  game,
}) {
  const backendIp = ctIp || ip;
  if (!vmid) throw new Error("[edgePublisher] vmid is required");
  if (!backendIp)
    throw new Error(
      `[edgePublisher] Missing backend IP (ctIp/ip) for vmid=${vmid}`
    );

  const gameKey = String(game || "").toLowerCase();
  const meta = GAME_SRV[gameKey] || GAME_SRV.generic;
  const isMC = isMinecraftGame(gameKey);

  const ZONE =
    process.env.TECHNITIUM_ZONE ||
    process.env.DNS_ZONE ||
    "zerolaghub.quest";

  // fqdn: ensure we have <hostname>.<zone>
  if (!slotHostname)
    throw new Error("[edgePublisher] slotHostname is required");
  const fqdn = slotHostname.includes(".")
    ? slotHostname
    : `${slotHostname}.${ZONE}`;

  const internalEdgeIp = pickInternalEdgeIp(gameKey); // Technitium A
  const publicEdgeIp = pickPublicEdgeIp(); // Cloudflare A

  // SRV records advertise only the first public port.
  const externalPort = ports[0] || meta.defaultPort;

  console.log(
    `[edgePublisher] START vmid=${vmid}, game=${gameKey}, backend=${backendIp}, internalEdgeIp=${internalEdgeIp}, publicEdgeIp=${publicEdgeIp}, ports=${ports.join(
      ","
    )}`
  );

  /* ---------------------------------------------------------------------- */
  /* 1) Traefik / TCP routing (non-Minecraft only)                          */
  /* ---------------------------------------------------------------------- */

  if (isMC) {
    console.log(
      `[edgePublisher] Skipping Traefik TCP config (Minecraft handled by Velocity)`
    );
  } else {
    // One proxy entry per public port; failures are per-port, not fatal.
    for (const port of ports) {
      try {
        console.log(
          `[edgePublisher] Adding Traefik TCP entry for ${fqdn}:${port} -> ${backendIp}:${meta.defaultPort ||
            port}`
        );
        await proxyClient.addProxyConfig({
          vmid,
          hostname: slotHostname,
          externalPort: port,
          ctIp: backendIp, // LXC IP
          ctPort: meta.defaultPort || port, // internal game port
          game: gameKey,
          protocol: meta.protocol,
        });
        console.log(
          `[edgePublisher] ✓ Traefik config applied for ${slotHostname}:${port}`
        );
      } catch (err) {
        console.error(
          `[edgePublisher] ❌ Failed to push Traefik config for ${slotHostname}:${port}:`,
          err?.message || err
        );
      }
    }
  }

  /* ---------------------------------------------------------------------- */
  /* 2) Technitium internal DNS                                             */
  /* ---------------------------------------------------------------------- */

  try {
    console.log(
      `[edgePublisher] Creating Technitium A record ${fqdn} → ${internalEdgeIp}`
    );
    await dns.addARecord({
      hostname: fqdn,
      ipAddress: internalEdgeIp,
      ttl: 60,
    });
    console.log(
      `[edgePublisher] ✓ Technitium A record created: ${fqdn} → ${internalEdgeIp}`
    );

    if (externalPort) {
      console.log(
        `[edgePublisher] Creating Technitium SRV _${meta.service}._${meta.protocol}.${fqdn} → ${fqdn}:${externalPort}`
      );
      await dns.addSRVRecord({
        service: meta.service,
        protocol: meta.protocol,
        hostname: fqdn,
        port: externalPort,
        target: fqdn,
        ttl: 60,
      });
      console.log(
        `[edgePublisher] ✓ Technitium SRV created for ${fqdn} port ${externalPort}`
      );
    }
  } catch (err) {
    console.error(
      `[edgePublisher] ❌ Technitium DNS publish failed for ${fqdn}:`,
      err?.response?.data || err?.message || err
    );
  }

  /* ---------------------------------------------------------------------- */
  /* 3) Cloudflare public DNS                                               */
  /* ---------------------------------------------------------------------- */

  try {
    console.log(
      `[edgePublisher] Creating Cloudflare A record ${fqdn} → ${publicEdgeIp}`
    );
    await cloudflareClient.createARecord({
      hostname: fqdn,
      ip: publicEdgeIp,
    });

    if (externalPort) {
      await cloudflareClient.createSRVRecord({
        service: meta.service,
        protocol: meta.protocol,
        hostname: fqdn,
        port: externalPort,
        target: fqdn,
      });
      console.log(
        `[edgePublisher] ✓ Cloudflare SRV created for ${fqdn} on port ${externalPort}`
      );
    } else {
      console.log(
        `[edgePublisher] ✓ Cloudflare A record created (no SRV needed)`
      );
    }
  } catch (err) {
    console.error(
      `[edgePublisher] ⚠️ Cloudflare publish failed for ${fqdn}:`,
      err?.response?.data || err?.message || err
    );
  }

  /* ---------------------------------------------------------------------- */
  /* 4) Velocity registration (Minecraft only)                              */
  /* ---------------------------------------------------------------------- */

  if (isMC) {
    try {
      console.log(
        `[edgePublisher] Registering Minecraft backend with Velocity: ${slotHostname} → ${backendIp}:25565`
      );
      const res = await velocityClient.registerServer({
        name: slotHostname,
        address: backendIp,
        port: 25565, // internal MC port in the container
      });
      console.log(
        `[edgePublisher] ✓ Velocity registered ${slotHostname} → ${backendIp}:25565 (${res})`
      );
    } catch (err) {
      console.error(
        `[edgePublisher] ⚠️ Velocity registration failed for ${slotHostname}:`,
        err?.message || err
      );
    }
  } else {
    console.log(
      `[edgePublisher] Skipping Velocity registration (game=${gameKey})`
    );
  }

  console.log(
    `[edgePublisher] COMPLETE vmid=${vmid}, fqdn=${fqdn}, game=${gameKey}`
  );
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Rollback helper (delegates to dePublisher) */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
 * Best-effort edge teardown, delegating to dePublisher.unpublish.
 * Errors are logged and swallowed so a rollback never masks the
 * provisioning failure that triggered it.
 */
export async function rollbackEdge({ slotHostname, vmid, game, ports }) {
  const label = slotHostname || vmid;
  console.log(
    `[edgePublisher] ⚠️ Edge rollback requested for ${label}`
  );
  try {
    await unpublish({ hostname: slotHostname, vmid, game, ports });
    console.log(
      `[edgePublisher] ✓ Edge rollback completed for ${label}`
    );
  } catch (err) {
    console.error(
      `[edgePublisher] ❌ Edge rollback failed for ${label}:`,
      err?.message || err
    );
  }
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Health Check (optional) */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
 * Lightweight liveness probe for the edge publisher.
 * @returns {Promise<{ok: boolean, message?: string, error?: string}>}
 */
export async function edgeHealth() {
  try {
    const payload = {
      ok: true,
      message: "edgePublisher online (dummy health check)",
    };
    return payload;
  } catch (err) {
    return { ok: false, error: err?.message || String(err) };
  }
}
|
||||
|
||||
|
||||
// Back-compat alias: older callers import `unpublishEdge`; it is the same
// best-effort teardown as rollbackEdge.
export const unpublishEdge = rollbackEdge;

// Default export bundles the public surface for `import edgePublisher from ...`.
export default {
  publishEdge,
  rollbackEdge,
  unpublishEdge,
  edgeHealth,
};
|
||||
77
src/services/envFileWriter.js
Normal file
77
src/services/envFileWriter.js
Normal file
@ -0,0 +1,77 @@
|
||||
// src/services/envFileWriter.js
|
||||
import { Client } from 'ssh2';
|
||||
import fs from 'fs/promises';
|
||||
import fssync from 'fs'; // for reading private key
|
||||
import path from 'path';
|
||||
|
||||
/**
|
||||
* Writes an env file for a VMID to /etc/zlh/slots on the Proxmox host.
|
||||
* Uses SFTP over SSH (no remote exec needed).
|
||||
*/
|
||||
/**
 * Writes an env file for a VMID to /etc/zlh/slots on the Proxmox host.
 * Uses SFTP over SSH (no remote exec needed).
 *
 * @param {number|string} vmid  Container ID; file is written as `<vmid>.env`.
 * @param {Object} data         Flat key/value map serialized as KEY=value lines.
 * @returns {Promise<string>}   The remote path that was written.
 * @throws on SSH/SFTP failure. The local staging file is removed either way
 *         (the original leaked it in /tmp on every call).
 */
export async function writeSlotEnv(vmid, data) {
  const envLines = Object.entries(data)
    .map(([k, v]) => `${k}=${v}`)
    .join('\n');

  // Stage locally, then upload via SFTP fastPut. 0600 since it may hold secrets.
  const tmpPath = path.join('/tmp', `${vmid}.env`);
  await fs.writeFile(tmpPath, envLines, { mode: 0o600 });

  const remotePath = `/etc/zlh/slots/${vmid}.env`;

  try {
    await new Promise((resolve, reject) => {
      const conn = new Client();
      conn
        .on('ready', () => {
          conn.sftp((err, sftp) => {
            if (err) {
              conn.end();
              return reject(err);
            }
            sftp.fastPut(tmpPath, remotePath, (err2) => {
              conn.end();
              if (err2) return reject(err2);
              console.log(`[envFileWriter] wrote env file for vmid=${vmid} → ${remotePath}`);
              resolve(remotePath);
            });
          });
        })
        .on('error', (err) => reject(err))
        .connect({
          host: process.env.PROXMOX_SSH_HOST, // e.g. zlh-prod1
          username: process.env.PROXMOX_SSH_USER || 'apiuser',
          privateKey: fssync.readFileSync(process.env.PROXMOX_SSH_KEY),
        });
    });
    return remotePath;
  } finally {
    // Fix: always clean up the staged temp file (success or failure).
    await fs.unlink(tmpPath).catch(() => {});
  }
}
|
||||
|
||||
/**
|
||||
* Removes the env file for a VMID from /etc/zlh/slots on the Proxmox host.
|
||||
*/
|
||||
/**
 * Removes the env file for a VMID from /etc/zlh/slots on the Proxmox host.
 * A missing file counts as success (SFTP status code 2 = no such file).
 */
export async function removeSlotEnv(vmid) {
  const remotePath = `/etc/zlh/slots/${vmid}.env`;

  return new Promise((resolve, reject) => {
    const conn = new Client();
    conn
      .on('error', (connErr) => reject(connErr))
      .on('ready', () => {
        conn.sftp((sftpErr, sftp) => {
          if (sftpErr) {
            conn.end();
            return reject(sftpErr);
          }
          sftp.unlink(remotePath, (unlinkErr) => {
            conn.end();
            // SSH_FX_NO_SUCH_FILE (2) → already gone, treat as success.
            if (unlinkErr && unlinkErr.code !== 2) return reject(unlinkErr);
            resolve(true);
          });
        });
      })
      .connect({
        host: process.env.PROXMOX_SSH_HOST,
        username: process.env.PROXMOX_SSH_USER || 'apiuser',
        privateKey: fssync.readFileSync(process.env.PROXMOX_SSH_KEY),
      });
  });
}
|
||||
53
src/services/getCtIp.js
Normal file
53
src/services/getCtIp.js
Normal file
@ -0,0 +1,53 @@
|
||||
// src/services/getCtIp.js
|
||||
import 'dotenv/config';
|
||||
import proxmox from './proxmoxClient.js';
|
||||
|
||||
// Pull IPv4 from /lxc/{vmid}/interfaces
|
||||
/**
 * Resolve the IPv4 address of an LXC container from /lxc/{vmid}/interfaces.
 * Skips loopback; prefers the `ip-addresses` array, falls back to `inet`.
 * @returns {Promise<string|null>} dotted-quad IP, or null when unavailable.
 */
export async function getCtIp(vmid, node = process.env.PROXMOX_NODE) {
  try {
    // node selection is already handled inside proxmoxClient
    const interfaces = await proxmox.getContainerInterfaces(vmid);
    for (const iface of interfaces) {
      if (iface.name === 'lo') continue;

      const addrs = iface['ip-addresses'];
      if (Array.isArray(addrs)) {
        const v4 = addrs.find((a) => a['ip-address']?.includes('.'));
        if (v4) return v4['ip-address'];
      }

      // Some agents report a CIDR string in `inet` instead.
      if (iface.inet?.includes('.')) {
        return iface.inet.split('/')[0];
      }
    }
    return null;
  } catch (err) {
    console.warn(
      `[getCtIp] failed for vmid=${vmid} on node=${node}:`,
      err.message || err
    );
    return null;
  }
}
|
||||
|
||||
// Retry wrapper: loop until IP found or timeout
|
||||
/**
 * Poll getCtIp until an IPv4 is available or attempts are exhausted.
 *
 * @param {number|string} vmid
 * @param {string} [node]     Proxmox node (defaults to PROXMOX_NODE).
 * @param {number} [retries]  Max attempts (default 12).
 * @param {number} [delayMs]  Delay between attempts (default 10 s).
 * @returns {Promise<string>} The container IPv4 address.
 * @throws when no IP is found within `retries` attempts.
 */
export async function getCtIpWithRetry(
  vmid,
  node = process.env.PROXMOX_NODE,
  retries = 12,
  delayMs = 10_000
) {
  let last;
  for (let i = 0; i < retries; i++) {
    const ip = await getCtIp(vmid, node);
    if (ip) return ip;
    last = new Error(`IP not ready (attempt ${i + 1})`);
    // Fix: the original slept after the FINAL failed attempt too, adding a
    // pointless delayMs before throwing. Only wait when another try follows.
    if (i < retries - 1) {
      console.log(
        `[getCtIpWithRetry] IP retry ${i + 1}/${retries}... waiting ${delayMs / 1000}s`
      );
      await new Promise((r) => setTimeout(r, delayMs));
    }
  }
  throw last || new Error(`could not resolve IP for vmid=${vmid}`);
}
|
||||
|
||||
export default { getCtIp, getCtIpWithRetry };
|
||||
38
src/services/haproxyClient.js.old
Normal file
38
src/services/haproxyClient.js.old
Normal file
@ -0,0 +1,38 @@
|
||||
import { exec } from 'child_process';
|
||||
|
||||
const HAPROXY_HOST = process.env.HAPROXY_HOST || 'zlhproxy@100.71.44.12';
|
||||
const HAPROXY_CFG = '/etc/haproxy/zlh.cfg';
|
||||
|
||||
/**
 * Append an auto-generated HAProxy `listen` block for one container and
 * reload HAProxy, all via a remote ssh command.
 *
 * NOTE(review): legacy module (file is .old; superseded by opnsenseClient).
 * The shell quoting is fragile — `block` is single-quote-escaped but then
 * embedded inside a double-quoted ssh command, so values containing shell
 * metacharacters could break the command or inject shell. Presumably inputs
 * are internally generated, not user-controlled — verify before any reuse.
 */
export async function addProxyConfig({ vmid, hostname, externalPort, ctIp, ctPort }) {
  return new Promise((resolve, reject) => {
    // Block is delimited by marker comments so removeProxyConfig can sed it out.
    const block = `
# --- Auto-generated by ZeroLagHub API ---
listen ${hostname}-${vmid}
bind 0.0.0.0:${externalPort}
mode tcp
option tcplog
server ${hostname}-${vmid} ${ctIp}:${ctPort}
# --- End of auto-generated block ---
`;

    // Append the block to the remote config and reload HAProxy in one ssh call.
    const cmd = `ssh ${HAPROXY_HOST} "echo '${block.replace(/'/g, "'\\''")}' | sudo tee -a ${HAPROXY_CFG} && sudo systemctl reload haproxy"`;
    exec(cmd, (err, stdout, stderr) => {
      if (err) return reject(stderr || err);
      console.log(`[haproxyClient] Added listener ${hostname}:${externalPort} -> ${ctIp}:${ctPort}`);
      resolve(stdout);
    });
  });
}
|
||||
|
||||
/**
 * Strip auto-generated listener blocks from the HAProxy config and reload.
 *
 * NOTE(review): the sed address range deletes EVERY marker-delimited block,
 * not just the one for `hostname` — removing one listener removes them all.
 * The markers written by addProxyConfig carry no hostname, so a per-host
 * delete is not possible with the current block format. Legacy (.old)
 * module; confirm behavior before reuse.
 */
export async function removeProxyConfig({ hostname }) {
  const cmd = `ssh ${HAPROXY_HOST} "sudo sed -i '/# --- Auto-generated by ZeroLagHub API ---/,/# --- End of auto-generated block ---/d' ${HAPROXY_CFG} && sudo systemctl reload haproxy"`;
  return new Promise((resolve, reject) => {
    exec(cmd, (err) => {
      if (err) return reject(err);
      console.log(`[haproxyClient] Removed listener ${hostname}`);
      resolve();
    });
  });
}
|
||||
|
||||
export default { addProxyConfig, removeProxyConfig };
|
||||
124
src/services/opnsenseClient.js
Normal file
124
src/services/opnsenseClient.js
Normal file
@ -0,0 +1,124 @@
|
||||
// src/services/opnsenseClient.js
|
||||
// Uses OPNsense HAProxy plugin API (add_backend → add_server → add_frontend → reconfigure)
|
||||
|
||||
import axios from 'axios';
|
||||
import https from 'https';
|
||||
|
||||
// OPNsense connection settings; url/key/secret come from the environment.
const BASE = process.env.OPNSENSE_API_URL;
const KEY = process.env.OPNSENSE_API_KEY;
const SECRET = process.env.OPNSENSE_API_SECRET;
const TIMEOUT_MS = Number(process.env.OPNSENSE_TIMEOUT_MS || 10000);

// Shared axios instance using HTTP basic auth (API key/secret pair).
// NOTE(review): rejectUnauthorized=false disables TLS certificate checks —
// tolerable for a self-signed firewall cert on a trusted LAN, but pinning
// the CA would be safer; confirm the deployment assumption.
const client = axios.create({
  baseURL: BASE,
  timeout: TIMEOUT_MS,
  httpsAgent: new https.Agent({ rejectUnauthorized: false }),
  auth: { username: KEY, password: SECRET },
  headers: { 'Content-Type': 'application/json' },
});
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
// Health
|
||||
// ----------------------------------------------------------------------
|
||||
/**
 * Check whether the HAProxy service on OPNsense responds with a status.
 * Never throws — any transport/API error yields false.
 */
export async function health() {
  try {
    const res = await client.get('/haproxy/service/status');
    return Boolean(res.data?.status);
  } catch (e) {
    console.warn('[opnsense] health check failed:', e.message);
    return false;
  }
}
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
// Create HAProxy backend + server + frontend
|
||||
// ----------------------------------------------------------------------
|
||||
/**
 * Create a TCP port-forward via the OPNsense HAProxy plugin API:
 * add_backend → add_server → add_frontend → reconfigure.
 *
 * @param {Object} opts
 * @param {number|string} opts.vmid   Container the forward belongs to.
 * @param {number} opts.publicPort    WAN-side listen port.
 * @param {string} opts.privateIp     Container IP.
 * @param {number} opts.privatePort   Container-side port.
 * @returns {Promise<Object>} raw API responses from each step.
 * @throws the original axios error. Fix: on a mid-sequence failure the
 *         already-created backend/frontend are best-effort deleted, so a
 *         retry doesn't collide with orphaned objects (the original left
 *         them behind).
 */
export async function createPortForward({ vmid, publicPort, privateIp, privatePort }) {
  const backendName = `zpack-backend-${vmid}-${publicPort}`;
  const serverName = `srv-${vmid}-${publicPort}`;
  const frontendName = `zpack-frontend-${vmid}-${publicPort}`;

  // Track created uuids so the catch block can roll back partial work.
  let backendUuid;
  let frontendUuid;

  try {
    // 1. Create backend
    const backendPayload = {
      Backend: {
        name: backendName,
        description: `Backend for vmid=${vmid}`,
        mode: 'tcp',
        enabled: '1',
      },
    };
    console.log('[opnsense] add_backend payload=', backendPayload);
    const backendRes = await client.post('/haproxy/settings/add_backend', backendPayload);
    backendUuid = backendRes?.data?.uuid;
    console.log('[opnsense] add_backend result=', backendRes.data);

    // 2. Create server bound to backend
    const serverPayload = {
      Server: {
        name: serverName,
        description: `Server for vmid=${vmid}`,
        address: privateIp,
        port: String(privatePort),
        enabled: '1',
        backend: backendUuid,
      },
    };
    console.log('[opnsense] add_server payload=', serverPayload);
    const serverRes = await client.post('/haproxy/settings/add_server', serverPayload);
    console.log('[opnsense] add_server result=', serverRes.data);

    // 3. Create frontend bound to backend
    const frontendPayload = {
      Frontend: {
        name: frontendName,
        description: `Frontend for vmid=${vmid}`,
        enabled: '1',
        listenAddress: '0.0.0.0',
        listenPort: String(publicPort),
        mode: 'tcp',
        default_backend: backendUuid,
      },
    };
    console.log('[opnsense] add_frontend payload=', frontendPayload);
    const frontendRes = await client.post('/haproxy/settings/add_frontend', frontendPayload);
    frontendUuid = frontendRes?.data?.uuid;
    console.log('[opnsense] add_frontend result=', frontendRes.data);

    // 4. Apply changes
    const reconfigRes = await client.post('/haproxy/service/reconfigure');
    console.log('[opnsense] reconfigure result=', reconfigRes.data);

    return { ok: true, backend: backendRes.data, server: serverRes.data, frontend: frontendRes.data, reconfig: reconfigRes.data };
  } catch (e) {
    console.error('[opnsense] createPortForward error:');
    if (e.response?.data) {
      console.error('Response body:', JSON.stringify(e.response.data, null, 2));
    } else {
      console.error(e.message || e);
    }
    // Best-effort rollback of whatever was created before the failure.
    if (backendUuid || frontendUuid) {
      try {
        await deletePortForward({ backendUuid, frontendUuid });
      } catch (cleanupErr) {
        console.error('[opnsense] rollback cleanup failed:', cleanupErr?.message || cleanupErr);
      }
    }
    throw e;
  }
}
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
// Delete frontend + backend (and implicitly server)
|
||||
// ----------------------------------------------------------------------
|
||||
/**
 * Delete a frontend and/or backend by uuid, then apply the HAProxy config.
 * Missing uuids are skipped; the attached server goes away with its backend.
 */
export async function deletePortForward({ backendUuid, frontendUuid }) {
  try {
    // Frontend first so nothing references the backend when it is removed.
    const endpoints = [];
    if (frontendUuid) endpoints.push(`/haproxy/settings/del_frontend/${frontendUuid}`);
    if (backendUuid) endpoints.push(`/haproxy/settings/del_backend/${backendUuid}`);
    for (const endpoint of endpoints) {
      await client.post(endpoint);
    }
    const reconfigRes = await client.post('/haproxy/service/reconfigure');
    console.log('[opnsense] delete reconfigure result=', reconfigRes.data);
    return { ok: true, reconfig: reconfigRes.data };
  } catch (e) {
    console.error('[opnsense] deletePortForward error:', e.response?.data || e.message);
    throw e;
  }
}
|
||||
|
||||
export default { health, createPortForward, deletePortForward };
|
||||
150
src/services/portAllocator.js
Normal file
150
src/services/portAllocator.js
Normal file
@ -0,0 +1,150 @@
|
||||
// src/services/portAllocator.js
|
||||
// Centralised port allocation logic for ZeroLagHub.
|
||||
//
|
||||
// Works with the new PortPool schema:
|
||||
//
|
||||
// model PortPool {
|
||||
// id Int @id @default(autoincrement())
|
||||
// port Int
|
||||
// portType String // "game" | "dev" | custom
|
||||
// status PortStatus @default(free)
|
||||
// allocatedTo Int? // vmid
|
||||
// createdAt DateTime @default(now())
|
||||
// updatedAt DateTime @updatedAt
|
||||
//
|
||||
// @@unique([port])
|
||||
// @@index([status, portType])
|
||||
// }
|
||||
//
|
||||
// ENUM:
|
||||
//
|
||||
// enum PortStatus {
|
||||
// free
|
||||
// reserved
|
||||
// allocated
|
||||
// }
|
||||
//
|
||||
// This allocator handles:
|
||||
// - reserve() → lock free ports to a vmid (status=reserved)
|
||||
// - commit() → convert reserved → allocated after provisioning
|
||||
// - release() → free all ports for a vmid (rollback / deletion)
|
||||
|
||||
import prisma from '../services/prisma.js';
|
||||
import { PortStatus } from '@prisma/client';
|
||||
|
||||
const DEFAULT_PORT_TYPE = 'game';
|
||||
|
||||
export class PortAllocationService {
  /**
   * Reserve a block of free ports for a container.
   *
   * Used during provisioning (STEP 3). VMID is already known at this point,
   * so we bind the reservation to that VMID immediately with status "reserved".
   *
   * Fix: find + mark now run inside one interactive transaction, and the
   * update is guarded by `status: free`, so two concurrent reservations can
   * no longer grab the same rows (the original find-then-update had a race).
   *
   * @param {Object} options
   * @param {number} options.count       How many ports to reserve.
   * @param {number} options.vmid        VMID the ports belong to.
   * @param {string} [options.portType]  "game" | "dev" | ...
   * @returns {Promise<number[]>} Array of port numbers, sorted ascending.
   * @throws when inputs are invalid, the pool is short, or a concurrent
   *         reservation wins the race (transaction rolls back).
   */
  static async reserve({ count, vmid, portType = DEFAULT_PORT_TYPE } = {}) {
    if (!count || count <= 0) {
      throw new Error('PortAllocationService.reserve: "count" must be > 0');
    }
    if (!vmid) {
      throw new Error('PortAllocationService.reserve: "vmid" is required');
    }

    return prisma.$transaction(async (tx) => {
      // 1) Find free ports of the requested type
      const candidates = await tx.portPool.findMany({
        where: {
          status: PortStatus.free,
          portType,
        },
        orderBy: { port: 'asc' },
        take: count,
      });

      if (candidates.length < count) {
        throw new Error(
          `PortAllocationService.reserve: not enough free ports for type "${portType}" ` +
            `(requested ${count}, found ${candidates.length})`
        );
      }

      const ids = candidates.map((p) => p.id);
      const ports = candidates.map((p) => p.port);

      // 2) Mark them reserved — the `status: free` guard means a concurrent
      //    transaction that already took a row reduces `res.count`, which we
      //    detect and abort (rolling this transaction back).
      const res = await tx.portPool.updateMany({
        where: { id: { in: ids }, status: PortStatus.free },
        data: {
          status: PortStatus.reserved,
          allocatedTo: vmid,
        },
      });

      if (res.count !== ids.length) {
        throw new Error(
          'PortAllocationService.reserve: concurrent reservation detected, please retry'
        );
      }

      return ports;
    });
  }

  /**
   * Commit a set of reserved ports to a VMID once provisioning succeeds.
   *
   * Converts status "reserved" → "allocated". No-op when `ports` is empty.
   *
   * @param {Object} options
   * @param {number} options.vmid
   * @param {number[]} options.ports
   * @param {string} [options.portType]
   */
  static async commit({ vmid, ports, portType = DEFAULT_PORT_TYPE } = {}) {
    if (!vmid) {
      throw new Error('PortAllocationService.commit: "vmid" is required');
    }
    if (!ports || !Array.isArray(ports) || ports.length === 0) {
      // Nothing to commit – silently return
      return;
    }

    // Scoped to this vmid's rows so stray port numbers can't be committed.
    await prisma.portPool.updateMany({
      where: {
        port: { in: ports },
        portType,
        allocatedTo: vmid,
      },
      data: {
        status: PortStatus.allocated,
      },
    });
  }

  /**
   * Release all reserved/allocated ports associated with a VMID.
   *
   * Used in provisioning rollback, container deletion, and by the
   * reconciler when correcting orphan state.
   *
   * @param {number} vmid
   */
  static async releaseByVmid(vmid) {
    if (!vmid) return;

    await prisma.portPool.updateMany({
      where: {
        allocatedTo: vmid,
      },
      data: {
        status: PortStatus.free,
        allocatedTo: null,
      },
    });
  }
}
|
||||
|
||||
// Backup export syntax for older imports like:
|
||||
// import { PortAllocationService } from '../services/portAllocator.js';
|
||||
export default PortAllocationService;
|
||||
74
src/services/portPool.js
Normal file
74
src/services/portPool.js
Normal file
@ -0,0 +1,74 @@
|
||||
// ESM, Node 20+
|
||||
// Uses your Prisma model: PortPool (status: free|allocated|reserved)
|
||||
|
||||
import prisma from '../services/prisma.js';
|
||||
|
||||
|
||||
const START = 50000;
|
||||
const END = 59000;
|
||||
|
||||
/**
 * Allocate `count` TCP ports from the 50000–59000 pool.
 * Reuses seeded rows with status "free"; creates missing rows on the fly.
 *
 * @param {number} count
 * @param {Object} [meta]  Ownership metadata (vmid/customerId/purpose)
 *                         stamped onto each allocated row.
 * @returns {Promise<number[]>} allocated port numbers, ascending.
 * @throws when the pool is exhausted. Fix: any ports grabbed before the
 *         shortage are released first — the original threw AFTER marking
 *         them allocated, permanently leaking them from the pool.
 */
export async function allocatePorts(count, { vmid, customerId, purpose } = {}) {
  if (!count || count < 1) return [];
  const out = [];

  for (let p = START; p <= END && out.length < count; p++) {
    // try to find a seeded free row first
    const freeRow = await prisma.portPool.findFirst({
      where: { port: p, status: 'free' },
      select: { id: true },
    });

    if (freeRow) {
      await prisma.portPool.update({
        where: { id: freeRow.id },
        data: {
          status: 'allocated',
          vmid: vmid ?? null,
          customerId: customerId ?? null,
          purpose: purpose ?? null,
          allocatedAt: new Date(),
          releasedAt: null,
        },
      });
      out.push(p);
      continue;
    }

    // if not seeded, create on the fly (unique by ip+port+protocol)
    const exists = await prisma.portPool.findFirst({ where: { port: p } });
    if (!exists) {
      await prisma.portPool.create({
        data: {
          port: p,
          protocol: 'tcp',
          status: 'allocated',
          vmid: vmid ?? null,
          customerId: customerId ?? null,
          purpose: purpose ?? null,
          allocatedAt: new Date(),
        },
      });
      out.push(p);
    }
  }

  if (out.length < count) {
    // Roll back the partial allocation before failing; otherwise the grabbed
    // ports stay "allocated" with no owner that will ever release them.
    await releasePorts(out).catch(() => {});
    throw new Error(`Not enough free ports in ${START}-${END}`);
  }
  return out;
}
|
||||
|
||||
/**
 * Return the given ports to the pool: clear ownership fields and stamp
 * releasedAt. No-op for an empty or missing list.
 */
export async function releasePorts(ports) {
  if (!ports?.length) return;
  const cleared = {
    status: 'free',
    vmid: null,
    customerId: null,
    purpose: null,
    releasedAt: new Date(),
    allocatedAt: null,
  };
  await prisma.portPool.updateMany({
    where: { port: { in: ports } },
    data: cleared,
  });
}
|
||||
17
src/services/prisma.js
Normal file
17
src/services/prisma.js
Normal file
@ -0,0 +1,17 @@
|
||||
// src/services/prisma.js
|
||||
import { PrismaClient } from '@prisma/client';
|
||||
|
||||
// Reuse a single PrismaClient across dev hot-reloads: module state resets on
// reload, so the live instance is stashed on globalThis.
const globalForPrisma = globalThis;

const _prisma =
  globalForPrisma.__zlh_prisma ??
  new PrismaClient({
    // PRISMA_LOG: comma-separated log levels, e.g. "query,warn,error" (default "error").
    log: (process.env.PRISMA_LOG ?? 'error').split(',').map((s) => s.trim()),
  });

// Only cache outside production; prod gets a fresh client per process.
if (process.env.NODE_ENV !== 'production') {
  globalForPrisma.__zlh_prisma = _prisma;
}

export const prisma = _prisma; // ← named export (compat with imports using { prisma })
export default _prisma; // ← keep default export too
|
||||
486
src/services/proxmoxClient.js
Normal file
486
src/services/proxmoxClient.js
Normal file
@ -0,0 +1,486 @@
|
||||
// src/services/proxmoxClient.js
|
||||
// Pure JS (ESM) Proxmox LXC client for ZeroLagHub
|
||||
// Cleaned version: execInContainer + updateMinecraftProperties removed
|
||||
|
||||
import axios from 'axios';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Auth & storage (tolerant) */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Build the value for the Proxmox `Authorization` header.
 *
 * Accepts either a full token string (PROXMOX_API_TOKEN / PVE_API_TOKEN /
 * PVEAPITOKEN, with or without the "PVEAPIToken=" prefix) or the three-part
 * form PROXMOX_USER + PROXMOX_API_TOKEN_ID + PROXMOX_API_TOKEN_SECRET.
 * @throws when no usable token configuration is present.
 */
function tokenHeader() {
  const raw =
    process.env.PROXMOX_API_TOKEN ||
    process.env.PVE_API_TOKEN ||
    process.env.PVEAPITOKEN ||
    '';

  if (raw) {
    // Already fully formed?
    if (raw.startsWith('PVEAPIToken=')) return raw;
    // Looks like user@realm!tokenid=secret — just add the prefix.
    if (raw.includes('!') && raw.includes('=')) return `PVEAPIToken=${raw}`;
  }

  const user = process.env.PROXMOX_USER;
  const tokenId = process.env.PROXMOX_API_TOKEN_ID;
  const secret = process.env.PROXMOX_API_TOKEN_SECRET;

  if (user && tokenId && secret) return `PVEAPIToken=${user}!${tokenId}=${secret}`;

  throw new Error(
    'Missing Proxmox API token. Set PROXMOX_API_TOKEN (or PVE_API_TOKEN/PVEAPITOKEN), ' +
      'or PROXMOX_USER + PROXMOX_API_TOKEN_ID + PROXMOX_API_TOKEN_SECRET.'
  );
}
|
||||
|
||||
// Storage pool for new CT volumes: PROXMOX_STORAGE wins, then
// PROXMOX_DEFAULT_STORAGE, otherwise the 'zlh-thin' pool.
export const resolveStorage = () => {
  return (
    process.env.PROXMOX_STORAGE ||
    process.env.PROXMOX_DEFAULT_STORAGE ||
    'zlh-thin'
  );
};
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Base axios client */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Build a per-call Proxmox API context.
 *
 * @param {string} [nodeOverride]  Target node; defaults to PROXMOX_NODE.
 * @returns {Promise<{c, node, form, assertOk}>}
 *   c        — axios instance (status validation disabled; use assertOk)
 *   node     — resolved node name
 *   form     — encode a plain object as application/x-www-form-urlencoded
 *   assertOk — throw (with .httpCode) on any non-2xx response
 * @throws when PROXMOX_NODE or PROXMOX_HOST is missing.
 */
async function base(nodeOverride) {
  const node = nodeOverride || process.env.PROXMOX_NODE;
  if (!node) throw new Error('Missing PROXMOX_NODE');

  // Strip trailing slashes before appending the API prefix.
  const baseURL = (process.env.PROXMOX_HOST || '').replace(/\/+$/, '') + '/api2/json';
  if (!process.env.PROXMOX_HOST) throw new Error('Missing PROXMOX_HOST');

  const { default: httpsMod } = await import('https');

  const c = axios.create({
    baseURL,
    httpsAgent: new httpsMod.Agent({
      // PROXMOX_VERIFY_TLS=false permits self-signed PVE certificates.
      rejectUnauthorized:
        String(process.env.PROXMOX_VERIFY_TLS ?? 'true').toLowerCase() === 'true',
    }),
    headers: { Authorization: tokenHeader() },
    // Never throw from axios itself; assertOk() centralises status handling.
    validateStatus: () => true,
    timeout: 30000,
  });

  // Form-encode, dropping null/undefined values and joining arrays with commas.
  const form = (obj) =>
    new URLSearchParams(
      Object.entries(obj)
        .filter(([, v]) => v !== undefined && v !== null)
        .map(([k, v]) => [k, Array.isArray(v) ? v.join(',') : String(v)])
    );

  // Throw a descriptive error (carrying .httpCode) for any non-2xx response.
  function assertOk(resp, label = '') {
    if (!(resp?.status >= 200 && resp.status < 300)) {
      const info = resp?.data
        ? JSON.stringify(resp.data)
        : String(resp?.statusText || resp?.status);
      const err = new Error(`[Proxmox ${label}] HTTP ${resp?.status} ${info}`);
      err.httpCode = resp?.status;
      throw err;
    }
  }

  return { c, node, form, assertOk };
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Tasks */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// A UPID looks like "UPID:<node>:<pid>:...". Extract the node segment,
// falling back when the value is malformed or not a string at all.
function upidNode(upid, fallbackNode) {
  if (typeof upid !== 'string') return fallbackNode;
  const segments = upid.split(':');
  const nodeSegment = segments[1];
  return nodeSegment ? nodeSegment : fallbackNode;
}
|
||||
|
||||
/**
 * Poll a Proxmox task UPID until it stops; resolves true on success.
 * A missing UPID resolves immediately (some endpoints return none).
 *
 * @param {string} upid
 * @param {Object} [opts]
 * @param {number} [opts.intervalMs]  Poll interval (default 1 s).
 * @param {number} [opts.timeoutMs]   Overall deadline (default 5 min).
 * @throws when the task exits non-OK or the deadline elapses.
 */
export async function pollTask(upid, { intervalMs = 1000, timeoutMs = 5 * 60_000 } = {}) {
  if (!upid) return true;
  const { c, node } = await base();
  // The task may live on a different node than the default — trust the UPID.
  const taskNode = upidNode(upid, node);
  const start = Date.now();

  while (true) {
    const r = await c.get(`/nodes/${taskNode}/tasks/${encodeURIComponent(upid)}/status`);
    // Transient non-2xx poll responses are ignored and retried until timeout.
    if (r.status >= 200 && r.status < 300) {
      const st = r.data?.data;
      if (st?.status === 'stopped') {
        // Proxmox reports success as exitstatus "OK" (possibly with a suffix).
        const ok = String(st?.exitstatus || '').toUpperCase().startsWith('OK');
        if (ok) return true;
        throw new Error(`Task ${upid} failed: ${st?.exitstatus || 'unknown'}`);
      }
    }
    if (Date.now() - start > timeoutMs) {
      throw new Error(`Task ${upid} timed out`);
    }
    await new Promise((r2) => setTimeout(r2, intervalMs));
  }
}
|
||||
|
||||
/**
 * Scan the node's recent task list for a task of `type` on `vmid`.
 * Used when a lifecycle POST does not return a UPID directly.
 *
 * @param {number|string} vmid
 * @param {string} type  Proxmox task type, e.g. "vzstart".
 * @param {Object} [opts]
 * @param {number} [opts.sinceEpochSec]  Window start (default: ~2 s ago).
 * @param {number} [opts.timeoutMs]      Scan deadline (default 15 s).
 * @returns {Promise<string|null>} the UPID, or null after the deadline.
 */
async function findRecentTaskUpid(vmid, type, { sinceEpochSec, timeoutMs = 15000 } = {}) {
  const { c, node, assertOk } = await base();
  const deadline = Date.now() + timeoutMs;
  const since = sinceEpochSec ?? Math.floor(Date.now() / 1000) - 2;

  while (Date.now() < deadline) {
    const r = await c.get(`/nodes/${node}/tasks`, { params: { since, vmid } });
    assertOk(r, 'tasks/list');
    const list = Array.isArray(r.data?.data) ? r.data.data : [];
    // Match on both vmid and task type (case-insensitive).
    const hit = list.find(
      (t) =>
        String(t?.id) === String(vmid) &&
        String(t?.type).toLowerCase() === String(type).toLowerCase()
    );
    if (hit?.upid) return hit.upid;
    await new Promise((r2) => setTimeout(r2, 800));
  }
  return null;
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Status & waits */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Fetch the current runtime status object for an LXC
 * (GET /nodes/{node}/lxc/{vmid}/status/current).
 */
export async function getContainerStatus(vmid, nodeOverride) {
  const { c, node, assertOk } = await base(nodeOverride);
  const resp = await c.get(`/nodes/${node}/lxc/${vmid}/status/current`);
  assertOk(resp, 'lxc/status/current');
  return resp.data?.data;
}
|
||||
|
||||
/**
 * Fetch the LXC configuration map (GET /nodes/{node}/lxc/{vmid}/config).
 * @returns {Promise<Object>} config object, {} when the API returns none.
 */
export async function getContainerConfig(vmid, nodeOverride) {
  const { c, node, assertOk } = await base(nodeOverride);
  const resp = await c.get(`/nodes/${node}/lxc/${vmid}/config`);
  assertOk(resp, 'lxc/get-config');
  return resp.data?.data || {};
}
|
||||
|
||||
/**
 * Poll until the container reports `desired` status (lowercase compare).
 * @throws after timeoutMs without a match.
 */
export async function waitForStatus(vmid, desired, { timeoutMs = 180000, everyMs = 1200 } = {}) {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    const current = await getContainerStatus(vmid);
    if (String(current?.status).toLowerCase() === desired) return true;
    await new Promise((resolve) => setTimeout(resolve, everyMs));
  }
  throw new Error(`Container ${vmid} did not reach status=${desired} in time`);
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Lifecycle */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Start an LXC. Returns the task UPID when one can be determined — either
 * directly from the API response or by scanning recent vzstart tasks —
 * otherwise null.
 */
export async function startContainer(vmid, nodeOverride) {
  const { c, node, assertOk } = await base(nodeOverride);
  const since = Math.floor(Date.now() / 1000) - 1;

  const resp = await c.post(`/nodes/${node}/lxc/${vmid}/status/start`);
  assertOk(resp, 'lxc/start');

  const upid =
    resp.data?.data ||
    (await findRecentTaskUpid(vmid, 'vzstart', { sinceEpochSec: since }));

  return upid || null;
}
|
||||
|
||||
/**
 * Start a container, retrying when Proxmox's per-CT config lock is busy
 * (common right after a clone or config write releases).
 *
 * @param {number|string} vmid
 * @param {Object} [opts]
 * @param {number} [opts.retries]  Max retries on the lock error (default 6).
 * @param {number} [opts.delayMs]  Wait between retries (default 1.2 s).
 * @returns {Promise<true>} once the container is confirmed running.
 * @throws any non-lock error, or the lock error after retries are spent.
 */
export async function startWithRetry(vmid, { retries = 6, delayMs = 1200 } = {}) {
  let attempt = 0;
  // Small grace period: lets a just-finished task release the config lock.
  await new Promise((r) => setTimeout(r, 400));

  while (true) {
    try {
      const upid = await startContainer(vmid);

      if (upid) {
        await pollTask(upid, { timeoutMs: 120000 });
      } else {
        // No UPID available — fall back to polling the status endpoint.
        await waitForStatus(vmid, 'running', { timeoutMs: 180000, everyMs: 1200 });
      }

      return true;
    } catch (err) {
      const msg = String(err?.message || '');
      // Only the known transient lock-timeout error is retried.
      const isConfigLock =
        msg.includes("can't lock file '/run/lock/lxc/pve-config-") &&
        msg.includes('got timeout');

      if (isConfigLock && attempt < retries) {
        attempt++;
        await new Promise((r) => setTimeout(r, delayMs));
        continue;
      }

      throw err;
    }
  }
}
|
||||
|
||||
/**
 * Graceful shutdown with a timeout (seconds).
 * @returns the task UPID — from the response, or recovered from the
 *          recent-task list when the API returns none.
 */
export async function shutdownContainer(vmid, { timeout = 60 } = {}) {
  const { c, node, form, assertOk } = await base();
  const since = Math.floor(Date.now() / 1000) - 1;

  const resp = await c.post(
    `/nodes/${node}/lxc/${vmid}/status/shutdown`,
    form({ timeout }),
    { headers: { 'Content-Type': 'application/x-www-form-urlencoded' } }
  );
  assertOk(resp, 'lxc/shutdown');

  const upid = resp.data?.data;
  if (upid) return upid;
  return findRecentTaskUpid(vmid, 'vzshutdown', { sinceEpochSec: since });
}
|
||||
|
||||
/**
 * Hard stop (no guest-side shutdown) with a timeout (seconds).
 * @returns the task UPID — from the response, or recovered from the
 *          recent-task list when the API returns none.
 */
export async function stopContainer(vmid, { timeout = 60 } = {}) {
  const { c, node, form, assertOk } = await base();
  const since = Math.floor(Date.now() / 1000) - 1;

  const resp = await c.post(
    `/nodes/${node}/lxc/${vmid}/status/stop`,
    form({ timeout }),
    { headers: { 'Content-Type': 'application/x-www-form-urlencoded' } }
  );
  assertOk(resp, 'lxc/stop');

  const upid = resp.data?.data;
  if (upid) return upid;
  return findRecentTaskUpid(vmid, 'vzstop', { sinceEpochSec: since });
}
|
||||
|
||||
/**
 * Delete an LXC: best-effort stop first, then DELETE the container.
 * @returns the delete task UPID (when the API returns one).
 */
export async function deleteContainer(vmid) {
  const { c, node, assertOk } = await base();

  // Best-effort stop; failures (e.g. already stopped, missing) are
  // deliberately swallowed so deletion still proceeds.
  try {
    const up = await stopContainer(vmid, { timeout: 60 });
    await pollTask(up, { timeoutMs: 120000 });
  } catch {}

  const r = await c.delete(`/nodes/${node}/lxc/${vmid}`);
  assertOk(r, 'lxc/delete');
  return r.data?.data;
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Create & configure */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Clone a template LXC into a new VMID and wait for the clone task to finish.
 *
 * @param {Object} o
 * @param {number} o.templateVmid  Source template container.
 * @param {number} o.vmid          New container ID (Proxmox `newid`).
 * @param {string} o.name          Hostname for the clone.
 * @param {string} [o.storage]     Target storage (defaults via resolveStorage()).
 * @param {number} [o.full]        1 = full clone (default), 0 = linked clone.
 * @param {string} [o.pool]        Resource pool (falls back to PROXMOX_POOL).
 * @returns {Promise<true>} once the clone task completes.
 */
export async function cloneContainer({ templateVmid, vmid, name, storage, full = 1, pool }) {
  const { c, node, form, assertOk } = await base();
  const chosenStorage = storage || resolveStorage();

  const body = form({
    newid: vmid,
    hostname: name,
    full,
    storage: chosenStorage,
    pool: pool || process.env.PROXMOX_POOL || undefined,
  });

  const r = await c.post(`/nodes/${node}/lxc/${templateVmid}/clone`, body, {
    headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
  });
  assertOk(r, 'lxc/clone');

  // Cloning is asynchronous — block until the task reports OK.
  await pollTask(r.data?.data);
  return true;
}
|
||||
|
||||
/**
 * Apply CPU/memory/network/description/tag settings to an LXC.
 * Only supplied fields are sent; returns true without an API call when
 * there is nothing to change.
 */
export async function configureContainer({
  vmid,
  cpu,
  memory,
  bridge,
  description,
  tags,
}) {
  const { c, node, form, assertOk } = await base();

  const params = {};
  if (cpu != null) params.cores = Number(cpu);
  if (memory != null) params.memory = Number(memory);
  // eth0 on the given bridge with DHCP — same shape the templates expect.
  if (bridge) params.net0 = `name=eth0,bridge=${bridge},ip=dhcp,type=veth`;
  if (tags) params.tags = tags;
  if (description) params.description = description;

  if (Object.keys(params).length === 0) return true;

  const resp = await c.put(`/nodes/${node}/lxc/${vmid}/config`, form(params), {
    headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
  });
  assertOk(resp, 'lxc/config');

  await pollTask(resp.data?.data);
  return true;
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Resize & attach disk */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Grow a container disk by `addGiB` gibibytes. Gated behind
 * PVE_ALLOW_RESIZE=1 so accidental resizes can't happen from config alone.
 */
export async function resizeContainer(vmid, { disk = 'rootfs', addGiB }) {
  if (process.env.PVE_ALLOW_RESIZE !== '1') {
    throw new Error('Resize disabled by server config');
  }

  const { c, node, form, assertOk } = await base();
  const body = form({ disk, size: `+${Number(addGiB)}G` });
  const resp = await c.put(`/nodes/${node}/lxc/${vmid}/resize`, body, {
    headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
  });
  assertOk(resp, 'lxc/resize');
  return resp.data?.data;
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Mount points */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Attach a new storage-backed mount point to an LXC.
 *
 * Sequence: stop the container if running (graceful shutdown, then hard
 * stop as fallback), pick a free mpN slot (or validate the requested one),
 * write the mount-point config, then start the container again.
 * Gated behind PVE_ALLOW_OPTIONS=1.
 *
 * @param {number|string} vmid
 * @param {Object} o
 * @param {string} [o.storage]    Storage pool (defaults via resolveStorage()).
 * @param {number} o.sizeGiB      Volume size in GiB.
 * @param {string} [o.mp]         Explicit slot key ("mp0".."mp9"); auto-picked if omitted.
 * @param {string} [o.mountPath]  Mount path inside the CT (default '/data').
 * @param {Object} [o.options]    Extra mp options appended as ,k=v pairs.
 * @returns the config PUT task result data.
 * @throws when disabled, no slot is free, or the requested slot is taken.
 */
export async function attachMountPoint(
  vmid,
  { storage, sizeGiB, mp, mountPath = '/data', options = {} }
) {
  if (process.env.PVE_ALLOW_OPTIONS !== '1')
    throw new Error('Disk attach disabled by server config');

  const { c, node, form, assertOk } = await base();
  const STORAGE = storage || resolveStorage();

  const cur = String((await getContainerStatus(vmid))?.status || '').toLowerCase();

  // Mount points can only be added while the CT is stopped.
  if (cur === 'running') {
    // Try graceful shutdown first...
    const up1 = await shutdownContainer(vmid, {
      timeout: Number(process.env.PVE_SHUTDOWN_TIMEOUT || 60),
    });
    await pollTask(up1, { timeoutMs: 180000 });

    // ...and hard-stop if it's still up.
    let st = String((await getContainerStatus(vmid))?.status || '').toLowerCase();
    if (st === 'running') {
      const up2 = await stopContainer(vmid, {
        timeout: Number(process.env.PVE_STOP_TIMEOUT || 60),
      });
      await pollTask(up2, { timeoutMs: 120000 });
    }

    await waitForStatus(vmid, 'stopped', { timeoutMs: 180000, everyMs: 1200 });
  }

  const cfgRes = await c.get(`/nodes/${node}/lxc/${vmid}/config`);
  assertOk(cfgRes, 'lxc/get-config');
  const cfg = cfgRes.data?.data || {};

  // Pick the first free mp0..mp9 slot, or validate the caller's choice.
  let mpKey = mp;
  if (!mpKey) {
    for (let i = 0; i <= 9; i++) {
      if (!cfg[`mp${i}`]) {
        mpKey = `mp${i}`;
        break;
      }
    }
    if (!mpKey) throw new Error('No free mountpoint slots (mp0..mp9)');
  } else if (cfg[mpKey]) {
    throw new Error(`${mpKey} is already in use`);
  }

  // Extra options become ",k=v" suffixes on the mp value string.
  const extra = Object.entries(options || {})
    .map(([k, v]) => `,${k}=${encodeURIComponent(v)}`)
    .join('');

  // Proxmox mp syntax: <storage>:<sizeGiB>,mp=<path>[,opt=val...]
  const value = `${STORAGE}:${Number(sizeGiB)},mp=${mountPath}${extra}`;

  const putRes = await c.put(
    `/nodes/${node}/lxc/${vmid}/config`,
    form({ [mpKey]: value }),
    { headers: { 'Content-Type': 'application/x-www-form-urlencoded' } }
  );

  assertOk(putRes, 'lxc/config(mp)');

  // Bring the container back up and confirm it's running.
  const upid = await startContainer(vmid);
  await pollTask(upid, { timeoutMs: 120000 });
  await waitForStatus(vmid, 'running', { timeoutMs: 180000, everyMs: 1000 });

  return putRes.data?.data;
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Listing, status, misc */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Quick liveness probe against the Proxmox API.
 * @returns {Promise<boolean>} true when /cluster/status answers with 2xx and data.
 */
export async function ping() {
  const { c } = await base();
  try {
    const res = await c.get('/cluster/status');
    const is2xx = res.status >= 200 && res.status < 300;
    return is2xx && Boolean(res.data?.data);
  } catch {
    // Any transport / auth failure just means "not reachable".
    return false;
  }
}
|
||||
|
||||
/**
 * Fetch the status object of a Proxmox task by UPID.
 * @param {string} upid - task identifier; null/empty returns null.
 * @returns {Promise<object|null>} task status payload
 */
export async function getTaskStatus(upid) {
  if (!upid) return null;

  const { c, node, assertOk } = await base();
  // The task may live on a different node; extract it from the UPID.
  const owningNode = upidNode(upid, node);

  const res = await c.get(
    `/nodes/${owningNode}/tasks/${encodeURIComponent(upid)}/status`
  );
  assertOk(res, 'tasks/status');
  return res.data?.data;
}
|
||||
|
||||
/**
 * List network interfaces reported by a running container.
 * @param {number|string} vmid
 * @param {string} [nodeOverride] - query a specific node instead of the default
 * @returns {Promise<Array>} interface descriptors (empty array when none)
 */
export async function getContainerInterfaces(vmid, nodeOverride) {
  const { c, node, assertOk } = await base(nodeOverride);
  const res = await c.get(`/nodes/${node}/lxc/${vmid}/interfaces`);
  assertOk(res, 'lxc/interfaces');
  const list = res.data?.data;
  return list || [];
}
|
||||
|
||||
/**
 * List LXC containers on a node, normalized to { vmid, hostname, status }.
 * Failures are logged and reported as an empty list rather than thrown.
 *
 * @param {string|null} [nodeOverride=null] - node to query; default node otherwise
 * @returns {Promise<Array<{vmid:number, hostname:string, status:string}>>}
 */
export async function listContainers(nodeOverride = null) {
  const { c, node, assertOk } = await base(nodeOverride);
  const activeNode = nodeOverride || node;

  try {
    const res = await c.get(`/nodes/${activeNode}/lxc`);
    assertOk(res, 'lxc/list');

    const raw = res.data?.data;
    const containers = Array.isArray(raw) ? raw : [];

    return containers.map((ct) => {
      // Proxmox sometimes reports "name", sometimes "hostname".
      const hostname = ct.name || ct.hostname || `ct-${ct.vmid}`;
      return { vmid: ct.vmid, hostname, status: ct.status };
    });
  } catch (err) {
    console.error(`[proxmoxClient] Failed to list containers: ${err.message}`);
    return [];
  }
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Final export */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Aggregate default export so consumers can `import proxmox from ...` and
// call members as proxmox.cloneContainer(...). Each member is also exported
// individually above.
export default {
  cloneContainer,
  configureContainer,
  //cloneStartConfigure,
  startContainer,
  shutdownContainer,
  stopContainer,
  deleteContainer,
  getContainerStatus,
  getContainerConfig,
  resizeContainer,
  attachMountPoint,
  startWithRetry,
  pollTask,
  waitForStatus,
  resolveStorage,
  ping,
  getTaskStatus,
  getContainerInterfaces,
  listContainers,
};
|
||||
151
src/services/proxyClient.js
Normal file
151
src/services/proxyClient.js
Normal file
@ -0,0 +1,151 @@
|
||||
// src/services/proxyClient.js
|
||||
// Writes Traefik dynamic config to remote proxy VM over SSH.
|
||||
// Uses game-specific entryPoints (matching traefik.yml defaults).
|
||||
|
||||
import { exec } from 'child_process';
|
||||
import path from 'path';
|
||||
|
||||
const TRAEFIK_HOST = process.env.TRAEFIK_HOST || 'zlhproxy@100.71.44.12';
|
||||
const DYNAMIC_DIR = '/etc/traefik/dynamic';
|
||||
|
||||
/**
 * Map of game → { entryPoint, protocol, defaultPort }.
 * Must align exactly with the entryPoints declared in traefik.yml —
 * a router referencing an unknown entryPoint is silently dropped by Traefik.
 * Note: "mcp" is an alias that shares the minecraft entryPoint.
 */
const GAME_ENTRYPOINTS = {
  minecraft: { entryPoint: 'minecraft', protocol: 'tcp', defaultPort: 25565 },
  mcp: { entryPoint: 'minecraft', protocol: 'tcp', defaultPort: 25565 },
  rust: { entryPoint: 'rust', protocol: 'udp', defaultPort: 28015 },
  terraria: { entryPoint: 'terraria', protocol: 'tcp', defaultPort: 7777 },
  projectzomboid: { entryPoint: 'projectzomboid', protocol: 'udp', defaultPort: 16261 },
  valheim: { entryPoint: 'valheim', protocol: 'udp', defaultPort: 2456 },
  palworld: { entryPoint: 'palworld', protocol: 'udp', defaultPort: 8211 },
};
|
||||
|
||||
/**
 * Execute a remote command to write a file on the Traefik host.
 * Streams `content` through the local ssh process's stdin into
 * `cat > <DYNAMIC_DIR>/<filename>` on the remote side.
 *
 * NOTE(review): `filename` is interpolated into a shell command without
 * quoting/escaping — safe only while callers pass internally-generated
 * names (hostname-vmid). Confirm no user-controlled value reaches here.
 *
 * @param {string} filename - file name inside DYNAMIC_DIR
 * @param {string} content  - file body to write
 * @returns {Promise<void>} rejects when ssh exits non-zero
 */
async function writeRemoteConfig(filename, content) {
  return new Promise((resolve, reject) => {
    const remotePath = path.posix.join(DYNAMIC_DIR, filename);
    const cmd = `ssh ${TRAEFIK_HOST} "cat > ${remotePath}"`;
    const child = exec(cmd, (err) => {
      if (err) return reject(err);
      resolve();
    });
    // Feed the file body via stdin so it never touches the command line.
    child.stdin.write(content);
    child.stdin.end();
  });
}
|
||||
|
||||
/**
 * Remove a dynamic-config file from the Traefik host.
 * Uses `rm -f`, so a missing file is not an error on the remote side.
 *
 * @param {string} filename - file name inside DYNAMIC_DIR
 * @returns {Promise<void>} rejects when the ssh command itself fails
 */
async function removeRemoteConfig(filename) {
  const target = path.posix.join(DYNAMIC_DIR, filename);
  const cmd = `ssh ${TRAEFIK_HOST} "rm -f ${target}"`;
  return new Promise((resolve, reject) => {
    exec(cmd, (err) => (err ? reject(err) : resolve()));
  });
}
|
||||
|
||||
/**
 * Check whether a dynamic YAML route file already exists for a hostname.
 *
 * Bug fix: addProxyConfig() writes `${hostname}-${vmid}.yml`, but this check
 * previously only probed `${hostname}.yml`, so routes created by
 * addProxyConfig were never detected. When a vmid is supplied we now probe
 * the vmid-qualified filename first, then fall back to the legacy name.
 * Calling with only a hostname behaves exactly as before.
 *
 * @param {string} hostname
 * @param {number|string|null} [vmid=null] - matches files written by addProxyConfig
 * @returns {Promise<boolean>}
 */
export async function routeExists(hostname, vmid = null) {
  const candidates =
    vmid != null
      ? [`${hostname}-${vmid}.yml`, `${hostname}.yml`]
      : [`${hostname}.yml`];

  // `test -f` exits non-zero when the file is absent; exec reports that as err.
  const fileExists = (file) =>
    new Promise((resolve) => {
      const cmd = `ssh ${TRAEFIK_HOST} "test -f ${DYNAMIC_DIR}/${file}"`;
      exec(cmd, (err) => resolve(!err));
    });

  for (const file of candidates) {
    // Sequential on purpose: stop at the first hit.
    if (await fileExists(file)) {
      console.log(`[proxyClient] Route file exists for ${hostname}`);
      return true;
    }
  }

  console.log(`[proxyClient] No route file found for ${hostname}`);
  return false;
}
|
||||
|
||||
/**
 * Add a dynamic Traefik proxy config for a container.
 * Writes a per-server YAML file (`<hostname>-<vmid>.yml`) to the remote
 * dynamic directory; Traefik's file provider picks it up automatically.
 *
 * @param {object} opts
 * @param {number} opts.vmid
 * @param {string} opts.hostname
 * @param {number} opts.externalPort
 * @param {string} opts.ctIp - container backend IP
 * @param {number} opts.ctPort - container backend port
 * @param {string} opts.game - key into GAME_ENTRYPOINTS
 * @param {string} [opts.protocol] - optional override ('tcp' | 'udp')
 * @throws on missing params or an unsupported protocol
 */
export async function addProxyConfig({ vmid, hostname, externalPort, ctIp, ctPort, game, protocol }) {
  if (!hostname || !externalPort || !ctIp || !ctPort) {
    throw new Error(`[proxyClient] Missing required params`);
  }

  // Unknown games silently fall back to the minecraft entryPoint/TCP.
  // NOTE(review): consider warning on unknown `game` — confirm this fallback is intended.
  const gameMeta = GAME_ENTRYPOINTS[game] || { entryPoint: 'minecraft', protocol: 'tcp', defaultPort: 25565 };
  const entryPoint = gameMeta.entryPoint;
  const proto = protocol || gameMeta.protocol;

  // vmid suffix keeps router/service/file names unique per container.
  const safeName = `${hostname}-${vmid}`; // safer unique key
  const file = `${safeName}.yml`;
  let yaml = '';

  if (proto === 'tcp') {
    // TCP routers require a rule; HostSNI(`*`) matches all (no TLS SNI routing).
    yaml = `
tcp:
  routers:
    ${safeName}-router:
      entryPoints:
        - ${entryPoint}
      rule: "HostSNI(\`*\`)"
      service: ${safeName}-svc

  services:
    ${safeName}-svc:
      loadBalancer:
        servers:
          - address: "${ctIp}:${ctPort}"
`;
  } else if (proto === 'udp') {
    // UDP routers have no rule concept — entryPoint alone selects traffic.
    yaml = `
udp:
  routers:
    ${safeName}-router:
      entryPoints:
        - ${entryPoint}
      service: ${safeName}-svc

  services:
    ${safeName}-svc:
      loadBalancer:
        servers:
          - address: "${ctIp}:${ctPort}"
`;
  } else {
    throw new Error(`[proxyClient] Unsupported protocol=${proto}`);
  }

  await writeRemoteConfig(file, yaml);
  console.log(`[proxyClient] ✓ wrote remote config ${file} (${proto.toUpperCase()} ${hostname} → ${ctIp}:${ctPort} on entryPoint ${entryPoint})`);
}
|
||||
|
||||
/**
 * Remove a dynamic proxy config.
 *
 * Bug fix: addProxyConfig() writes `${hostname}-${vmid}.yml`, but this
 * function previously removed `${hostname}.yml` — a file addProxyConfig
 * never creates — so cleanup silently left stale routes behind. Pass the
 * same vmid used at creation time to target the correct file; omitting
 * vmid preserves the old (legacy filename) behavior.
 *
 * @param {object} opts
 * @param {string} opts.hostname
 * @param {number|string} [opts.vmid] - vmid used when the route was added
 */
export async function removeProxyConfig({ hostname, vmid }) {
  const file = vmid != null ? `${hostname}-${vmid}.yml` : `${hostname}.yml`;
  try {
    await removeRemoteConfig(file);
    console.log(`[proxyClient] ✓ removed remote config ${file}`);
  } catch (err) {
    // Best-effort cleanup: log and continue rather than failing the caller.
    console.warn(`[proxyClient] remove failed: ${err.message}`);
  }
}
|
||||
|
||||
// Aggregate default export; members are also available as named exports.
export default { addProxyConfig, removeProxyConfig, routeExists };
|
||||
367
src/services/technitiumClient.js
Normal file
367
src/services/technitiumClient.js
Normal file
@ -0,0 +1,367 @@
|
||||
/**
|
||||
* ZeroLagHub – Technitium DNS Client
|
||||
*
|
||||
* Responsibilities:
|
||||
* - List all records in a zone (for reconcile / debugging)
|
||||
* - Create A + SRV for internal DNS (used by edgePublisher)
|
||||
* - Delete A + SRV for a given hostname (used by dePublisher / reconcile)
|
||||
*
|
||||
* API used in other modules:
|
||||
* - dns.listRecords()
|
||||
* - dns.listSRVRecords()
|
||||
* - dns.delARecord({ hostname })
|
||||
* - dns.delSRVRecord({ hostname })
|
||||
* - dns.findRecordsByHostname(hostname)
|
||||
* - dns.addARecord({ hostname, ipAddress })
|
||||
* - dns.addSRVRecord({ hostname, port, target? })
|
||||
*/
|
||||
|
||||
import fetch from "node-fetch";
|
||||
|
||||
const API_URL =
|
||||
process.env.TECHNITIUM_API_URL || "http://10.60.0.253:5380/api";
|
||||
const API_TOKEN = process.env.TECHNITIUM_API_TOKEN;
|
||||
const ZONE = process.env.TECHNITIUM_ZONE || "zerolaghub.quest";
|
||||
|
||||
// Internal Velocity / Traefik targets
|
||||
const ZLH_IPS = ["10.60.0.242", "10.70.0.241"];
|
||||
|
||||
if (!API_TOKEN) {
|
||||
console.warn(
|
||||
"[technitiumClient] ⚠️ TECHNITIUM_API_TOKEN is not set – DNS operations will fail."
|
||||
);
|
||||
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Helpers */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/** Return the zone as a dot-prefixed suffix, e.g. ".zerolaghub.quest". */
function zoneSuffix() {
  return '.' + ZONE;
}
|
||||
|
||||
/**
 * Lowercase a hostname and ensure it carries the zone suffix (FQDN form).
 * Empty/falsy input yields "".
 */
function normalizeHostname(hostname) {
  if (!hostname) return "";
  const lower = hostname.toLowerCase();
  if (lower.endsWith(zoneSuffix())) return lower;
  return `${lower}${zoneSuffix()}`;
}
|
||||
|
||||
/** Lowercase a hostname and strip the zone suffix if present (short form). */
function shortHost(hostname) {
  const lower = hostname.toLowerCase();
  const suffix = zoneSuffix();
  if (!lower.endsWith(suffix)) return lower;
  return lower.slice(0, lower.length - suffix.length);
}
|
||||
|
||||
/**
 * GET against the Technitium API; resolves the parsed JSON body.
 * @param {string} pathAndQuery - path + query string appended to API_URL
 * @throws Error with the API's errorMessage when status is not "ok"
 */
async function techGet(pathAndQuery) {
  const res = await fetch(`${API_URL}${pathAndQuery}`);
  const json = await res.json();
  if (json.status === "ok") {
    return json;
  }
  throw new Error(json.errorMessage || "Technitium API error");
}
|
||||
|
||||
/**
 * POST form-encoded params to the Technitium API.
 * Token and zone are injected automatically; bodyParams can override them.
 * Unlike techGet, the raw JSON is returned even on API-level failure —
 * callers inspect json.status themselves.
 */
async function techPost(path, bodyParams) {
  const merged = { token: API_TOKEN, zone: ZONE, ...bodyParams };
  const body = new URLSearchParams(Object.entries(merged));
  const res = await fetch(`${API_URL}${path}`, { method: "POST", body });
  return res.json();
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* List Records */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
 * List every record in the configured zone.
 * Each record gets a normalized `name` field (Technitium reports `domain`).
 * Failures are logged and reported as an empty list, never thrown.
 * @returns {Promise<Array<object>>}
 */
async function listRecords() {
  const query = `/zones/records/get?token=${API_TOKEN}&zone=${encodeURIComponent(
    ZONE
  )}&listZone=true`;

  try {
    const json = await techGet(query);
    const raw = json.response?.records || [];
    const records = raw.map((rec) => ({
      ...rec,
      name: rec.domain || rec.name || "",
    }));

    console.log(
      `[technitiumClient] ✓ Retrieved ${records.length} records from ${ZONE}`
    );
    return records;
  } catch (err) {
    console.warn(
      `[technitiumClient] ⚠️ Failed to list records for ${ZONE}: ${err.message}`
    );
    return [];
  }
}
|
||||
|
||||
/** List only the SRV records of the zone. */
async function listSRVRecords() {
  const records = await listRecords();
  return records.filter((rec) => rec.type === "SRV");
}
|
||||
|
||||
/**
 * Find zone records related to a hostname (FQDN, short form, subdomains,
 * or any name containing the short form — intentionally loose for reconcile).
 * @param {string} hostname
 * @returns {Promise<Array<object>>}
 */
async function findRecordsByHostname(hostname) {
  const all = await listRecords();
  const fqdn = normalizeHostname(hostname);
  const short = shortHost(hostname);

  const matches = (name) =>
    name === fqdn ||
    name === short ||
    name.endsWith(`.${short}`) ||
    name.includes(short);

  return all.filter((rec) => matches(rec.name || ""));
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* A Records – Add + Delete */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
 * Create an A record for `hostname` → `ipAddress` in the zone.
 * Never throws: returns true on success, false on API error or exception.
 * @param {object} opts
 * @param {string} opts.hostname
 * @param {string} opts.ipAddress
 * @param {number} [opts.ttl=60]
 * @returns {Promise<boolean>}
 */
async function addARecord({ hostname, ipAddress, ttl = 60 }) {
  const fqdn = normalizeHostname(hostname);

  try {
    const json = await techPost("/zones/records/add", {
      domain: fqdn,
      type: "A",
      ttl: String(ttl),
      ipAddress,
    });

    if (json.status !== "ok") {
      console.warn(
        `[technitiumClient] ⚠️ Failed to add A record for ${fqdn}: ${json.errorMessage}`
      );
      return false;
    }

    console.log(
      `[technitiumClient] ➕ Created A record ${fqdn} -> ${ipAddress}`
    );
    return true;
  } catch (err) {
    console.warn(
      `[technitiumClient] ⚠️ Error adding A record for ${fqdn}: ${err.message}`
    );
    return false;
  }
}
|
||||
|
||||
/**
 * Delete the A records for `hostname` pointing at each known ZLH target IP.
 * "No such record" is treated as already-deleted (logged, not an error).
 * @param {object} opts
 * @param {string} opts.hostname
 * @returns {Promise<boolean>} true if at least one record was deleted
 */
async function delARecord({ hostname }) {
  const fqdn = normalizeHostname(hostname);
  let anyDeleted = false;

  // Sequential on purpose — one delete call per candidate target IP.
  for (const ip of ZLH_IPS) {
    try {
      const json = await techPost("/zones/records/delete", {
        domain: fqdn,
        type: "A",
        ipAddress: ip,
      });

      if (json.status === "ok") {
        anyDeleted = true;
        console.log(
          `[technitiumClient] 🗑️ Deleted A record ${fqdn} (${ip})`
        );
        continue;
      }

      if (json.errorMessage?.includes("no such record")) {
        console.log(
          `[technitiumClient] (A) Not found: ${fqdn} (${ip}) – already gone`
        );
      } else {
        console.warn(
          `[technitiumClient] ⚠️ Delete A failed for ${fqdn} (${ip}): ${json.errorMessage}`
        );
      }
    } catch (err) {
      console.warn(
        `[technitiumClient] ⚠️ Exception deleting A record for ${fqdn} (${ip}): ${err.message}`
      );
    }
  }

  return anyDeleted;
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* SRV Records (_minecraft._tcp) – Add + Delete */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
 * Create a Minecraft SRV record `_minecraft._tcp.<short>.<zone>` pointing at
 * `target` (defaults to the hostname itself) on `port`.
 * Never throws: returns true on success, false on API error or exception.
 *
 * @param {object} opts
 * @param {string} opts.hostname
 * @param {number} opts.port
 * @param {number} [opts.ttl=60]
 * @param {number} [opts.priority=0]
 * @param {number} [opts.weight=0]
 * @param {string} [opts.target] - override SRV target host
 * @returns {Promise<boolean>}
 */
async function addSRVRecord({
  hostname,
  port,
  ttl = 60,
  priority = 0,
  weight = 0,
  target,
}) {
  const fqdn = normalizeHostname(hostname);
  const srvDomain = `_minecraft._tcp.${shortHost(hostname)}.${ZONE}`;
  const srvTarget = normalizeHostname(target || hostname);

  try {
    const json = await techPost("/zones/records/add", {
      domain: srvDomain,
      type: "SRV",
      ttl: String(ttl),
      priority: String(priority),
      weight: String(weight),
      port: String(port),
      target: srvTarget,
    });

    if (json.status !== "ok") {
      console.warn(
        `[technitiumClient] ⚠️ Failed to add SRV for ${fqdn}: ${json.errorMessage}`
      );
      return false;
    }

    console.log(
      `[technitiumClient] ➕ Created SRV ${srvDomain} (port=${port}, target=${srvTarget})`
    );
    return true;
  } catch (err) {
    console.warn(
      `[technitiumClient] ⚠️ Error adding SRV for ${fqdn}: ${err.message}`
    );
    return false;
  }
}
|
||||
|
||||
/**
 * Delete all SRV records under `_minecraft._tcp.<short>.<zone>`.
 *
 * Technitium requires the full rdata (priority/weight/port/target) to delete
 * an SRV record, so we first fetch the records for that name and then issue
 * one delete per record found.
 *
 * @param {object} opts
 * @param {string} opts.hostname
 * @returns {Promise<boolean>} true if at least one SRV record was deleted
 */
async function delSRVRecord({ hostname }) {
  const fqdn = normalizeHostname(hostname);
  const short = shortHost(hostname);
  const srvDomain = `_minecraft._tcp.${short}.${ZONE}`;

  try {
    // Look up the existing SRV records so we know the exact rdata to delete.
    const getJson = await techGet(
      `/zones/records/get?token=${API_TOKEN}&domain=${encodeURIComponent(
        srvDomain
      )}&zone=${encodeURIComponent(ZONE)}`
    );

    const srvRecords = (getJson.response?.records || []).filter(
      (r) => r.type === "SRV"
    );

    if (srvRecords.length === 0) {
      console.log(
        `[technitiumClient] (SRV) Not found: ${srvDomain} (no SRV records for this name)`
      );
      return false;
    }

    let deleted = 0;

    for (const rec of srvRecords) {
      // Field name varies across Technitium versions — try all known shapes.
      const rData = rec.rData || rec.rdata || rec.data || {};

      const priority = String(rData.priority ?? 0);
      const weight = String(rData.weight ?? 0);
      const port = String(rData.port ?? 0);
      const target = rData.target || fqdn;

      const json = await techPost("/zones/records/delete", {
        domain: srvDomain,
        type: "SRV",
        priority,
        weight,
        port,
        target,
      });

      if (json.status === "ok") {
        deleted++;
        console.log(
          `[technitiumClient] 🗑️ Deleted SRV record ${srvDomain} (port=${port}, target=${target})`
        );
      } else if (json.errorMessage?.includes("no such record")) {
        // Already gone — treat as benign.
        console.log(
          `[technitiumClient] (SRV) Not found while deleting: ${srvDomain} (port=${port}, target=${target})`
        );
      } else {
        console.warn(
          `[technitiumClient] ⚠️ SRV delete failed for ${srvDomain}: ${json.errorMessage}`
        );
      }
    }

    return deleted > 0;
  } catch (err) {
    console.error(
      `[technitiumClient] ⚠️ SRV delete failed for ${fqdn}: ${err.message}`
    );
    return false;
  }
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Health Check */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
 * Health probe: list the zone and report its record count.
 * @returns {Promise<{ok: boolean, zone: string, recordCount?: number, error?: string}>}
 */
async function healthDiag() {
  const query = `/zones/records/get?token=${API_TOKEN}&zone=${encodeURIComponent(
    ZONE
  )}&listZone=true`;

  try {
    const json = await techGet(query);
    const recordCount = json.response?.records?.length || 0;
    return { ok: true, zone: ZONE, recordCount };
  } catch (err) {
    return { ok: false, zone: ZONE, error: err.message };
  }
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* Export Default */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
|
||||
// Single client object exposed both as the default export and mirrored as
// individual named exports so either import style works.
const technitiumClient = {
  listRecords,
  listSRVRecords,
  findRecordsByHostname,
  addARecord,
  addSRVRecord,
  delARecord,
  delSRVRecord,
  healthDiag,
};

export default technitiumClient;
export {
  listRecords,
  listSRVRecords,
  findRecordsByHostname,
  addARecord,
  addSRVRecord,
  delARecord,
  delSRVRecord,
  healthDiag,
};
|
||||
48
src/services/templateResolver.js
Normal file
48
src/services/templateResolver.js
Normal file
@ -0,0 +1,48 @@
|
||||
// src/services/templateResolver.js
|
||||
import prisma from './prisma.js';
|
||||
|
||||
/**
|
||||
* Resolve a template by slug (preferred) or by (game, variant).
|
||||
* Throws with clear messages if Prisma is mis-generated or the row is missing.
|
||||
*/
|
||||
/**
 * Resolve a container template by slug (preferred) or by (game, variant).
 * Throws with clear messages when Prisma is mis-generated, arguments are
 * missing, the row is absent, or the template's VMID is outside the golden
 * range.
 *
 * Generalization: the golden-template VMID sanity range (previously
 * hard-coded to 900-925) can now be tuned via TEMPLATE_VMID_MIN /
 * TEMPLATE_VMID_MAX; defaults preserve the original behavior.
 *
 * @param {object} opts
 * @param {string} [opts.templateSlug] - unique slug (preferred lookup)
 * @param {string} [opts.game]    - used with variant when no slug given
 * @param {string} [opts.variant] - used with game when no slug given
 * @returns {Promise<object>} the ContainerTemplate row
 * @throws Error (with httpCode 400 when neither slug nor game+variant given)
 */
export async function getTemplateOrThrow({ templateSlug, game, variant }) {
  // Guard: ensure the Prisma delegate exists.
  if (!prisma?.containerTemplate) {
    throw new Error(
      "[templateResolver] prisma.containerTemplate is missing. " +
      "Run `npx prisma generate` and ensure model `ContainerTemplate` exists."
    );
  }

  // Build the where clause: slug wins; otherwise require game+variant.
  const where =
    templateSlug ? { slug: templateSlug } :
    (game && variant ? { game, variant } : null);

  if (!where) {
    const err = new Error('templateSlug is required (or provide game+variant)');
    err.httpCode = 400;
    throw err;
  }

  // slug is unique → findUnique; (game, variant) may not be → findFirst.
  const tpl = templateSlug
    ? await prisma.containerTemplate.findUnique({ where })
    : await prisma.containerTemplate.findFirst({ where });

  if (!tpl) {
    throw new Error(`Template not found for ${JSON.stringify(where)}`);
  }

  // Sanity: golden templates live in a known VMID range (default 900-925).
  const minVmid = Number(process.env.TEMPLATE_VMID_MIN ?? 900);
  const maxVmid = Number(process.env.TEMPLATE_VMID_MAX ?? 925);
  if (tpl.templateVmid < minVmid || tpl.templateVmid > maxVmid) {
    throw new Error(
      `[templateResolver] Unexpected templateVmid ${tpl.templateVmid} for slug ${tpl.slug}`
    );
  }

  return tpl;
}
|
||||
|
||||
// Optional default export (same function) to support both import styles
|
||||
// Optional default export (same function) to support both import styles.
export default { getTemplateOrThrow };
|
||||
44
src/services/templateService.js
Normal file
44
src/services/templateService.js
Normal file
@ -0,0 +1,44 @@
|
||||
// src/services/templateService.js
|
||||
// Thin wrapper over templateResolver to keep old imports working.
|
||||
// Prefer importing getTemplateOrThrow directly from templateResolver.
|
||||
|
||||
import { getTemplateOrThrow } from './templateResolver.js';
|
||||
|
||||
/**
 * Resolve a container template.
 *
 * New API (preferred):
 *   resolveTemplate({ templateSlug, game, variant })
 *
 * Back-compat:
 *   resolveTemplate(game, variant, ctype)  // ctype is ignored for slug lookup
 *
 * @returns {Promise<object>} normalized template (see normalizeTemplate)
 */
export async function resolveTemplate(a, b, c) {
  let lookup;

  if (typeof a === 'string' && typeof b === 'string') {
    // Legacy positional form (game, variant, ctype) — ctype intentionally unused.
    lookup = { game: a, variant: b };
  } else {
    // Preferred object form: { templateSlug, game, variant }.
    const { templateSlug, game, variant } = a || {};
    lookup = { templateSlug, game, variant };
  }

  const tpl = await getTemplateOrThrow(lookup);
  return normalizeTemplate(tpl);
}
|
||||
|
||||
/**
 * Project a raw ContainerTemplate row into the stable shape callers expect:
 * nullish JSON columns become {} and a non-array startup becomes [].
 */
function normalizeTemplate(tpl) {
  const startup = Array.isArray(tpl.startup) ? tpl.startup : [];
  return {
    slug: tpl.slug,
    ctype: tpl.ctype,
    game: tpl.game,
    variant: tpl.variant,
    templateVmid: tpl.templateVmid,
    resources: tpl.resources ?? {},
    network: tpl.network ?? {},
    files: tpl.files ?? {},
    startup,
  };
}
|
||||
|
||||
// Default export kept for legacy `import templateService from ...` callers.
export default { resolveTemplate };
|
||||
138
src/services/velocityClient.js
Normal file
138
src/services/velocityClient.js
Normal file
@ -0,0 +1,138 @@
|
||||
// src/services/velocityClient.js
|
||||
// Handles dynamic backend registration with the Velocity ZpackVelocityBridge plugin.
|
||||
|
||||
import fetch from "node-fetch";
|
||||
import crypto from "crypto";
|
||||
import dotenv from "dotenv";
|
||||
|
||||
dotenv.config();
|
||||
|
||||
const VELOCITY_URL = process.env.VELOCITY_URL || "http://10.70.0.241:8081";
|
||||
const SHARED_SECRET = process.env.ZPACK_SECRET;
|
||||
|
||||
/**
 * SHA-256 hex digest of the shared secret, sent as X-Zpack-Secret.
 *
 * Fix: when ZPACK_SECRET is unset, crypto's update(undefined) used to throw
 * an opaque TypeError; fail fast with an actionable message instead.
 *
 * @returns {string} hex-encoded SHA-256 of the shared secret
 * @throws Error when ZPACK_SECRET is not configured
 */
function getSecretHash() {
  if (!SHARED_SECRET) {
    throw new Error("[velocityClient] ZPACK_SECRET is not set");
  }
  return crypto.createHash("sha256").update(SHARED_SECRET).digest("hex");
}
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/* EXISTENCE CHECK */
|
||||
/* -------------------------------------------------------------------------- */
|
||||
/**
 * Ask Velocity whether a backend with this name is already registered.
 * Any transport or parsing failure is logged and reported as false.
 * @param {string} name
 * @returns {Promise<boolean>}
 */
export async function serverExists(name) {
  try {
    const res = await fetch(`${VELOCITY_URL}/zpack/list`);
    if (!res.ok) return false;

    const data = await res.json();
    if (!Array.isArray(data.servers)) return false;

    return data.servers.some((s) => s.name === name);
  } catch (err) {
    console.error(`[velocityClient] ⚠️ Server existence check failed: ${err.message}`);
    return false;
  }
}
|
||||
|
||||
// --- Internal deduplication cache: "name:address:port" → last attempt ms ---
const registrationCache = new Map();

/**
 * Register a backend server with Velocity dynamically.
 * Repeated registrations of the same backend within 10 seconds are skipped.
 *
 * @param {Object} params
 * @param {string} params.name - FQDN or short server name
 * @param {string} params.address - IP address of the backend container
 * @param {number} params.port - Listening port (e.g. 25565)
 * @returns {Promise<string>} Velocity's response body, or "duplicate-skip"
 * @throws when Velocity answers with a non-2xx status
 */
export async function registerServer({ name, address, port }) {
  const cacheKey = `${name}:${address}:${port}`;
  const now = Date.now();

  // Debounce: skip if we attempted this exact backend <10s ago.
  const last = registrationCache.get(cacheKey);
  if (last !== undefined && now - last < 10000) {
    console.log(`[velocityClient] Skipping duplicate registration for ${cacheKey}`);
    return "duplicate-skip";
  }
  registrationCache.set(cacheKey, now);

  const payload = { server_name: name, address, port };
  const secretHash = getSecretHash();

  console.log(`[velocityClient] Registering backend: ${name} -> ${address}:${port}`);
  console.log(`[velocityClient] Using Velocity URL: ${VELOCITY_URL}`);

  const res = await fetch(`${VELOCITY_URL}/zpack/register`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "X-Zpack-Secret": secretHash,
    },
    body: JSON.stringify(payload),
  });

  if (!res.ok) {
    const text = await res.text();
    throw new Error(`Velocity register failed (${res.status}): ${text}`);
  }

  const responseBody = await res.text();
  console.log(`[velocityClient] ✓ Velocity registered ${name} → ${address}:${port}`);
  return responseBody;
}
|
||||
|
||||
/**
 * Unregister a backend server from Velocity dynamically.
 *
 * Idempotent: a 404 from Velocity (backend already removed) is treated as
 * success. Network errors and non-OK statuses are logged and return false
 * rather than throwing.
 *
 * @param {string} name - Short hostname or FQDN
 * @returns {Promise<boolean>} true when unregistered (or already gone)
 */
export async function unregisterServer(name) {
  // Convert short name → full FQDN if needed.
  // NOTE(review): zone comes from CF_ZONE_NAME here but TECHNITIUM_ZONE
  // elsewhere — confirm both resolve to the same zone.
  let serverName = name;
  const ZONE = process.env.CF_ZONE_NAME || "zerolaghub.quest";
  if (!serverName.includes(".")) {
    serverName = `${serverName}.${ZONE}`;
  }

  const payload = { server_name: serverName };
  const secretHash = getSecretHash();

  console.log(`[velocityClient] Unregistering backend: ${serverName}`);

  let res;
  try {
    res = await fetch(`${VELOCITY_URL}/zpack/unregister`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "X-Zpack-Secret": secretHash,
      },
      body: JSON.stringify(payload),
    });
  } catch (err) {
    // Velocity down/unreachable: report failure but do not throw.
    console.error(`[velocityClient] ⚠️ Velocity unreachable: ${err.message}`);
    return false;
  }

  const text = await res.text();

  // ---------------------------
  // Idempotent delete:
  // Velocity returns 404 when the backend is already removed → this is SUCCESS
  // ---------------------------
  if (res.status === 404) {
    console.log(
      `[velocityClient] ✓ Backend already removed (idempotent): ${serverName}`
    );
    return true;
  }

  if (!res.ok) {
    console.error(
      `[velocityClient] ❌ Velocity unregister failed (${res.status}): ${text}`
    );
    return false;
  }

  console.log(`[velocityClient] ✓ Velocity unregistered ${serverName}`);
  return true;
}
|
||||
|
||||
|
||||
|
||||
// Aggregate default export; functions are also available as named exports.
export default { registerServer, unregisterServer };
|
||||
88
src/services/vmidAllocator.js
Normal file
88
src/services/vmidAllocator.js
Normal file
@ -0,0 +1,88 @@
|
||||
// src/services/vmidAllocator.js
|
||||
// Counter-based VMID allocator with wrap + clash probe.
|
||||
|
||||
import * as prismaSvc from './prisma.js';
|
||||
const prisma = prismaSvc.prisma ?? prismaSvc.default;
|
||||
|
||||
const RANGES = {
|
||||
game: { min: 5000, max: 5999 },
|
||||
dev: { min: 6000, max: 6999 },
|
||||
};
|
||||
|
||||
/**
 * Internal helper: allocate the next VMID in a given range for a key ("game" | "dev").
 * - Uses VmidCounter as the single source of truth.
 * - Wraps when exceeding max.
 * - Probes ContainerInstance to avoid collisions.
 *
 * Runs inside a single Prisma transaction so the counter read, probes, and
 * final update are atomic with respect to other allocators.
 *
 * @param {string} key - counter key ("game" | "dev")
 * @param {{min: number, max: number}} range - inclusive VMID bounds
 * @returns {Promise<number>} a VMID not currently used by any ContainerInstance
 * @throws when every slot in the range is taken
 */
async function nextId(key, { min, max }) {
  return prisma.$transaction(async (tx) => {
    // Get or create the counter row; "current" starts one below min so the
    // first increment lands exactly on min.
    let row = await tx.vmidCounter.findUnique({ where: { key } });
    if (!row) {
      row = await tx.vmidCounter.create({
        data: { key, current: min - 1 },
      });
    }

    const totalSlots = max - min + 1;
    let candidate = row.current;
    let attempts = 0;

    // Walk forward (wrapping at max) until a free VMID is found or the
    // whole range has been probed once.
    while (attempts < totalSlots) {
      candidate += 1;
      if (candidate > max) candidate = min;

      // Check for an existing ContainerInstance with this vmid.
      const existing = await tx.containerInstance.findUnique({
        where: { vmid: candidate },
      });

      if (!existing) {
        // Persist the cursor so the next allocation continues from here.
        await tx.vmidCounter.update({
          where: { key },
          data: { current: candidate },
        });

        return candidate;
      }

      attempts += 1;
    }

    // If we got here, the range is fully exhausted.
    throw new Error(
      `No free VMIDs available in range ${min}-${max} for key="${key}".`
    );
  });
}
|
||||
|
||||
/**
 * Allocate a VMID for a container type.
 * Any ctype other than "dev" falls back to the "game" range.
 *
 * @param {string} ctype - "game" | "dev"
 * @returns {Promise<number>}
 */
export async function allocateVmid(ctype) {
  let key;
  if (ctype === 'dev') {
    key = 'dev';
  } else {
    key = 'game';
  }
  return nextId(key, RANGES[key]);
}
|
||||
|
||||
/**
 * Stub kept for API compatibility — always reports success.
 * Expand here if post-provisioning verification is ever needed.
 *
 * @param {number} _vmid - unused
 * @returns {Promise<boolean>} always true
 */
export async function confirmVmidAllocated(_vmid) {
  return true;
}
|
||||
|
||||
/**
 * Stub kept for API compatibility — VMIDs are not re-used immediately.
 * Implement a free-pool return here (carefully) if that ever changes.
 *
 * @param {number} _vmid - unused
 * @returns {Promise<boolean>} always true
 */
export async function releaseVmid(_vmid) {
  return true;
}
|
||||
|
||||
// Default export mirrors the named exports for default-import consumers.
export default { allocateVmid, confirmVmidAllocated, releaseVmid };
|
||||
82
src/services/vyosClient.js.old
Normal file
82
src/services/vyosClient.js.old
Normal file
@ -0,0 +1,82 @@
|
||||
// src/services/vyosClient.js
|
||||
import axios from 'axios';
|
||||
import https from 'node:https';
|
||||
import FormData from 'form-data';
|
||||
|
||||
// ENV — all values are read once at module load.
const BASE = (process.env.VYOS_API_URL || '').replace(/\/+$/, ''); // e.g. https://10.60.0.254:8443 (trailing slashes stripped)
const KEY = process.env.VYOS_API_KEY || '';
const TIMEOUT = Number(process.env.VYOS_TIMEOUT_MS || 10000); // per-request timeout, milliseconds
const ALLOW_SELF = process.env.VYOS_ALLOW_SELF_SIGNED === '1';

// Fail fast at import time if the API is not configured.
if (!BASE) throw new Error('VYOS_API_URL is required');
if (!KEY) throw new Error('VYOS_API_KEY is required');

// SECURITY: rejectUnauthorized:false disables TLS certificate verification.
// It is only active behind the explicit VYOS_ALLOW_SELF_SIGNED=1 opt-in
// (for routers with self-signed certificates).
const httpsAgent = ALLOW_SELF ? new https.Agent({ rejectUnauthorized: false }) : undefined;
|
||||
|
||||
/**
 * POST a multipart form to the VyOS HTTP API.
 *
 * The API expects two fields: `key` (the API key) and `data` (the request
 * payload as a JSON string).
 *
 * @param {string} path - API path appended to BASE, e.g. '/configure'
 * @param {object|string} payload - serialized as-is when already a string
 * @returns {Promise<any>} the response body
 */
async function postForm(path, payload) {
  let body = payload;
  if (typeof body !== 'string') {
    body = JSON.stringify(body);
  }

  const form = new FormData();
  form.append('key', KEY);
  form.append('data', body);

  const requestConfig = {
    timeout: TIMEOUT,
    httpsAgent,
    headers: form.getHeaders(),
    transitional: { forcedJSONParsing: false },
    maxBodyLength: Infinity,
  };

  const response = await axios.post(`${BASE}${path}`, form, requestConfig);
  return response.data;
}
|
||||
|
||||
/**
 * Thin client for the VyOS HTTP API.
 * All mutating calls go through postForm('/configure', ...), which commits
 * and saves in the same request.
 */
export default {
  /** Lightweight health probe — GET /info doesn't require a multipart body. */
  async healthDiag() {
    const out = { ok: false, url: `${BASE}/info`, note: null };
    try {
      // NOTE(review): the API key is passed in the query string here, so it
      // may appear in proxy/server logs — confirm that is acceptable.
      const { data } = await axios.get(`${BASE}/info?key=${encodeURIComponent(KEY)}`, {
        timeout: TIMEOUT, httpsAgent,
      });
      out.ok = !!data;
      return out;
    } catch (e) {
      // Record the HTTP status when available, otherwise the raw error text.
      out.note = e?.response?.status ? `info ${e.response.status}` : (e?.message || 'info error');
      return out;
    }
  },

  /** Read a config subtree, e.g. ['nat','destination','rule']. */
  async retrieve(path = []) {
    return postForm('/retrieve', { op: 'showConfig', path });
  },

  /**
   * Create/Update a DNAT rule: WAN:dstPort -> natIp:natPort.
   * The public port doubles as the VyOS rule id, so re-running with the same
   * dstPort updates the same rule instead of creating a duplicate.
   */
  async createPortForward({ wanIf = 'eth0', descr = '', proto = 'tcp', dstPort, natIp, natPort }) {
    const ruleId = String(dstPort); // deterministic: use public port as rule id
    const nodes = [
      { path: ['nat','destination','rule',ruleId,'description'], value: descr },
      { path: ['nat','destination','rule',ruleId,'inbound-interface'], value: wanIf },
      { path: ['nat','destination','rule',ruleId,'destination','port'], value: String(dstPort) },
      { path: ['nat','destination','rule',ruleId,'protocol'], value: proto },
      { path: ['nat','destination','rule',ruleId,'translation','address'], value: natIp },
      { path: ['nat','destination','rule',ruleId,'translation','port'], value: String(natPort ?? dstPort) },
    ];
    const raw = await postForm('/configure', { op: 'set', nodes, commit: true, save: true });
    return { id: ruleId, raw };
  },

  /** Delete a DNAT rule by id (which equals the public port — see above). */
  async deletePortForward(idOrPort) {
    const ruleId = String(idOrPort);
    return postForm('/configure', {
      op: 'delete',
      nodes: [{ path: ['nat','destination','rule',ruleId] }],
      commit: true, save: true,
    });
  },

  /** Kept for symmetry with the old client; VyOS commits happen inside /configure. */
  async applyChanges() {
    return { ok: true, note: 'VyOS commits done in /configure' };
  },
};
|
||||
11
src/test-getip.js
Normal file
11
src/test-getip.js
Normal file
@ -0,0 +1,11 @@
|
||||
// src/test-getip.js — manual smoke test: look up a container's IP by VMID.
// Usage: node src/test-getip.js <vmid>

import { getCtIp } from './services/getCtIp.js';

const vmid = process.argv[2];
const node = 'zlh-prod1'; // force node for test

(async () => {
  // FIX: the original neither validated argv nor caught rejections, so a
  // missing VMID or a lookup failure surfaced as an unhandled rejection.
  if (!vmid) {
    console.error('Usage: node src/test-getip.js <vmid>');
    process.exit(1);
  }
  try {
    const ip = await getCtIp(vmid, node);
    console.log(`CT ${vmid} IP:`, ip || 'No IP found');
  } catch (e) {
    console.error('getCtIp failed:', e?.message || e);
    process.exitCode = 1;
  }
})();
|
||||
13
src/tmp/check-prisma.mjs
Normal file
13
src/tmp/check-prisma.mjs
Normal file
@ -0,0 +1,13 @@
|
||||
// src/tmp/check-prisma.mjs — quick sanity check that the shared Prisma client
// exposes the expected model delegates.
import prisma from '../services/prisma.js';

// FIX: the original did `const p = new PrismaClient()` without importing
// PrismaClient — a ReferenceError on load. Reuse the shared client instead.
const p = prisma;

try {
  const keys = Object.keys(p);
  console.log('Delegates:', keys);
  console.log('has containerTemplate:', !!p.containerTemplate);
} catch (e) {
  console.error('Prisma introspection failed:', e);
} finally {
  // Always close the connection so the script exits cleanly.
  await p.$disconnect();
}
|
||||
35
src/tmp/test-templates.mjs
Normal file
35
src/tmp/test-templates.mjs
Normal file
@ -0,0 +1,35 @@
|
||||
// src/tmp/test-templates.mjs — manual smoke test for the template resolver
// and its service wrapper, by slug and by (game, variant).
import prisma from '../services/prisma.js';

// adjust relative paths: tmp → services (one level up)
import { getTemplateOrThrow } from '../services/templateResolver.js';
import { resolveTemplate } from '../services/templateService.js';

// FIX: the original constructed `new PrismaClient()` without importing
// PrismaClient (ReferenceError on load). The shared `prisma` client is used
// for the final $disconnect instead.

/** Template VMIDs are expected to live in the reserved 900–925 range. */
function okRange(vmid) {
  return typeof vmid === 'number' && vmid >= 900 && vmid <= 925;
}

/**
 * Run one resolver call, log the outcome, and set a non-zero exit code on
 * failure (1) or on an out-of-range template VMID (2).
 * FIX: `tpl.slug ?? tpl?.slug` was redundant (and threw before the fallback
 * could apply when tpl was null) — collapsed to `tpl?.slug`.
 */
async function runOnce(label, fn) {
  try {
    const tpl = await fn();
    console.log(`✅ ${label}`, {
      slug: tpl?.slug,
      vmid: tpl?.templateVmid,
    });
    if (!okRange(tpl?.templateVmid)) {
      console.error('⚠ vmid out of expected template range (900–925)');
      process.exitCode = 2;
    }
  } catch (e) {
    console.error(`❌ ${label}`, e?.message || e);
    process.exitCode = 1;
  }
}

(async () => {
  await runOnce('resolver by slug', () => getTemplateOrThrow({ templateSlug: 'mc-vanilla' }));
  await runOnce('resolver by (game,variant)', () => getTemplateOrThrow({ game: 'minecraft', variant: 'vanilla' }));
  await runOnce('service wrapper by slug', () => resolveTemplate({ templateSlug: 'mc-vanilla' }));
  await runOnce('service wrapper by (game,variant)', () => resolveTemplate({ game: 'minecraft', variant: 'vanilla' }));
  await prisma.$disconnect();
})();
|
||||
22
src/utils/configBuilder.js
Normal file
22
src/utils/configBuilder.js
Normal file
@ -0,0 +1,22 @@
|
||||
/**
 * Build the effective container configuration by layering, in increasing
 * precedence: template defaults → template network → user overrides → system.
 *
 * @param {object|null} template - template row; may carry either a `defaults`
 *   object or legacy flat defaultCpu/defaultMemory/defaultDisk fields, and
 *   either a `network` object or a legacy flat `bridge` field.
 * @param {object} [overrides={}] - user-supplied values; win over the template.
 * @param {object} [system={}] - system-assigned values (vmid, ports); always win.
 * @returns {object} merged config; `ports` is always an array.
 */
export function buildEffectiveConfig(template, overrides = {}, system = {}) {
  // Legacy templates store flat default* fields instead of a `defaults` object.
  const templateDefaults = template?.defaults ?? {
    cpu: template?.defaultCpu,
    memory: template?.defaultMemory,
    disk: template?.defaultDisk,
  };
  const templateNetwork = template?.network ?? (template?.bridge ? { bridge: template.bridge } : {});

  const cfg = {
    ...templateDefaults, // 1) defaults
    ...templateNetwork,  // 2) network
    ...overrides,        // 3) user overrides win
    // FIX: overrides must win over the template here too — the original put
    // template?.storage first, silently discarding a user-supplied storage.
    storage: overrides?.storage ?? template?.storage ?? undefined,
  };

  if (system.vmid !== undefined) cfg.vmid = system.vmid; // 4) system always wins
  if (system.ports !== undefined) cfg.ports = system.ports;
  if (!Array.isArray(cfg.ports)) cfg.ports = []; // normalize: ports is always an array
  return cfg;
}
|
||||
89
src/utils/portAllocation.js.old
Normal file
89
src/utils/portAllocation.js.old
Normal file
@ -0,0 +1,89 @@
|
||||
// /opt/zpack-api/src/utils/portAllocation.js
|
||||
import prisma from '../services/prisma.js';
|
||||
|
||||
|
||||
|
||||
// Inclusive bounds of the customer-facing port range.
const PORT_RANGE_START = 50000
const PORT_RANGE_END = 60000

/**
 * Allocates contiguous blocks of public ports to customers, first-fit over
 * the gaps between existing allocations.
 */
export class PortAllocationService {
  /**
   * Allocate a port block for a customer, or return their existing block
   * (one block per customer; the call is idempotent).
   *
   * @param {string|number} customerId
   * @param {number} [portCount=3] - number of contiguous ports to reserve
   * @returns {Promise<{customerId, basePort: number, ports: number[], isExisting: boolean}>}
   * @throws {Error} when the customer does not exist or the range is exhausted
   */
  static async allocatePortBlock(customerId, portCount = 3) {
    return await prisma.$transaction(async (tx) => {
      // Check customer exists and get current allocations
      const customer = await tx.customer.findUnique({
        where: { id: customerId },
        include: { ports: true }
      })

      if (!customer) throw new Error('Customer not found')

      // Return existing allocation if found
      const existingAllocation = customer.ports[0]
      if (existingAllocation) {
        return {
          customerId,
          basePort: existingAllocation.basePort,
          ports: Array.from({ length: existingAllocation.count },
            (_, i) => existingAllocation.basePort + i),
          isExisting: true
        }
      }

      // Find available port range
      const basePort = await this.findAvailablePortRange(tx, portCount)

      // Create new allocation (result not needed — removed unused local)
      await tx.portAllocation.create({
        data: {
          customerId,
          basePort,
          count: portCount
        }
      })

      return {
        customerId,
        basePort,
        ports: Array.from({ length: portCount }, (_, i) => basePort + i),
        isExisting: false
      }
    })
  }

  /**
   * First-fit search for `count` contiguous free ports.
   *
   * @param {object} tx - transaction client exposing portAllocation.findMany
   * @param {number} count - block size
   * @returns {Promise<number>} base port of a free block
   * @throws {Error} when no block fits inside the range
   *
   * NOTE(review): assumes existing allocations never overlap — confirm the
   * schema enforces that.
   */
  static async findAvailablePortRange(tx, count) {
    // Get all existing allocations sorted by basePort
    const allocations = await tx.portAllocation.findMany({
      orderBy: { basePort: 'asc' }
    })

    let startPort = PORT_RANGE_START

    for (const allocation of allocations) {
      // Check if there's a gap before this allocation
      if (startPort + count <= allocation.basePort) {
        return startPort
      }
      // Move past this allocation
      startPort = allocation.basePort + allocation.count
    }

    // FIX: bound the *entire* block, not just its first port. The original
    // checked `startPort > 60000`, which let a multi-port block start at
    // e.g. 59999 and spill past the end of the range.
    if (startPort + count - 1 > PORT_RANGE_END) {
      throw new Error('Port range exhausted')
    }

    return startPort
  }

  /**
   * Look up a customer's existing port block.
   * @returns {Promise<{customerId, basePort: number, ports: number[]}|null>}
   */
  static async getCustomerPorts(customerId) {
    const allocation = await prisma.portAllocation.findFirst({
      where: { customerId }
    })

    if (!allocation) return null

    return {
      customerId,
      basePort: allocation.basePort,
      ports: Array.from({ length: allocation.count },
        (_, i) => allocation.basePort + i)
    }
  }
}
|
||||
36
src/utils/portPool.js.old
Normal file
36
src/utils/portPool.js.old
Normal file
@ -0,0 +1,36 @@
|
||||
// src/utils/portPool.js
|
||||
import prisma from '../services/prisma.js';
|
||||
|
||||
|
||||
/**
 * Port pool backed by a pre-seeded PortPool table: allocation flips rows
 * between 'free' and 'allocated' rather than creating/deleting rows.
 */
export const PortPool = {
  /**
   * Allocate `count` free ports matching (protocol, ip), lowest ports first.
   * @returns {Promise<number[]>} the allocated port numbers, ascending
   * @throws {Error} when fewer than `count` free ports match
   *
   * NOTE(review): findMany + updateMany inside the transaction does not lock
   * the selected rows; under concurrent allocations two transactions could
   * select the same free ports unless the isolation level prevents it — confirm.
   */
  async allocate({ customerId, vmid, count = 1, ip = null, protocol = 'tcp', purpose = 'game' }) {
    return prisma.$transaction(async (tx) => {
      const rows = await tx.portPool.findMany({
        where: { status: 'free', protocol, ip },
        orderBy: { port: 'asc' },
        take: count
      })
      if (rows.length < count) throw new Error('Insufficient free ports')

      const ids = rows.map(r => r.id)
      const now = new Date()
      await tx.portPool.updateMany({
        where: { id: { in: ids } },
        data: { status: 'allocated', customerId, vmid, purpose, allocatedAt: now, releasedAt: null }
      })
      return rows.map(r => r.port)
    })
  },

  /** Return all of a VMID's allocated ports to the free pool. */
  async releaseByVmid(vmid) {
    await prisma.portPool.updateMany({
      where: { vmid, status: 'allocated' },
      data: { status: 'free', customerId: null, vmid: null, purpose: null, releasedAt: new Date() }
    })
  },

  /** List the ports currently allocated to a VMID, ascending. */
  async getByVmid(vmid) {
    const rows = await prisma.portPool.findMany({ where: { vmid, status: 'allocated' }, orderBy: { port: 'asc' } })
    return rows.map(r => r.port)
  }
}
|
||||
7
src/utils/redis.js
Normal file
7
src/utils/redis.js
Normal file
@ -0,0 +1,7 @@
|
||||
import dotenv from 'dotenv';
|
||||
dotenv.config();
|
||||
|
||||
// Resolve the Redis endpoint from the environment, falling back to a local
// default instance.
const redisHost = process.env.REDIS_HOST || '127.0.0.1';
const redisPort = Number.parseInt(process.env.REDIS_PORT || '6379', 10);

/** Shared Redis connection options (ioredis / BullMQ style). */
export const redisOptions = {
  host: redisHost,
  port: redisPort,
};
|
||||
11
src/worker.js
Normal file
11
src/worker.js
Normal file
@ -0,0 +1,11 @@
|
||||
import { provisionWorker } from './jobs/provisionProcessor.js'
|
||||
|
||||
// Startup banner — importing provisionProcessor above already started the worker.
console.log('🚀 ZeroLagHub workers started')
console.log('📋 Listening for provision jobs...')

/** Graceful shutdown: drain the worker, then exit cleanly. */
const shutdown = async () => {
  console.log('Shutting down workers...')
  await provisionWorker.close()
  process.exit(0)
}

process.on('SIGTERM', shutdown)
|
||||
33
test-a-srv-delete.mjs
Normal file
33
test-a-srv-delete.mjs
Normal file
@ -0,0 +1,33 @@
|
||||
// Manual end-to-end check: create an A + SRV record pair in Cloudflare, wait,
// then delete both. Run by hand and verify each step in the Cloudflare dashboard.
import {
  createARecord,
  createSRVRecord,
  delARecord,
  delSRVRecord,
} from "./src/services/cloudflareClient.js";

const hostname = "mc-test-422.zerolaghub.quest";
const ip = "139.64.165.248";
const port = 50065;

console.log("\n=== TEST: A + SRV Create / Delete ===\n");

// --- Create ---
await createARecord({ hostname, ip });
await createSRVRecord({
  service: "minecraft",
  protocol: "tcp",
  hostname,
  port,
  target: hostname,
});

console.log("\n✅ Created both records. Check Cloudflare now.");

// --- Wait 5 seconds so the created records can be inspected before deletion ---
await new Promise((r) => setTimeout(r, 5000));

// --- Delete (SRV first, then the A record it targets) ---
await delSRVRecord({ service: "minecraft", protocol: "tcp", hostname });
await delARecord({ hostname });

console.log("\n✅ Deleted both records. Verify in Cloudflare.\n");
|
||||
25
test-a-srv.mjs
Normal file
25
test-a-srv.mjs
Normal file
@ -0,0 +1,25 @@
|
||||
// Manual check: create an A record and a matching SRV record in Cloudflare.
// Records are left in place — verify (and clean up) in the dashboard.
import { createARecord, createSRVRecord } from "./src/services/cloudflareClient.js";

// ensure Cloudflare env vars are loaded
// NOTE(review): this relies on the env being populated before this module
// runs (presumably via cloudflareClient's own dotenv load) — confirm.
if (!process.env.CLOUDFLARE_API_TOKEN || !process.env.CLOUDFLARE_ZONE_ID) {
  console.error("❌ Missing Cloudflare env vars!");
  process.exit(1);
}

const hostname = "mc-test-421.zerolaghub.quest";
const ip = "139.64.165.248";
const port = 50065;

console.log("\n=== TEST: A + SRV Creation ===\n");

await createARecord({ hostname, ip });

await createSRVRecord({
  service: "minecraft",
  protocol: "tcp",
  hostname,
  port,
  target: hostname,
});

console.log("\n✅ Done. Check Cloudflare for both A + SRV.\n");
|
||||
7
test-edge.mjs
Normal file
7
test-edge.mjs
Normal file
@ -0,0 +1,7 @@
|
||||
// Manual check: unpublish a previously published edge entry (DNS + port
// forwarding) for one container. Run by hand; inspect the services afterwards.
import { unpublish } from './src/services/dePublisher.js'
await unpublish({
  hostname: 'mc-test-5095.zerolaghub.quest',
  vmid: 5095,
  game: 'minecraft',
  ports: [50065]
})
|
||||
9
test-srv.mjs
Normal file
9
test-srv.mjs
Normal file
@ -0,0 +1,9 @@
|
||||
// Manual check: create a single Minecraft SRV record in Cloudflare pointing a
// hostname at itself on a custom port. Verify (and clean up) in the dashboard.
import { createSRVRecord } from "./src/services/cloudflareClient.js";

await createSRVRecord({
  service: "minecraft",
  protocol: "tcp",
  hostname: "mc-test-420.zerolaghub.quest",
  port: 50065,
  target: "mc-test-420.zerolaghub.quest"
});
|
||||
BIN
zpack-api-boilerplate.zip
Normal file
BIN
zpack-api-boilerplate.zip
Normal file
Binary file not shown.
Loading…
Reference in New Issue
Block a user