puq-docker-n8n-deploy
工作流概述
这是一个包含34个节点的复杂工作流,通过带 Basic Auth 的 Webhook API 接收命令,自动化部署和管理基于 Docker 的客户容器(创建、启动、停止、挂载/卸载磁盘、暂停、恢复、终止,以及查询容器状态等)。
工作流源代码
{
"id": "cY8OVKzHS0ScRhP9",
"meta": {
"instanceId": "ffb0782f8b2cf4278577cb919e0cd26141bc9ff8774294348146d454633aa4e3",
"templateCredsSetupCompleted": true
},
"name": "puq-docker-n8n-deploy",
"tags": [],
"nodes": [
{
"id": "fc30f537-51d2-45df-b1c4-5d4cd9d80a0e",
"name": "If",
"type": "n8n-nodes-base.if",
"position": [
-2060,
-320
],
"parameters": {
"options": {},
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "or",
"conditions": [
{
"id": "b702e607-888a-42c9-b9a7-f9d2a64dfccd",
"operator": {
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.server_domain }}",
"rightValue": "={{ $('API').item.json.body.server_domain }}"
}
]
}
},
"typeVersion": 2.2
},
{
"id": "6152fc38-2e50-4db5-b6f6-6e7d2492bbb1",
"name": "Parametrs",
"type": "n8n-nodes-base.set",
"position": [
-2280,
-320
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "a6328600-7ee0-4031-9bdb-fcee99b79658",
"name": "server_domain",
"type": "string",
"value": "d01-test.uuq.pl"
},
{
"id": "370ddc4e-0fc0-48f6-9b30-ebdfba72c62f",
"name": "clients_dir",
"type": "string",
"value": "/opt/docker/clients"
},
{
"id": "92202bb8-6113-4bc5-9a29-79d238456df2",
"name": "mount_dir",
"type": "string",
"value": "/mnt"
},
{
"id": "baa52df2-9c10-42b2-939f-f05ea85ea2be",
"name": "screen_left",
"type": "string",
"value": "{{"
},
{
"id": "2b19ed99-2630-412a-98b6-4be44d35d2e7",
"name": "screen_right",
"type": "string",
"value": "}}"
}
]
}
},
"typeVersion": 3.4
},
{
"id": "cf1b3eea-0439-418b-8c68-f7e45ddfbc7e",
"name": "API",
"type": "n8n-nodes-base.webhook",
"position": [
-2600,
-320
],
"webhookId": "4e8168b3-2cad-462a-9750-152986331ce2",
"parameters": {
"path": "docker-n8n",
"options": {},
"httpMethod": [
"POST"
],
"responseMode": "responseNode",
"authentication": "basicAuth",
"multipleMethods": true
},
"credentials": {
"httpBasicAuth": {
"id": "fiFY4Gv1SsaJfJvv",
"name": "n8n"
}
},
"typeVersion": 2
},
{
"id": "516ac020-add2-4b08-ae91-bfb95dec2f88",
"name": "422-Invalid server domain",
"type": "n8n-nodes-base.respondToWebhook",
"position": [
-2100,
0
],
"parameters": {
"options": {
"responseCode": 422
},
"respondWith": "json",
"responseBody": "[{
\"status\": \"error\",
\"error\": \"Invalid server domain\"
}]"
},
"typeVersion": 1.1,
"alwaysOutputData": false
},
{
"id": "4e50cf8e-cfa7-4249-a847-9f8ff27664e4",
"name": "Code1",
"type": "n8n-nodes-base.code",
"position": [
100,
100
],
"parameters": {
"mode": "runOnceForEachItem",
"jsCode": "try {
if ($json.stdout === 'success') {
return {
json: {
status: 'success',
message: '',
data: '',
}
};
}
const parsedData = JSON.parse($json.stdout);
return {
json: {
status: parsedData.status === 'error' ? 'error' : 'success',
message: parsedData.message || (parsedData.status === 'error' ? 'An error occurred' : ''),
data: parsedData || '',
}
};
} catch (error) {
return {
json: {
status: 'error',
message: $json.stdout,
data: '',
}
};
}"
},
"executeOnce": false,
"retryOnFail": false,
"typeVersion": 2,
"alwaysOutputData": false
},
{
"id": "c7575c21-00dc-4238-95aa-3e20ff5d21a3",
"name": "SSH",
"type": "n8n-nodes-base.ssh",
"onError": "continueErrorOutput",
"position": [
-180,
100
],
"parameters": {
"cwd": "=/",
"command": "={{ $json.sh }}"
},
"credentials": {
"sshPassword": {
"id": "Cyjy61UWHwD2Xcd8",
"name": "d01-test.uuq.pl-puq"
}
},
"executeOnce": true,
"typeVersion": 1
},
{
"id": "2e017042-f991-45c7-afc4-ffdfcded4003",
"name": "Container Actions",
"type": "n8n-nodes-base.switch",
"position": [
-1680,
580
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "start",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "66ad264d-5393-410c-bfa3-011ab8eb234a",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_start"
}
]
},
"renameOutput": true
},
{
"outputKey": "stop",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "b48957a0-22c0-4ac0-82ef-abd9e7ab0207",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_stop"
}
]
},
"renameOutput": true
},
{
"outputKey": "mount_disk",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "727971bf-4218-41c1-9b07-22df4b947852",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_mount_disk"
}
]
},
"renameOutput": true
},
{
"outputKey": "unmount_disk",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "0c80b1d9-e7ca-4cf3-b3ac-b40fdf4dd8f8",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_unmount_disk"
}
]
},
"renameOutput": true
},
{
"outputKey": "container_get_acl",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "138c5436-dd66-48d4-bca4-6af6c80cd903",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_get_acl"
}
]
},
"renameOutput": true
},
{
"outputKey": "container_set_acl",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "fa39e80b-4aa4-4cd3-af3c-14acfa9cf2d8",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_set_acl"
}
]
},
"renameOutput": true
},
{
"outputKey": "container_get_net",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "46b0d65f-20d6-467a-94fb-407370d967f7",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_get_net"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "2fdb7b98-d6f8-44b4-917b-1ed2bb0e65f8",
"name": "Service Actions",
"type": "n8n-nodes-base.switch",
"position": [
-1240,
-1140
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "test_connection",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "3afdd2f1-fe93-47c2-95cd-bac9b1d94eeb",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "test_connection"
}
]
},
"renameOutput": true
},
{
"outputKey": "create",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "102f10e9-ec6c-4e63-ba95-0fe6c7dc0bd1",
"operator": {
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "create"
}
]
},
"renameOutput": true
},
{
"outputKey": "suspend",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "f62dfa34-6751-4b34-adcc-3d6ba1b21a8c",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "suspend"
}
]
},
"renameOutput": true
},
{
"outputKey": "unsuspend",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "384d2026-b753-4c27-94c2-8f4fc189eb5f",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "unsuspend"
}
]
},
"renameOutput": true
},
{
"outputKey": "terminate",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "0e190a97-827a-4e87-8222-093ff7048b21",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "terminate"
}
]
},
"renameOutput": true
},
{
"outputKey": "change_package",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "6f7832f3-b61d-4517-ab6b-6007998136dd",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "change_package"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "ac5541f1-94e4-4c7e-a3a5-58b56b9e1ea8",
"name": "Container Stats",
"type": "n8n-nodes-base.switch",
"position": [
-1680,
-340
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "inspect",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "66ad264d-5393-410c-bfa3-011ab8eb234a",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_information_inspect"
}
]
},
"renameOutput": true
},
{
"outputKey": "stats",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "b48957a0-22c0-4ac0-82ef-abd9e7ab0207",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_information_stats"
}
]
},
"renameOutput": true
},
{
"outputKey": "log",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "50ede522-af22-4b7a-b1fd-34b27fd3fadd",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_log"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "33dab9ef-121f-4b8a-af2b-7e1151ebd95f",
"name": "API answer",
"type": "n8n-nodes-base.respondToWebhook",
"position": [
400,
100
],
"parameters": {
"options": {
"responseCode": 200
},
"respondWith": "allIncomingItems"
},
"typeVersion": 1.1,
"alwaysOutputData": true
},
{
"id": "3b8ae835-901c-44b5-9635-7c1d92509704",
"name": "Inspect",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1360,
-540
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}\"
INSPECT_JSON=\"{}\"
if sudo docker ps -a --filter \"name=$CONTAINER_NAME\" | grep -q \"$CONTAINER_NAME\"; then
INSPECT_JSON=$(sudo docker inspect \"$CONTAINER_NAME\")
fi
echo \"{\\"inspect\\": $INSPECT_JSON}\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "8193cb7c-50eb-49d3-a4d3-1c0b7686a1b1",
"name": "Stat",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1360,
-340
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
STATUS_FILE=\"$COMPOSE_DIR/status.json\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}\"
# Initialize empty container data
INSPECT_JSON=\"{}\"
STATS_JSON=\"{}\"
# Check if container is running
if sudo docker ps -a --filter \"name=$CONTAINER_NAME\" | grep -q \"$CONTAINER_NAME\"; then
# Get Docker inspect info in JSON (as raw string)
INSPECT_JSON=$(sudo docker inspect \"$CONTAINER_NAME\")
# Get Docker stats info in JSON (as raw string)
STATS_JSON=$(sudo docker stats --no-stream --format \"{{ $('Parametrs').item.json.screen_left }}json .{{ $('Parametrs').item.json.screen_right }}\" \"$CONTAINER_NAME\")
STATS_JSON=${STATS_JSON:-'{}'}
fi
# Initialize disk info variables
MOUNT_USED=\"N/A\"
MOUNT_FREE=\"N/A\"
MOUNT_TOTAL=\"N/A\"
MOUNT_PERCENT=\"N/A\"
IMG_SIZE=\"N/A\"
IMG_PERCENT=\"N/A\"
DISK_STATS_IMG=\"N/A\"
# Check if mount directory exists and is accessible
if [ -d \"$MOUNT_DIR\" ]; then
if mount | grep -q \"$MOUNT_DIR\"; then
# Get disk usage for mounted directory
DISK_STATS_MOUNT=$(df -h \"$MOUNT_DIR\" | tail -n 1)
MOUNT_USED=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $3}')
MOUNT_FREE=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $4}')
MOUNT_TOTAL=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $2}')
MOUNT_PERCENT=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $5}')
fi
fi
# Check if image file exists
if [ -f \"$IMG_FILE\" ]; then
# Get disk usage for image file
IMG_SIZE=$(du -sh \"$IMG_FILE\" | awk '{print $1}')
fi
# Manually create a combined JSON object
FINAL_JSON=\"{\\"inspect\\": $INSPECT_JSON, \\"stats\\": $STATS_JSON, \\"disk\\": {\\"mounted\\": {\\"used\\": \\"$MOUNT_USED\\", \\"free\\": \\"$MOUNT_FREE\\", \\"total\\": \\"$MOUNT_TOTAL\\", \\"percent\\": \\"$MOUNT_PERCENT\\"}, \\"img_file\\": {\\"size\\": \\"$IMG_SIZE\\"}}}\"
# Output the result
echo \"$FINAL_JSON\"
exit 0"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "1888734f-11a9-47a1-8353-c6b2862ff437",
"name": "Start",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1200,
240
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
STATUS_FILE=\"$COMPOSE_DIR/status.json\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
echo \"error: $1\"
exit 1
}
if ! df -h | grep -q \"$MOUNT_DIR\"; then
handle_error \"The file $IMG_FILE is not mounted to $MOUNT_DIR\"
fi
if sudo docker ps --filter \"name={{ $('API').item.json.body.domain }}\" --filter \"status=running\" -q | grep -q .; then
handle_error \"{{ $('API').item.json.body.domain }} container is running\"
fi
# Change to the compose directory
cd \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to change directory to $COMPOSE_DIR\"
# Start the Docker containers
if ! sudo docker-compose up -d > /dev/null 2>error.log; then
ERROR_MSG=$(tail -n 10 error.log)
handle_error \"Docker-compose failed: $ERROR_MSG\"
fi
# Success
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "e9886d6a-c537-4a1e-b2d4-1c4bfaf379e3",
"name": "Stop",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1080,
340
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
STATUS_FILE=\"$COMPOSE_DIR/status.json\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
echo \"error: $1\"
exit 1
}
# Check if Docker container is running
if ! sudo docker ps --filter \"name={{ $('API').item.json.body.domain }}\" --filter \"status=running\" -q | grep -q .; then
handle_error \"{{ $('API').item.json.body.domain }} container is not running\"
fi
# Stop and remove the Docker containers (also remove associated volumes)
if ! sudo docker-compose -f \"$COMPOSE_DIR/docker-compose.yml\" down > /dev/null 2>&1; then
handle_error \"Failed to stop and remove docker-compose containers\"
fi
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "07ff56b6-6b49-4215-9801-2c13124e0023",
"name": "Test Connection1",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-660,
-1140
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Function to log an error, print to console
handle_error() {
echo \"error: $1\"
exit 1
}
# Check if Docker is installed
if ! command -v docker &> /dev/null; then
handle_error \"Docker is not installed\"
fi
# Check if Docker service is running
if ! systemctl is-active --quiet docker; then
handle_error \"Docker service is not running\"
fi
# Check if nginx-proxy container is running
if ! sudo docker ps --filter \"name=nginx-proxy\" --filter \"status=running\" -q | grep -q .; then
handle_error \"nginx-proxy container is not running\"
fi
# Check if letsencrypt-nginx-proxy-companion container is running
if ! sudo docker ps --filter \"name=letsencrypt-nginx-proxy-companion\" --filter \"status=running\" -q | grep -q .; then
handle_error \"letsencrypt-nginx-proxy-companion container is not running\"
fi
# If everything is successful
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "e8ad165b-fb70-41fe-938d-5f600c57d1d0",
"name": "Deploy",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-660,
-980
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Get values for variables from templates
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
COMPOSE_FILE=\"$COMPOSE_DIR/docker-compose.yml\"
STATUS_FILE=\"$COMPOSE_DIR/status\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/$DOMAIN\"
DOCKER_COMPOSE_TEXT='{{ $('Deploy-docker-compose').item.json[\"docker-compose\"] }}'
NGINX_MAIN_ACL_FILE=\"$NGINX_DIR/$DOMAIN\"_acl
NGINX_MAIN_TEXT='{{ $('nginx').item.json['main'] }}'
NGINX_MAIN_FILE=\"$NGINX_DIR/$DOMAIN\"
VHOST_MAIN_FILE=\"$VHOST_DIR/$DOMAIN\"
NGINX_MAIN_LOCATION_TEXT='{{ $('nginx').item.json['main_location'] }}'
NGINX_MAIN_LOCATION_FILE=\"$NGINX_DIR/$DOMAIN\"_location
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
DISK_SIZE=\"{{ $('API').item.json.body.disk }}\"
# Function to handle errors: write to the status file and print the message to console
handle_error() {
STATUS_JSON=\"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
echo \"$STATUS_JSON\" | sudo tee \"$STATUS_FILE\" > /dev/null # Write error to the status file
echo \"error: $1\" # Print the error message to the console
exit 1 # Exit the script with an error code
}
# Check if the directory already exists. If yes, exit with an error.
if [ -d \"$COMPOSE_DIR\" ]; then
echo \"error: Directory $COMPOSE_DIR already exists\"
exit 1
fi
# Create necessary directories with permissions
sudo mkdir -p \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to create $COMPOSE_DIR\"
sudo mkdir -p \"$NGINX_DIR\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_DIR\"
sudo mkdir -p \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to create $MOUNT_DIR\"
# Set permissions on the created directories
sudo chmod -R 777 \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to set permissions on $COMPOSE_DIR\"
sudo chmod -R 777 \"$NGINX_DIR\" > /dev/null 2>&1 || handle_error \"Failed to set permissions on $NGINX_DIR\"
sudo chmod -R 777 \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to set permissions on $MOUNT_DIR\"
# Create docker-compose.yml file
echo \"$DOCKER_COMPOSE_TEXT\" | sudo tee \"$COMPOSE_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $COMPOSE_FILE\"
# Create NGINX configuration files
echo \"\" | sudo tee \"$NGINX_MAIN_ACL_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_ACL_FILE\"
echo \"$NGINX_MAIN_TEXT\" | sudo tee \"$NGINX_MAIN_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_FILE\"
echo \"$NGINX_MAIN_LOCATION_TEXT\" | sudo tee \"$NGINX_MAIN_LOCATION_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_LOCATION_FILE\"
# Change to the compose directory
cd \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to change directory to $COMPOSE_DIR\"
# Create data.img file if it doesn't exist
if [ ! -f \"$IMG_FILE\" ]; then
sudo fallocate -l \"$DISK_SIZE\"G \"$IMG_FILE\" > /dev/null 2>&1 || sudo truncate -s \"$DISK_SIZE\"G \"$IMG_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $IMG_FILE\"
sudo mkfs.ext4 \"$IMG_FILE\" > /dev/null 2>&1 || handle_error \"Failed to format $IMG_FILE\" # Format the image as ext4
sync # Synchronize the data to disk
fi
# Add an entry to /etc/fstab for mounting if not already present
if ! grep -q \"$IMG_FILE\" /etc/fstab; then
echo \"$IMG_FILE $MOUNT_DIR ext4 loop 0 0\" | sudo tee -a /etc/fstab > /dev/null || handle_error \"Failed to add entry to /etc/fstab\"
fi
# Mount all entries in /etc/fstab
sudo mount -a || handle_error \"Failed to mount entries from /etc/fstab\"
# Set permissions on the mount directory
sudo chmod -R 777 \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to set permissions on $MOUNT_DIR\"
# Copy NGINX configuration files instead of creating symbolic links
sudo cp -f \"$NGINX_MAIN_FILE\" \"$VHOST_MAIN_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_FILE to $VHOST_MAIN_FILE\"
sudo chmod 777 \"$VHOST_MAIN_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_FILE\"
sudo cp -f \"$NGINX_MAIN_LOCATION_FILE\" \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_LOCATION_FILE to $VHOST_MAIN_LOCATION_FILE\"
sudo chmod 777 \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_LOCATION_FILE\"
# Start Docker containers using docker-compose
if ! sudo docker-compose up -d > /dev/null 2>error.log; then
ERROR_MSG=$(tail -n 10 error.log) # Read the last 10 lines from error.log
handle_error \"Docker-compose failed: $ERROR_MSG\"
fi
# If everything is successful, update the status file and print success message
echo \"active\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "a6346a99-545f-4873-ac41-8c25941f34e9",
"name": "Suspend",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-660,
-820
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
COMPOSE_FILE=\"$COMPOSE_DIR/docker-compose.yml\"
STATUS_FILE=\"$COMPOSE_DIR/status\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/$DOMAIN\"
VHOST_MAIN_FILE=\"$VHOST_DIR/$DOMAIN\"
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
# Function to log an error, write to status file, and print to console
handle_error() {
STATUS_JSON=\"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
echo \"$STATUS_JSON\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"error: $1\"
exit 1
}
# Stop and remove Docker containers (also remove associated volumes)
if [ -f \"$COMPOSE_FILE\" ]; then
if ! sudo docker-compose -f \"$COMPOSE_FILE\" down > /dev/null 2>&1; then
handle_error \"Failed to stop and remove docker-compose containers\"
fi
else
echo \"Warning: docker-compose.yml not found, skipping container stop.\"
fi
# Remove mount entry from /etc/fstab if it exists
if grep -q \"$IMG_FILE\" /etc/fstab; then
sudo sed -i \"\|$(printf '%s\n' \"$IMG_FILE\" | sed 's/[.[\*^$]/\\&/g')|d\" /etc/fstab
fi
# Unmount the image if it is mounted
if mount | grep -q \"$MOUNT_DIR\"; then
sudo umount \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to unmount $MOUNT_DIR\"
fi
# Remove the mount directory
if [ -d \"$MOUNT_DIR\" ]; then
sudo rm -rf \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to remove $MOUNT_DIR\"
fi
# Remove NGINX configuration files
if [ -f \"$VHOST_MAIN_FILE\" ]; then sudo rm -f \"$VHOST_MAIN_FILE\" || handle_error \"Failed to remove $VHOST_MAIN_FILE\"; fi
if [ -f \"$VHOST_MAIN_LOCATION_FILE\" ]; then sudo rm -f \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to remove $VHOST_MAIN_LOCATION_FILE\"; fi
# Update status
echo \"suspended\" | sudo tee \"$STATUS_FILE\" > /dev/null
# Success
echo \"success\"
exit 0"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "61cba08a-a873-4a1c-98a4-75a3986d8e63",
"name": "Terminated",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-660,
-480
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
COMPOSE_FILE=\"$COMPOSE_DIR/docker-compose.yml\"
STATUS_FILE=\"$COMPOSE_DIR/status\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
VHOST_MAIN_FILE=\"$VHOST_DIR/$DOMAIN\"
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/$DOMAIN\"
# Function to log an error, write to status file, and print to console
handle_error() {
STATUS_JSON=\"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
echo \"error: $1\"
exit 1
}
# Stop and remove the Docker containers
if [ -f \"$COMPOSE_FILE\" ]; then
sudo docker-compose -f \"$COMPOSE_FILE\" down > /dev/null 2>&1
fi
# Remove the mount entry from /etc/fstab if it exists
if grep -q \"$IMG_FILE\" /etc/fstab; then
sudo sed -i \"\|$(printf '%s\n' \"$IMG_FILE\" | sed 's/[.[\*^$]/\\&/g')|d\" /etc/fstab
fi
# Unmount the image if it is still mounted
if mount | grep -q \"$MOUNT_DIR\"; then
sudo umount \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to unmount $MOUNT_DIR\"
fi
# Remove all related directories and files
for item in \"$COMPOSE_DIR\" \"$VHOST_MAIN_FILE\" \"$VHOST_MAIN_LOCATION_FILE\"; do
if [ -e \"$item\" ]; then
sudo rm -rf \"$item\" || handle_error \"Failed to remove $item\"
fi
done
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "a66a034f-970a-4cee-b324-f1b423625bd7",
"name": "Unsuspend",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-660,
-660
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
COMPOSE_FILE=\"$COMPOSE_DIR/docker-compose.yml\"
STATUS_FILE=\"$COMPOSE_DIR/status\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/$DOMAIN\"
DOCKER_COMPOSE_TEXT='{{ $('Deploy-docker-compose').item.json[\"docker-compose\"] }}'
NGINX_MAIN_ACL_FILE=\"$NGINX_DIR/$DOMAIN\"_acl
NGINX_MAIN_TEXT='{{ $('nginx').item.json['main'] }}'
NGINX_MAIN_FILE=\"$NGINX_DIR/$DOMAIN\"
VHOST_MAIN_FILE=\"$VHOST_DIR/$DOMAIN\"
NGINX_MAIN_LOCATION_TEXT='{{ $('nginx').item.json['main_location'] }}'
NGINX_MAIN_LOCATION_FILE=\"$NGINX_DIR/$DOMAIN\"_location
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
DISK_SIZE=\"{{ $('API').item.json.body.disk }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
STATUS_JSON=\"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
echo \"$STATUS_JSON\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"error: $1\"
exit 1
}
update_nginx_acl() {
ACL_FILE=$1
LOCATION_FILE=$2
if [ -s \"$ACL_FILE\" ]; then # Проверяем, что файл существует и не пустой
VALID_LINES=$(grep -vE '^\s*$' \"$ACL_FILE\") # Убираем пустые строки
if [ -n \"$VALID_LINES\" ]; then # Если есть непустые строки
while IFS= read -r line; do
echo \"allow $line;\" | sudo tee -a \"$LOCATION_FILE\" > /dev/null || handle_error \"Failed to update $LOCATION_FILE\"
done <<< \"$VALID_LINES\"
echo \"deny all;\" | sudo tee -a \"$LOCATION_FILE\" > /dev/null || handle_error \"Failed to update $LOCATION_FILE\"
fi
fi
}
# Create necessary directories with permissions
for dir in \"$COMPOSE_DIR\" \"$NGINX_DIR\" \"$MOUNT_DIR\"; do
sudo mkdir -p \"$dir\" || handle_error \"Failed to create $dir\"
sudo chmod -R 777 \"$dir\" || handle_error \"Failed to set permissions on $dir\"
done
# Check if the image is already mounted using fstab
if ! grep -q \"$IMG_FILE\" /etc/fstab; then
echo \"$IMG_FILE $MOUNT_DIR ext4 loop 0 0\" | sudo tee -a /etc/fstab > /dev/null || handle_error \"Failed to add fstab entry for $IMG_FILE\"
fi
# Apply the fstab changes and mount the image
if ! mount | grep -q \"$MOUNT_DIR\"; then
sudo mount -a || handle_error \"Failed to mount image using fstab\"
fi
# Create docker-compose.yml file
echo \"$DOCKER_COMPOSE_TEXT\" | sudo tee \"$COMPOSE_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $COMPOSE_FILE\"
# Create NGINX configuration files
echo \"$NGINX_MAIN_TEXT\" | sudo tee \"$NGINX_MAIN_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_FILE\"
echo \"$NGINX_MAIN_LOCATION_TEXT\" | sudo tee \"$NGINX_MAIN_LOCATION_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_LOCATION_FILE\"
# Copy NGINX configuration files instead of creating symbolic links
sudo cp -f \"$NGINX_MAIN_FILE\" \"$VHOST_MAIN_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_FILE to $VHOST_MAIN_FILE\"
sudo chmod 777 \"$VHOST_MAIN_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_FILE\"
sudo cp -f \"$NGINX_MAIN_LOCATION_FILE\" \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_LOCATION_FILE to $VHOST_MAIN_LOCATION_FILE\"
sudo chmod 777 \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_LOCATION_FILE\"
update_nginx_acl \"$NGINX_MAIN_ACL_FILE\" \"$VHOST_MAIN_LOCATION_FILE\"
# Change to the compose directory
cd \"$COMPOSE_DIR\" || handle_error \"Failed to change directory to $COMPOSE_DIR\"
# Start Docker containers using docker-compose
> error.log
if ! sudo docker-compose up -d > error.log 2>&1; then
ERROR_MSG=$(tail -n 10 error.log) # Read the last 10 lines from error.log
handle_error \"Docker-compose failed: $ERROR_MSG\"
fi
# If everything is successful, update the status file and print success message
echo \"active\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "f05ac0c4-61b6-4546-9a39-1ef03e360c14",
"name": "Mount Disk",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1200,
460
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
STATUS_FILE=\"$COMPOSE_DIR/status.json\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
echo \"error: $1\"
exit 1
}
# Create necessary directories with permissions
sudo mkdir -p \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to create $MOUNT_DIR\"
sudo chmod 777 \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to set permissions on $MOUNT_DIR\"
if df -h | grep -q \"$MOUNT_DIR\"; then
handle_error \"The file $IMG_FILE is mounted to $MOUNT_DIR\"
fi
if ! grep -q \"$IMG_FILE\" /etc/fstab; then
echo \"$IMG_FILE $MOUNT_DIR ext4 loop 0 0\" | sudo tee -a /etc/fstab > /dev/null || handle_error \"Failed to add entry to /etc/fstab\"
fi
sudo mount -a || handle_error \"Failed to mount entries from /etc/fstab\"
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "803652a7-b75c-4e58-a935-7899ef98815b",
"name": "Unmount Disk",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1080,
560
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"
STATUS_FILE=\"$COMPOSE_DIR/status.json\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
echo \"error: $1\"
exit 1
}
if ! df -h | grep -q \"$MOUNT_DIR\"; then
handle_error \"The file $IMG_FILE is not mounted to $MOUNT_DIR\"
fi
# Remove the mount entry from /etc/fstab if it exists
if grep -q \"$IMG_FILE\" /etc/fstab; then
sudo sed -i \"\|$(printf '%s\n' \"$IMG_FILE\" | sed 's/[.[\*^$]/\\&/g')|d\" /etc/fstab
fi
# Unmount the image if it is mounted (using fstab)
if mount | grep -q \"$MOUNT_DIR\"; then
sudo umount \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to unmount $MOUNT_DIR\"
fi
# Remove the mount directory (if needed)
if ! sudo rm -rf \"$MOUNT_DIR\" > /dev/null 2>&1; then
handle_error \"Failed to remove $MOUNT_DIR\"
fi
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "72f8b5bd-0826-4c21-b201-8714969bb6a4",
"name": "Log",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1360,
-160
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}\"
LOGS_JSON=\"{}\"
# Function to return error in JSON format
handle_error() {
echo \"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
exit 1
}
# Check if the container exists
if ! sudo docker ps -a | grep -q \"$CONTAINER_NAME\" > /dev/null 2>&1; then
handle_error \"Container $CONTAINER_NAME not found\"
fi
# Get logs of the container
LOGS=$(sudo docker logs --tail 1000 \"$CONTAINER_NAME\" 2>&1)
if [ $? -ne 0 ]; then
handle_error \"Failed to retrieve logs for $CONTAINER_NAME\"
fi
# Escape double quotes in logs for valid JSON
LOGS_ESCAPED=$(echo \"$LOGS\" | sed 's/\"/\\\"/g' | sed ':a;N;$!ba;s/\n/\\n/g')
# Format logs as JSON
LOGS_JSON=\"{\\"logs\\": \\"$LOGS_ESCAPED\\"}\"
echo \"$LOGS_JSON\"
exit 0"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "c170dd11-18bf-41c9-a07c-2f0c9d0fdb01",
"name": "ChangePackage",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-660,
-300
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Get values for variables from templates
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
COMPOSE_FILE=\"$COMPOSE_DIR/docker-compose.yml\"
STATUS_FILE=\"$COMPOSE_DIR/status\"
IMG_FILE=\"$COMPOSE_DIR/data.img\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
MOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/$DOMAIN\"
DOCKER_COMPOSE_TEXT='{{ $('Deploy-docker-compose').item.json[\"docker-compose\"] }}'
NGINX_MAIN_TEXT='{{ $('nginx').item.json['main'] }}'
NGINX_MAIN_FILE=\"$NGINX_DIR/$DOMAIN\"
VHOST_MAIN_FILE=\"$VHOST_DIR/$DOMAIN\"
NGINX_MAIN_LOCATION_TEXT='{{ $('nginx').item.json['main_location'] }}'
NGINX_MAIN_LOCATION_FILE=\"$NGINX_DIR/$DOMAIN\"_location
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
DISK_SIZE=\"{{ $('API').item.json.body.disk }}\"
# Function to log an error, write to status file, and print to console
handle_error() {
STATUS_JSON=\"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
echo \"$STATUS_JSON\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"error: $1\"
exit 1
}
# Check if the compose file exists before stopping the container
if [ -f \"$COMPOSE_FILE\" ]; then
sudo docker-compose -f \"$COMPOSE_FILE\" down > /dev/null 2>&1 || handle_error \"Failed to stop containers\"
else
handle_error \"docker-compose.yml not found\"
fi
# Unmount the image if it is currently mounted
if mount | grep -q \"$MOUNT_DIR\"; then
sudo umount \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to unmount $MOUNT_DIR\"
fi
# Create docker-compose.yml file
echo \"$DOCKER_COMPOSE_TEXT\" | sudo tee \"$COMPOSE_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $COMPOSE_FILE\"
# Create NGINX configuration files
echo \"$NGINX_MAIN_TEXT\" | sudo tee \"$NGINX_MAIN_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_FILE\"
echo \"$NGINX_MAIN_LOCATION_TEXT\" | sudo tee \"$NGINX_MAIN_LOCATION_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_LOCATION_FILE\"
# Resize the disk image if it exists
if [ -f \"$IMG_FILE\" ]; then
sudo truncate -s \"$DISK_SIZE\"G \"$IMG_FILE\" > /dev/null 2>&1 || handle_error \"Failed to resize $IMG_FILE (truncate)\"
sudo e2fsck -fy \"$IMG_FILE\" > /dev/null 2>&1 || handle_error \"Filesystem check failed on $IMG_FILE\"
sudo resize2fs \"$IMG_FILE\" > /dev/null 2>&1 || handle_error \"Failed to resize filesystem on $IMG_FILE\"
else
handle_error \"Disk image $IMG_FILE does not exist\"
fi
# Mount the disk only if it is not already mounted
if ! mount | grep -q \"$MOUNT_DIR\"; then
sudo mount -a || handle_error \"Failed to mount entries from /etc/fstab\"
fi
# Change to the compose directory
cd \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to change directory to $COMPOSE_DIR\"
# Copy NGINX configuration files instead of creating symbolic links
sudo cp -f \"$NGINX_MAIN_FILE\" \"$VHOST_MAIN_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_FILE to $VHOST_MAIN_FILE\"
sudo chmod 777 \"$VHOST_MAIN_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_FILE\"
sudo cp -f \"$NGINX_MAIN_LOCATION_FILE\" \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_LOCATION_FILE to $VHOST_MAIN_LOCATION_FILE\"
sudo chmod 777 \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_LOCATION_FILE\"
# Start Docker containers using docker-compose
if ! sudo docker-compose up -d > /dev/null 2>error.log; then
ERROR_MSG=$(tail -n 10 error.log) # Read the last 10 lines from error.log
handle_error \"Docker-compose failed: $ERROR_MSG\"
fi
# Update status file
echo \"active\" | sudo tee \"$STATUS_FILE\" > /dev/null
echo \"success\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "da927c27-bc27-4f1f-bd06-90cb273a423d",
"name": "Sticky Note",
"type": "n8n-nodes-base.stickyNote",
"position": [
-2640,
-1260
],
"parameters": {
"color": 6,
"width": 639,
"height": 909,
"content": "## 👋 Welcome to PUQ Docker n8n deploy!
## Template for n8n: API Backend for WHMCS/WISECP by PUQcloud
v.1
This is an n8n template that creates an API backend for the WHMCS/WISECP module developed by PUQcloud.
## Setup Instructions
### 1. Configure API Webhook and SSH Access
- Create a Credential (Basic Auth) for the **Webhook API Block** in n8n.
- Create a Credential for **SSH access** to a server with Docker installed (**SSH Block**).
### 2. Modify Template Parameters
In the **Parameters** block of the template, update the following settings:
- `server_domain` – must match the domain of the WHMCS/WISECP Docker server.
- `clients_dir` – directory where user data related to Docker and disks will be stored.
- `mount_dir` – default mount point for the container disk (recommended not to change).
**Do not modify** the following technical parameters:
- `screen_left`
- `screen_right`
## Additional Resources
- Documentation: [https://doc.puq.info/books/docker-n8n-whmcs-module/page/setting-up-n8n-workflow](https://doc.puq.info/books/docker-n8n-whmcs-module/page/setting-up-n8n-workflow)
- WHMCS module: [https://puqcloud.com/whmcs-module-docker-n8n.php](https://puqcloud.com/whmcs-module-docker-n8n.php)
"
},
"typeVersion": 1
},
{
"id": "7d1edb5c-9836-4dad-9116-bd14a5e7ab2c",
"name": "n8n",
"type": "n8n-nodes-base.switch",
"position": [
-1680,
1380
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "version",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "66ad264d-5393-410c-bfa3-011ab8eb234a",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "app_version"
}
]
},
"renameOutput": true
},
{
"outputKey": "users",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "b48957a0-22c0-4ac0-82ef-abd9e7ab0207",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "app_users"
}
]
},
"renameOutput": true
},
{
"outputKey": "change_password",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "7c862a6f-5df1-499c-b9c6-9b266e2bebec",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "change_password"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "dcc07564-d2e1-46a5-a1ad-662aff29c681",
"name": "Version",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1180,
1360
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}\"
VERSION_JSON=\"{}\"
# Function to return error in JSON format
handle_error() {
echo \"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
exit 1
}
# Check if the container exists
if ! sudo docker ps -a | grep -q \"$CONTAINER_NAME\" > /dev/null 2>&1; then
handle_error \"Container $CONTAINER_NAME not found\"
fi
# Get the n8n version from the container
VERSION=$(sudo docker exec \"$CONTAINER_NAME\" n8n --version 2>&1)
if [ $? -ne 0 ]; then
handle_error \"Failed to retrieve version for $CONTAINER_NAME\"
fi
# Escape double quotes in version string for valid JSON
VERSION_ESCAPED=$(echo \"$VERSION\" | sed 's/\"/\\\"/g')
# Format version as JSON
VERSION_JSON=\"{\\"version\\": \\"$VERSION_ESCAPED\\"}\"
echo \"$VERSION_JSON\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "bb6f2cc2-861a-4c6f-9206-b3077fb98d06",
"name": "Users",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1060,
1460
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}\"
DB_PATH=\"/mnt/$CONTAINER_NAME/database.sqlite\"
USERS_JSON=\"{}\"
# Function to return error in JSON format
handle_error() {
echo \"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
exit 1
}
if ! test -f \"$DB_PATH\"; then
handle_error \"Database file $DB_PATH is not found or not mounted on the host\"
fi
USERS=$(sqlite3 \"$DB_PATH\" -json \"SELECT * FROM user;\" 2>&1)
if [ $? -ne 0 ]; then
handle_error \"Failed to retrieve users from database\"
fi
USERS_ESCAPED=$(echo \"$USERS\" | sed 's/\"/\\\"/g')
USERS_JSON=\"{\\"users\\": $USERS}\"
echo \"$USERS_JSON\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "e9a08c32-28e6-461f-a620-869dcc5cb1e5",
"name": "Change Password",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1160,
1560
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
CONTAINER_NAME=\"{{ $('API').item.json.body.domain }}\"
USER_EMAIL=\"{{ $('API').item.json.body.user_email }}\"
NEW_PASSWORD=\"{{ $('API').item.json.body.password }}\"
DB_PATH=\"/mnt/$CONTAINER_NAME/database.sqlite\"
# Function to return error in JSON format
handle_error() {
echo \"{\\"status\\": \\"error\\", \\"message\\": \\"$1\\"}\"
exit 1
}
if ! test -f \"$DB_PATH\"; then
handle_error \"Database file $DB_PATH is not found or not mounted on the host\"
fi
# Generate bcrypt hash of the new password
HASH=$(htpasswd -bnB \"\" \"$NEW_PASSWORD\" | cut -c2-)
if [ -z \"$HASH\" ]; then
handle_error \"Failed to generate password hash\"
fi
# Update password in the database
sudo sqlite3 \"$DB_PATH\" \"UPDATE user SET password = '$HASH' WHERE email = '$USER_EMAIL';\"
if [ $? -ne 0 ]; then
handle_error \"Failed to update password in database\"
fi
echo \"{\\"status\\": \\"success\\"}\"
exit 0"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "60fafb1e-0f67-4759-8625-b08409c5b462",
"name": "If1",
"type": "n8n-nodes-base.if",
"position": [
-1940,
-1100
],
"parameters": {
"options": {},
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "or",
"conditions": [
{
"id": "8602bd4c-9693-4d5f-9e7d-5ee62210baca",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "create"
},
{
"id": "1c630b59-0e5a-441d-8aa5-70b31338d897",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "change_package"
},
{
"id": "b3eb7052-a70f-438e-befd-8c5240df32c7",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "unsuspend"
}
]
}
},
"typeVersion": 2.2
},
{
"id": "913d96ae-bd47-4db2-b83b-68c49b51b33a",
"name": "nginx",
"type": "n8n-nodes-base.set",
"position": [
-1740,
-1240
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "main",
"type": "string",
"value": "=ignore_invalid_headers off;
client_max_body_size 0;
proxy_buffering off;
proxy_request_buffering off;"
},
{
"id": "6507763a-21d4-4ff0-84d2-5dc9d21b7430",
"name": "main_location",
"type": "string",
"value": "=# Custom header
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "5172c533-170f-4f08-95af-de090347c832",
"name": "Deploy-docker-compose",
"type": "n8n-nodes-base.set",
"position": [
-1500,
-1240
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "docker-compose",
"type": "string",
"value": "=version: \"3\"
services:
n8n-{{ $('API').item.json.body.domain }}:
image: n8nio/n8n
restart: unless-stopped
container_name: {{ $('API').item.json.body.domain }}
environment:
- VIRTUAL_HOST={{ $('API').item.json.body.domain }}
- LETSENCRYPT_HOST={{ $('API').item.json.body.domain }}
- WEBHOOK_URL=https://{{ $('API').item.json.body.domain }}
volumes:
- {{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}:/home/node/.n8n
networks:
- nginx-proxy_web
mem_limit: {{ $('API').item.json.body.ram }}G
cpus: \"{{ $('API').item.json.body.cpu }}\"
networks:
nginx-proxy_web:
external: true
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "a9a52bcd-b9b8-436e-a2cc-1f11747626aa",
"name": "GET ACL",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1200,
680
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Get values for variables from templates
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
NGINX_MAIN_ACL_FILE=\"$NGINX_DIR/$DOMAIN\"_acl
# Function to log an error and exit
handle_error() {
echo \"error: $1\"
exit 1
}
# Read files if they exist, else assign empty array
if [[ -f \"$NGINX_MAIN_ACL_FILE\" ]]; then
MAIN_IPS=$(cat \"$NGINX_MAIN_ACL_FILE\" | jq -R -s 'split(\"\n\") | map(select(length > 0))')
else
MAIN_IPS=\"[]\"
fi
# Output JSON
echo \"{ \\"main_ips\\": $MAIN_IPS }\"
exit 0"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "b6535cd0-2264-4ede-9da6-03c128d54682",
"name": "SET ACL",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1100,
800
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Get values for variables from templates
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
VHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"
NGINX_MAIN_ACL_FILE=\"$NGINX_DIR/$DOMAIN\"_acl
NGINX_MAIN_ACL_TEXT=\"{{ $('API').item.json.body.main_ips }}\"
VHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location
NGINX_MAIN_LOCATION_FILE=\"$NGINX_DIR/$DOMAIN\"_location
# Function to log an error and exit
handle_error() {
echo \"error: $1\"
exit 1
}
update_nginx_acl() {
ACL_FILE=$1
LOCATION_FILE=$2
if [ -s \"$ACL_FILE\" ]; then
VALID_LINES=$(grep -vE '^\s*$' \"$ACL_FILE\")
if [ -n \"$VALID_LINES\" ]; then
while IFS= read -r line; do
echo \"allow $line;\" | sudo tee -a \"$LOCATION_FILE\" > /dev/null || handle_error \"Failed to update $LOCATION_FILE\"
done <<< \"$VALID_LINES\"
echo \"deny all;\" | sudo tee -a \"$LOCATION_FILE\" > /dev/null || handle_error \"Failed to update $LOCATION_FILE\"
fi
fi
}
# Create or overwrite the file with the content from variables
echo \"$NGINX_MAIN_ACL_TEXT\" | sudo tee \"$NGINX_MAIN_ACL_FILE\" > /dev/null
sudo cp -f \"$NGINX_MAIN_LOCATION_FILE\" \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_LOCATION_FILE to $VHOST_MAIN_LOCATION_FILE\"
sudo chmod 777 \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_LOCATION_FILE\"
update_nginx_acl \"$NGINX_MAIN_ACL_FILE\" \"$VHOST_MAIN_LOCATION_FILE\"
# Reload Nginx with sudo
if sudo docker exec nginx-proxy nginx -s reload; then
echo \"success\"
else
handle_error \"Failed to reload Nginx.\"
fi
exit 0"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "5b2452e4-3c2a-42e3-8855-dedd6c8f8ec9",
"name": "GET NET",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
-1220,
900
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash
# Get values for variables from templates
DOMAIN=\"{{ $('API').item.json.body.domain }}\"
COMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"
NGINX_DIR=\"$COMPOSE_DIR/nginx\"
NET_IN_FILE=\"$COMPOSE_DIR/net_in\"
NET_OUT_FILE=\"$COMPOSE_DIR/net_out\"
# Function to log an error and exit
handle_error() {
echo \"error: $1\"
exit 1
}
# Get current network statistics from container
STATS=$(sudo docker exec \"$DOMAIN\" cat /proc/net/dev | grep eth0) || handle_error \"Failed to get network stats\"
NET_IN_NEW=$(echo \"$STATS\" | awk '{print $2}') # RX bytes (received)
NET_OUT_NEW=$(echo \"$STATS\" | awk '{print $10}') # TX bytes (transmitted)
# Ensure directory exists
mkdir -p \"$COMPOSE_DIR\"
# Read old values, create files if they don't exist
if [[ -f \"$NET_IN_FILE\" ]]; then
NET_IN_OLD=$(sudo cat \"$NET_IN_FILE\")
else
NET_IN_OLD=0
fi
if [[ -f \"$NET_OUT_FILE\" ]]; then
NET_OUT_OLD=$(sudo cat \"$NET_OUT_FILE\")
else
NET_OUT_OLD=0
fi
# Save new values
echo \"$NET_IN_NEW\" | sudo tee \"$NET_IN_FILE\" > /dev/null
echo \"$NET_OUT_NEW\" | sudo tee \"$NET_OUT_FILE\" > /dev/null
# Output JSON
echo \"{ \\"net_in_new\\": $NET_IN_NEW, \\"net_out_new\\": $NET_OUT_NEW, \\"net_in_old\\": $NET_IN_OLD, \\"net_out_old\\": $NET_OUT_OLD }\"
exit 0
"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
}
],
"active": true,
"pinData": {},
"settings": {
"timezone": "America/Winnipeg",
"callerPolicy": "workflowsFromSameOwner",
"executionOrder": "v1"
},
"versionId": "08850dfe-ce0d-46e4-8eb7-6d1a830c8971",
"connections": {
"If": {
"main": [
[
{
"node": "Container Stats",
"type": "main",
"index": 0
},
{
"node": "Container Actions",
"type": "main",
"index": 0
},
{
"node": "n8n",
"type": "main",
"index": 0
},
{
"node": "If1",
"type": "main",
"index": 0
}
],
[
{
"node": "422-Invalid server domain",
"type": "main",
"index": 0
}
]
]
},
"API": {
"main": [
[
{
"node": "Parametrs",
"type": "main",
"index": 0
}
],
[]
]
},
"If1": {
"main": [
[
{
"node": "nginx",
"type": "main",
"index": 0
}
],
[
{
"node": "Service Actions",
"type": "main",
"index": 0
}
]
]
},
"Log": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"SSH": {
"main": [
[
{
"node": "Code1",
"type": "main",
"index": 0
}
],
[
{
"node": "Code1",
"type": "main",
"index": 0
}
]
]
},
"n8n": {
"main": [
[
{
"node": "Version",
"type": "main",
"index": 0
}
],
[
{
"node": "Users",
"type": "main",
"index": 0
}
],
[
{
"node": "Change Password",
"type": "main",
"index": 0
}
]
]
},
"Stat": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Stop": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Code1": {
"main": [
[
{
"node": "API answer",
"type": "main",
"index": 0
}
]
]
},
"Start": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Users": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"nginx": {
"main": [
[
{
"node": "Deploy-docker-compose",
"type": "main",
"index": 0
}
]
]
},
"Deploy": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"GET ACL": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"GET NET": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Inspect": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"SET ACL": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Suspend": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Version": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Parametrs": {
"main": [
[
{
"node": "If",
"type": "main",
"index": 0
}
]
]
},
"Unsuspend": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Mount Disk": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Terminated": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Unmount Disk": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"ChangePackage": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Change Password": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Container Stats": {
"main": [
[
{
"node": "Inspect",
"type": "main",
"index": 0
}
],
[
{
"node": "Stat",
"type": "main",
"index": 0
}
],
[
{
"node": "Log",
"type": "main",
"index": 0
}
]
]
},
"Service Actions": {
"main": [
[
{
"node": "Test Connection1",
"type": "main",
"index": 0
}
],
[
{
"node": "Deploy",
"type": "main",
"index": 0
}
],
[
{
"node": "Suspend",
"type": "main",
"index": 0
}
],
[
{
"node": "Unsuspend",
"type": "main",
"index": 0
}
],
[
{
"node": "Terminated",
"type": "main",
"index": 0
}
],
[
{
"node": "ChangePackage",
"type": "main",
"index": 0
}
]
]
},
"Test Connection1": {
"main": [
[
{
"node": "SSH",
"type": "main",
"index": 0
}
]
]
},
"Container Actions": {
"main": [
[
{
"node": "Start",
"type": "main",
"index": 0
}
],
[
{
"node": "Stop",
"type": "main",
"index": 0
}
],
[
{
"node": "Mount Disk",
"type": "main",
"index": 0
}
],
[
{
"node": "Unmount Disk",
"type": "main",
"index": 0
}
],
[
{
"node": "GET ACL",
"type": "main",
"index": 0
}
],
[
{
"node": "SET ACL",
"type": "main",
"index": 0
}
],
[
{
"node": "GET NET",
"type": "main",
"index": 0
}
]
]
},
"Deploy-docker-compose": {
"main": [
[
{
"node": "Service Actions",
"type": "main",
"index": 0
}
]
]
}
}
}
功能特点
- 通过Webhook API接收WHMCS/WISECP的管理指令
- 通过SSH在Docker服务器上自动部署n8n容器
- 客户磁盘镜像的创建、挂载、卸载与扩容
- NGINX反向代理配置及IP访问控制(ACL)
- 容器日志、版本、用户与网络流量查询
技术分析
节点类型及作用
- If
- Set
- Webhook
- Respondtowebhook
- Code
复杂度评估
配置难度:
维护难度:
扩展性:
实施指南
前置条件
- 可访问的n8n实例
- 已安装Docker与docker-compose的Linux服务器
- 具有sudo权限的SSH凭证
- WHMCS/WISECP及PUQcloud的Docker n8n模块
配置步骤
- 在n8n中导入工作流JSON文件
- 为Webhook API节点创建Basic Auth凭证
- 为SSH节点配置Docker服务器的SSH凭证
- 在Parametrs节点中设置server_domain、clients_dir和mount_dir
- 测试工作流执行
- 激活工作流以启用API端点
关键参数
| 参数名称 | 默认值 | 说明 |
|---|---|---|
| server_domain | d01-test.uuq.pl | Docker服务器域名,须与WHMCS/WISECP中的配置一致 |
| clients_dir | /opt/docker/clients | 存放客户Docker数据与磁盘镜像的目录 |
| mount_dir | /mnt | 容器磁盘的默认挂载点(建议不要修改) |
最佳实践
优化建议
- 部署前先在测试域名上验证完整流程
- 根据服务器资源合理设置容器的CPU与内存限制
- 定期检查磁盘镜像使用量并及时扩容
- 定期清理已终止服务的残留目录与fstab条目
安全注意事项
- 妥善保管API密钥和认证信息
- 限制工作流的访问权限
- 定期审查处理日志
- 为SSH账户配置最小必要的sudo权限
性能优化
- 使用增量处理减少重复工作
- 缓存频繁访问的数据
- 避免在同一服务器上同时执行多个部署任务
- 监控系统资源使用情况
故障排除
常见问题
容器部署失败
查看compose目录下的error.log文件,确认磁盘镜像已正确创建并通过fstab挂载。
Webhook认证失败
确认n8n中Webhook节点的Basic Auth凭证与WHMCS/WISECP模块中配置的一致。
调试技巧
- 启用详细日志记录查看每个步骤的执行情况
- 使用测试域名验证部署与挂载逻辑
- 检查网络连接和API服务状态
- 逐步执行工作流定位问题节点
错误处理
工作流包含以下错误处理机制:
- 各脚本通过handle_error函数统一捕获并返回错误信息
- 关键节点启用onError继续执行,保证API始终返回响应
- docker-compose失败时返回error.log的最后10行便于排查
- 服务器域名校验失败时返回422错误