Add DELETE /vm/destroy/{vmid} - complete lifecycle cleanup (v2.1.1)
This commit is contained in:
parent
de6f053088
commit
e79f7256b5
1 changed file with 131 additions and 1 deletion
132
app.py
132
app.py
|
|
@ -180,7 +180,8 @@ async def root():
|
|||
"vm_list": "GET /vm/list",
|
||||
"vm_create": "POST /vm/create {node, ip, hostname, cores?, memory?, disk?}",
|
||||
"vm_status": "GET /vm/status/{vmid}",
|
||||
"vm_delete": "DELETE /vm/{vmid}",
|
||||
"vm_delete": "DELETE /vm/{vmid} - simple Proxmox delete (legacy)",
|
||||
"vm_destroy": "DELETE /vm/destroy/{vmid}?dry_run=false - complete cleanup (VM, Dockhand, Repo, Ansible, Kuma)",
|
||||
"inventory_add": "POST /inventory/host {name, ip, group?}",
|
||||
"ansible_run": "POST /ansible/run {hostname}",
|
||||
"tts_speak": "POST /tts/speak {text, target: speaker|telegram}",
|
||||
|
|
@ -346,6 +347,7 @@ async def vm_status(vmid: int, _=Depends(_verify)):
|
|||
|
||||
@app.delete("/vm/{vmid}")
|
||||
async def vm_delete(vmid: int, _=Depends(_verify)):
|
||||
"""Simple VM delete - Proxmox only (legacy)."""
|
||||
auth = _pve_auth()
|
||||
async with httpx.AsyncClient(verify=False, timeout=30) as c:
|
||||
nodes = await c.get("https://10.5.85.11:8006/api2/json/nodes", headers={"Authorization": auth})
|
||||
|
|
@ -355,6 +357,134 @@ async def vm_delete(vmid: int, _=Depends(_verify)):
|
|||
return r.json()
|
||||
return JSONResponse({"error": "VM not found"}, status_code=404)
|
||||
|
||||
@app.delete("/vm/destroy/{vmid}")
async def vm_destroy_full(vmid: int, _=Depends(_verify), dry_run: bool = Query(False)):
    """
    Complete VM destruction with full cleanup:
    1. Stop VM (required before destroy)
    2. Destroy VM (Proxmox)
    3. Remove from Dockhand (by IP)
    4. Delete Forgejo repo (by hostname)
    5. Remove from Ansible inventory
    6. Remove from Uptime Kuma monitoring

    With ?dry_run=true every step only reports what it *would* do.
    Returns a detailed per-step cleanup report; 404 if the VM is unknown,
    500 if the VM refuses to stop.
    """
    import re  # local import: only needed here for hostname sanitising

    auth = _pve_auth()
    results = {"vmid": vmid, "dry_run": dry_run, "steps": {}}

    async with httpx.AsyncClient(verify=False, timeout=30) as c:
        # Step 1: Find VM and get details by scanning every cluster node.
        vm_info = None
        node_name = None
        nodes = await c.get("https://10.5.85.11:8006/api2/json/nodes", headers={"Authorization": auth})
        for n in nodes.json().get("data", []):
            r = await c.get(f"https://10.5.85.11:8006/api2/json/nodes/{n['node']}/qemu/{vmid}", headers={"Authorization": auth})
            if r.status_code == 200:
                vm_info = r.json().get("data", {})
                node_name = n["node"]
                break

        if not vm_info:
            return JSONResponse({"error": f"VM {vmid} not found"}, status_code=404)

        hostname = vm_info.get("name", "")
        # NOTE(review): takes the last "=" segment of net0 up to the first comma —
        # assumes net0 ends in something like ",ip=10.x.y.z"; confirm against the
        # Proxmox net0 config format actually used by /vm/create.
        ip = vm_info.get("net0", "").split("=")[-1].split(",")[0] if "net0" in vm_info else ""
        results["vm_info"] = {"hostname": hostname, "ip": ip, "node": node_name}

        # The hostname is interpolated into SSH shell commands (step 6) and URL
        # paths (step 5). Only accept plain DNS-style names so a maliciously
        # named VM cannot inject shell syntax or path traversal.
        hostname_safe = bool(hostname) and re.fullmatch(r"[A-Za-z0-9._-]+", hostname) is not None

        # Step 2: Stop VM (if running) — Proxmox refuses to destroy a running VM.
        if vm_info.get("status") == "running":
            if dry_run:
                results["steps"]["stop_vm"] = {"status": "dry_run", "message": f"Would stop VM {vmid}"}
            else:
                r = await c.post(f"https://10.5.85.11:8006/api2/json/nodes/{node_name}/qemu/{vmid}/status/stop", headers={"Authorization": auth})
                results["steps"]["stop_vm"] = {"status": "ok" if r.status_code == 200 else "failed", "detail": r.json()}
                if r.status_code != 200:
                    # Keep the audit trail consistent with the success path below.
                    _audit(f"/vm/destroy/{vmid}", "DELETE", 500, f"dry_run={dry_run} stop failed")
                    return JSONResponse({"error": f"Failed to stop VM: {r.json()}"}, status_code=500)
                # Poll until the VM actually reports "stopped" instead of a blind
                # 5-second sleep, so a slow shutdown cannot race the destroy
                # below (waits at most ~30s, then proceeds regardless).
                for _attempt in range(6):
                    await asyncio.sleep(5)
                    s = await c.get(f"https://10.5.85.11:8006/api2/json/nodes/{node_name}/qemu/{vmid}/status/current", headers={"Authorization": auth})
                    if s.status_code == 200 and s.json().get("data", {}).get("status") == "stopped":
                        break
        else:
            results["steps"]["stop_vm"] = {"status": "skipped", "message": "VM already stopped"}

        # Step 3: Destroy VM
        if dry_run:
            results["steps"]["destroy_vm"] = {"status": "dry_run", "message": f"Would destroy VM {vmid}"}
        else:
            r = await c.delete(f"https://10.5.85.11:8006/api2/json/nodes/{node_name}/qemu/{vmid}", headers={"Authorization": auth})
            results["steps"]["destroy_vm"] = {"status": "ok" if r.status_code == 200 else "failed", "detail": r.json()}

        # Step 4: Remove from Dockhand (environment matched by name or IP)
        if ip:
            if dry_run:
                results["steps"]["dockhand_remove"] = {"status": "dry_run", "message": f"Would remove Dockhand env for IP {ip}"}
            else:
                envs = await c.get("http://10.4.1.116:3000/api/environments", headers={"Authorization": f"Bearer {BUTLER_TOKEN}"})
                env_id = None
                for env in envs.json():
                    if env.get("name", "").lower() == hostname.lower() or env.get("ip") == ip:
                        env_id = env.get("id")
                        break
                if env_id:
                    r = await c.delete(f"http://10.4.1.116:3000/api/environments/{env_id}", headers={"Authorization": f"Bearer {BUTLER_TOKEN}"})
                    results["steps"]["dockhand_remove"] = {"status": "ok" if r.status_code in [200, 204] else "failed", "env_id": env_id}
                else:
                    results["steps"]["dockhand_remove"] = {"status": "skipped", "message": "No Dockhand environment found"}
        else:
            results["steps"]["dockhand_remove"] = {"status": "skipped", "message": "No IP found"}

        # Step 5: Delete Forgejo repo (repo name == hostname). Gated on the
        # sanitised hostname because it is embedded in the URL path.
        if hostname and hostname_safe:
            if dry_run:
                results["steps"]["forgejo_repo_delete"] = {"status": "dry_run", "message": f"Would delete repo sascha/{hostname}"}
            else:
                r = await c.delete(f"http://10.4.1.116:8888/forgejo/api/v1/repos/sascha/{hostname}", headers={"Authorization": f"Bearer {BUTLER_TOKEN}"})
                results["steps"]["forgejo_repo_delete"] = {"status": "ok" if r.status_code in [200, 204] else "not_found", "detail": r.json() if r.status_code != 204 else "deleted"}
        elif hostname:
            results["steps"]["forgejo_repo_delete"] = {"status": "skipped", "message": "Hostname failed safety validation"}
        else:
            results["steps"]["forgejo_repo_delete"] = {"status": "skipped", "message": "No hostname found"}

        # Step 6: Remove from Ansible inventory. Gated on the sanitised
        # hostname because it is interpolated into a shell command over SSH.
        if hostname and hostname_safe:
            if dry_run:
                results["steps"]["ansible_cleanup"] = {"status": "dry_run", "message": f"Would remove {hostname} from pfannkuchen.ini"}
            else:
                # Drop only lines whose FIRST token equals the hostname — a plain
                # substring match would also delete e.g. "web10" when removing "web1".
                remove_cmd = f'''python3 -c "
lines = open('/app-config/ansible/pfannkuchen.ini').readlines()
out = [l for l in lines if l.strip().split(' ')[0] != '{hostname}']
open('/app-config/ansible/pfannkuchen.ini','w').writelines(out)
print('removed')
" '''
                rc, out, err = _ssh(AUTOMATION1, remove_cmd, timeout=30)
                # Also remove the per-host variable directory, if any.
                _ssh(AUTOMATION1, f"rm -rf /app-config/ansible/host_vars/{hostname}", timeout=30)
                results["steps"]["ansible_cleanup"] = {"status": "ok" if rc == 0 else "failed", "detail": out.strip()}
        elif hostname:
            results["steps"]["ansible_cleanup"] = {"status": "skipped", "message": "Hostname failed safety validation"}
        else:
            results["steps"]["ansible_cleanup"] = {"status": "skipped", "message": "No hostname found"}

        # Step 7: Remove from Uptime Kuma (best-effort; Kuma may be unreachable)
        if hostname:
            if dry_run:
                results["steps"]["uptime_kuma_remove"] = {"status": "dry_run", "message": f"Would remove monitor for {hostname}"}
            else:
                try:
                    # Get all monitors from Kuma (no auth needed for local network)
                    kuma_monitors = await c.get("http://10.200.200.1:3001/api/monitors", timeout=5)
                    for monitor in kuma_monitors.json().get("data", []):
                        if hostname.lower() in monitor.get("name", "").lower():
                            r = await c.delete(f"http://10.200.200.1:3001/api/monitors/{monitor['id']}", timeout=5)
                            results["steps"]["uptime_kuma_remove"] = {"status": "ok" if r.status_code == 200 else "failed", "monitor_id": monitor["id"]}
                            break
                    else:
                        # for/else: no monitor name matched the hostname
                        results["steps"]["uptime_kuma_remove"] = {"status": "skipped", "message": "No Kuma monitor found"}
                except Exception as e:
                    # Best-effort step: record the error but never fail the destroy.
                    results["steps"]["uptime_kuma_remove"] = {"status": "error", "detail": str(e)}
        else:
            results["steps"]["uptime_kuma_remove"] = {"status": "skipped", "message": "No hostname found"}

    _audit(f"/vm/destroy/{vmid}", "DELETE", 200, f"dry_run={dry_run}")
    return results
|
||||
|
||||
@app.post("/inventory/host")
|
||||
async def inventory_host(request: Request, _=Depends(_verify)):
|
||||
body = await request.json()
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue