Mirror of https://github.com/jakejarvis/hoot.git (synced 2025-10-18 14:24:26 -04:00)

v2: Postgres storage & Inngest background revalidations (#101)

.github/workflows/test.yml (vendored, 2 changes)

@@ -23,7 +23,7 @@ jobs:
         uses: pnpm/action-setup@v4

       - name: Setup Node.js
-        uses: actions/setup-node@v5
+        uses: actions/setup-node@v6
         with:
           node-version: "22"
           cache: "pnpm"
README.md (37 changes)

@@ -53,6 +53,43 @@

 ---

+## 🔄 v2 Cutover: Postgres + Drizzle + Inngest
+
+- **Primary store**: Postgres (Neon). All domain sections persist to tables in `server/db/schema.ts` via Drizzle.
+- **Drizzle**: Schema/migrations in `drizzle/`. Config in `drizzle.config.ts`. Client at `server/db/client.ts`.
+- **Redis role**: Short-lived locks, rate limiting, and image/report caches only (no primary data). See `lib/cache.ts`, `lib/report-cache.ts`.
+- **Background jobs (Inngest)**:
+  - `app/api/inngest/route.ts` serves the functions.
+  - `section-revalidate`: re-fetches one section for a domain.
+  - `domain-inspected`: fans out per-section revalidation.
+  - `scan-due`: cron that enqueues revalidations for stale rows.
+- **TTL & freshness**: Policies live in `server/db/ttl.ts`. Each service reads from Postgres first and revalidates when stale.
+- **Services**: `server/services/*` now read/write Postgres via the repos in `server/repos/*`.
+
+### Environment
+- `DATABASE_URL` (required)
+- Redis/UploadThing/PostHog remain as before (see `.env.example`).
+
+### Commands
+```bash
+# Drizzle
+pnpm db:generate
+pnpm db:migrate
+
+# Dev / checks / tests
+pnpm dev
+pnpm lint
+pnpm typecheck
+pnpm test:run
+```
+
+### Notes
+- The provider catalog is seeded from `lib/providers/rules/*` via `server/db/seed/providers.ts`.
+- Trigram search is enabled via the `pg_trgm` migration in `drizzle/`.
+- There is no back-compat/migration from Redis snapshots; v2 is a clean switch.
+
+---

 ## 📜 License

 [MIT](LICENSE)
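For orientation, a minimal sketch (not part of this commit) of how a caller could kick off the fan-out the README describes; `enqueueRevalidations` is a hypothetical helper, while the client import and event name come from this diff:

import { inngest } from "@/server/inngest/client";

// Hypothetical helper: emits the event consumed by `domain-inspected`,
// which then re-emits one `section/revalidate` event per section.
export async function enqueueRevalidations(domain: string, sections: string[]) {
  await inngest.send({
    name: "domain/inspected",
    data: { domain: domain.trim().toLowerCase(), sections },
  });
}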
app/api/inngest/route.ts (new file, 13 lines)

import { serve } from "inngest/next";
import { inngest } from "@/server/inngest/client";
import { domainInspected } from "@/server/inngest/functions/domain-inspected";
import { scanDue } from "@/server/inngest/functions/scan-due";
import { sectionRevalidate } from "@/server/inngest/functions/section-revalidate";

// opt out of caching per Inngest docs
export const dynamic = "force-dynamic";

export const { GET, POST, PUT } = serve({
  client: inngest,
  functions: [sectionRevalidate, domainInspected, scanDue],
});
@@ -7,7 +7,7 @@
   },
   "files": {
     "ignoreUnknown": true,
-    "includes": ["**", "!node_modules", "!.next", "!dist", "!build"]
+    "includes": ["**", "!node_modules", "!.next", "!dist", "!build", "!drizzle"]
   },
   "formatter": {
     "enabled": true,
@@ -42,7 +42,6 @@ describe("exportDomainData", () => {
  registrar: { name: "Test Registrar" },
  warnings: [],
  unicodeName: "example.com",
  punycodeName: "example.com",
  registrarProvider: {
    name: "Test Registrar",
    domain: "testregistrar.com",
drizzle.config.ts (new file, 10 lines)

import type { Config } from "drizzle-kit";

export default {
  schema: "./server/db/schema.ts",
  out: "./drizzle",
  dialect: "postgresql",
  dbCredentials: {
    url: process.env.DATABASE_URL as string,
  },
} satisfies Config;
drizzle/0000_nosy_wendell_rand.sql (new file, 154 lines)

CREATE TYPE "public"."dns_record_type" AS ENUM('A', 'AAAA', 'MX', 'TXT', 'NS');--> statement-breakpoint
CREATE TYPE "public"."dns_resolver" AS ENUM('cloudflare', 'google');--> statement-breakpoint
CREATE TYPE "public"."provider_category" AS ENUM('hosting', 'email', 'dns', 'ca', 'registrar');--> statement-breakpoint
CREATE TABLE "certificates" (
  "id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
  "domain_id" uuid NOT NULL,
  "issuer" text NOT NULL,
  "subject" text NOT NULL,
  "alt_names" jsonb DEFAULT '[]'::jsonb NOT NULL,
  "valid_from" timestamp with time zone NOT NULL,
  "valid_to" timestamp with time zone NOT NULL,
  "ca_provider_id" uuid,
  "fetched_at" timestamp with time zone NOT NULL,
  "expires_at" timestamp with time zone NOT NULL,
  CONSTRAINT "ck_cert_valid_window" CHECK ("certificates"."valid_to" >= "certificates"."valid_from")
);
--> statement-breakpoint
CREATE TABLE "dns_records" (
  "id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
  "domain_id" uuid NOT NULL,
  "type" "dns_record_type" NOT NULL,
  "name" text NOT NULL,
  "value" text NOT NULL,
  "ttl" integer,
  "priority" integer,
  "is_cloudflare" boolean,
  "resolver" "dns_resolver" NOT NULL,
  "fetched_at" timestamp with time zone NOT NULL,
  "expires_at" timestamp with time zone NOT NULL,
  CONSTRAINT "u_dns_record" UNIQUE("domain_id","type","name","value")
);
--> statement-breakpoint
CREATE TABLE "domains" (
  "id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
  "name" text NOT NULL,
  "tld" text NOT NULL,
  "unicode_name" text NOT NULL,
  "created_at" timestamp with time zone DEFAULT now() NOT NULL,
  "updated_at" timestamp with time zone DEFAULT now() NOT NULL,
  CONSTRAINT "u_domains_name" UNIQUE("name")
);
--> statement-breakpoint
CREATE TABLE "hosting" (
  "domain_id" uuid PRIMARY KEY NOT NULL,
  "hosting_provider_id" uuid,
  "email_provider_id" uuid,
  "dns_provider_id" uuid,
  "geo_city" text,
  "geo_region" text,
  "geo_country" text,
  "geo_country_code" text,
  "geo_lat" double precision,
  "geo_lon" double precision,
  "fetched_at" timestamp with time zone NOT NULL,
  "expires_at" timestamp with time zone NOT NULL
);
--> statement-breakpoint
CREATE TABLE "http_headers" (
  "id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
  "domain_id" uuid NOT NULL,
  "name" text NOT NULL,
  "value" text NOT NULL,
  "fetched_at" timestamp with time zone NOT NULL,
  "expires_at" timestamp with time zone NOT NULL,
  CONSTRAINT "u_http_header" UNIQUE("domain_id","name")
);
--> statement-breakpoint
CREATE TABLE "providers" (
  "id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
  "category" "provider_category" NOT NULL,
  "name" text NOT NULL,
  "domain" text,
  "slug" text NOT NULL,
  "created_at" timestamp with time zone DEFAULT now() NOT NULL,
  "updated_at" timestamp with time zone DEFAULT now() NOT NULL,
  CONSTRAINT "u_providers_category_slug" UNIQUE("category","slug")
);
--> statement-breakpoint
CREATE TABLE "registration_nameservers" (
  "id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
  "domain_id" uuid NOT NULL,
  "host" text NOT NULL,
  "ipv4" jsonb DEFAULT '[]'::jsonb NOT NULL,
  "ipv6" jsonb DEFAULT '[]'::jsonb NOT NULL,
  CONSTRAINT "u_reg_ns" UNIQUE("domain_id","host")
);
--> statement-breakpoint
CREATE TABLE "registrations" (
  "domain_id" uuid PRIMARY KEY NOT NULL,
  "is_registered" boolean NOT NULL,
  "privacy_enabled" boolean,
  "registry" text,
  "creation_date" timestamp with time zone,
  "updated_date" timestamp with time zone,
  "expiration_date" timestamp with time zone,
  "deletion_date" timestamp with time zone,
  "transfer_lock" boolean,
  "statuses" jsonb DEFAULT '[]'::jsonb NOT NULL,
  "contacts" jsonb DEFAULT '{}'::jsonb NOT NULL,
  "whois_server" text,
  "rdap_servers" jsonb DEFAULT '[]'::jsonb NOT NULL,
  "source" text NOT NULL,
  "registrar_provider_id" uuid,
  "reseller_provider_id" uuid,
  "fetched_at" timestamp with time zone NOT NULL,
  "expires_at" timestamp with time zone NOT NULL
);
--> statement-breakpoint
CREATE TABLE "seo" (
  "domain_id" uuid PRIMARY KEY NOT NULL,
  "source_final_url" text,
  "source_status" integer,
  "meta_open_graph" jsonb DEFAULT '{}'::jsonb NOT NULL,
  "meta_twitter" jsonb DEFAULT '{}'::jsonb NOT NULL,
  "meta_general" jsonb DEFAULT '{}'::jsonb NOT NULL,
  "preview_title" text,
  "preview_description" text,
  "preview_image_url" text,
  "preview_image_uploaded_url" text,
  "canonical_url" text,
  "robots" jsonb DEFAULT '{}'::jsonb NOT NULL,
  "robots_sitemaps" jsonb DEFAULT '[]'::jsonb NOT NULL,
  "errors" jsonb DEFAULT '[]'::jsonb NOT NULL,
  "fetched_at" timestamp with time zone NOT NULL,
  "expires_at" timestamp with time zone NOT NULL
);
--> statement-breakpoint
ALTER TABLE "certificates" ADD CONSTRAINT "certificates_domain_id_domains_id_fk" FOREIGN KEY ("domain_id") REFERENCES "public"."domains"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "certificates" ADD CONSTRAINT "certificates_ca_provider_id_providers_id_fk" FOREIGN KEY ("ca_provider_id") REFERENCES "public"."providers"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "dns_records" ADD CONSTRAINT "dns_records_domain_id_domains_id_fk" FOREIGN KEY ("domain_id") REFERENCES "public"."domains"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "hosting" ADD CONSTRAINT "hosting_domain_id_domains_id_fk" FOREIGN KEY ("domain_id") REFERENCES "public"."domains"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "hosting" ADD CONSTRAINT "hosting_hosting_provider_id_providers_id_fk" FOREIGN KEY ("hosting_provider_id") REFERENCES "public"."providers"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "hosting" ADD CONSTRAINT "hosting_email_provider_id_providers_id_fk" FOREIGN KEY ("email_provider_id") REFERENCES "public"."providers"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "hosting" ADD CONSTRAINT "hosting_dns_provider_id_providers_id_fk" FOREIGN KEY ("dns_provider_id") REFERENCES "public"."providers"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "http_headers" ADD CONSTRAINT "http_headers_domain_id_domains_id_fk" FOREIGN KEY ("domain_id") REFERENCES "public"."domains"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "registration_nameservers" ADD CONSTRAINT "registration_nameservers_domain_id_domains_id_fk" FOREIGN KEY ("domain_id") REFERENCES "public"."domains"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "registrations" ADD CONSTRAINT "registrations_domain_id_domains_id_fk" FOREIGN KEY ("domain_id") REFERENCES "public"."domains"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "registrations" ADD CONSTRAINT "registrations_registrar_provider_id_providers_id_fk" FOREIGN KEY ("registrar_provider_id") REFERENCES "public"."providers"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "registrations" ADD CONSTRAINT "registrations_reseller_provider_id_providers_id_fk" FOREIGN KEY ("reseller_provider_id") REFERENCES "public"."providers"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "seo" ADD CONSTRAINT "seo_domain_id_domains_id_fk" FOREIGN KEY ("domain_id") REFERENCES "public"."domains"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
CREATE INDEX "i_certs_domain" ON "certificates" USING btree ("domain_id");--> statement-breakpoint
CREATE INDEX "i_certs_valid_to" ON "certificates" USING btree ("valid_to");--> statement-breakpoint
CREATE INDEX "i_certs_expires" ON "certificates" USING btree ("expires_at");--> statement-breakpoint
CREATE INDEX "i_dns_domain_type" ON "dns_records" USING btree ("domain_id","type");--> statement-breakpoint
CREATE INDEX "i_dns_type_value" ON "dns_records" USING btree ("type","value");--> statement-breakpoint
CREATE INDEX "i_dns_expires" ON "dns_records" USING btree ("expires_at");--> statement-breakpoint
CREATE INDEX "i_domains_tld" ON "domains" USING btree ("tld");--> statement-breakpoint
CREATE INDEX "i_hosting_providers" ON "hosting" USING btree ("hosting_provider_id","email_provider_id","dns_provider_id");--> statement-breakpoint
CREATE INDEX "i_http_name" ON "http_headers" USING btree ("name");--> statement-breakpoint
CREATE INDEX "i_reg_ns_host" ON "registration_nameservers" USING btree ("host");--> statement-breakpoint
CREATE INDEX "i_reg_registrar" ON "registrations" USING btree ("registrar_provider_id");--> statement-breakpoint
CREATE INDEX "i_reg_expires" ON "registrations" USING btree ("expires_at");--> statement-breakpoint
CREATE INDEX "i_seo_src_final_url" ON "seo" USING btree ("source_final_url");--> statement-breakpoint
CREATE INDEX "i_seo_canonical" ON "seo" USING btree ("canonical_url");
drizzle/meta/0000_snapshot.json (new file, 1207 lines; diff suppressed because it is too large)
drizzle/meta/_journal.json (new file, 13 lines)

{
  "version": "7",
  "dialect": "postgresql",
  "entries": [
    {
      "idx": 0,
      "version": "7",
      "when": 1760758697218,
      "tag": "0000_nosy_wendell_rand",
      "breakpoints": true
    }
  ]
}
@@ -6,7 +6,6 @@ export const RegistrationSchema = z.object({
  domain: z.string(),
  tld: z.string(),
  isRegistered: z.boolean(),
  isIDN: z.boolean().optional(),
  unicodeName: z.string().optional(),
  punycodeName: z.string().optional(),
  registry: z.string().optional(),
@@ -7,4 +7,5 @@ export * from "./domain/registration";
 export * from "./domain/seo";
 export * from "./internal/export";
 export * from "./internal/provider";
+export * from "./internal/sections";
 export * from "./internal/storage";
lib/schemas/internal/sections.ts (new file, 12 lines)

import { z } from "zod";

export const SectionEnum = z.enum([
  "dns",
  "headers",
  "hosting",
  "certificates",
  "seo",
  "registration",
]);

export type Section = z.infer<typeof SectionEnum>;
package.json (27 changes)

@@ -18,6 +18,11 @@
     "lint": "biome check",
     "format": "biome format --write",
     "typecheck": "tsc --noEmit",
+    "db:generate": "drizzle-kit generate",
+    "db:push": "drizzle-kit push",
+    "db:migrate": "drizzle-kit migrate",
+    "db:studio": "drizzle-kit studio",
+    "db:seed:providers": "tsx server/db/seed/providers.ts",
     "test": "vitest",
     "test:run": "vitest run",
     "test:ui": "vitest --ui",
@@ -25,35 +30,38 @@
   },
   "dependencies": {
     "@date-fns/utc": "^2.1.1",
-    "@posthog/nextjs-config": "^1.3.3",
+    "@neondatabase/serverless": "^1.0.2",
+    "@posthog/nextjs-config": "^1.3.4",
     "@sparticuz/chromium": "140.0.0",
-    "@tanstack/react-query": "^5.90.3",
+    "@tanstack/react-query": "^5.90.5",
     "@tanstack/react-query-devtools": "^5.90.2",
     "@trpc/client": "^11.6.0",
     "@trpc/server": "^11.6.0",
     "@trpc/tanstack-react-query": "^11.6.0",
     "@upstash/redis": "^1.35.6",
     "@vercel/analytics": "^1.5.0",
-    "@vercel/functions": "^3.1.3",
+    "@vercel/functions": "^3.1.4",
     "cheerio": "^1.1.2",
     "class-variance-authority": "^0.7.1",
     "clsx": "^2.1.1",
     "cmdk": "^1.1.1",
     "country-flag-icons": "^1.5.21",
     "date-fns": "^4.1.0",
+    "drizzle-orm": "^0.44.6",
     "geist": "^1.5.1",
-    "icojs": "^0.19.5",
+    "icojs": "^0.20.0",
+    "inngest": "^3.44.3",
     "ipaddr.js": "^2.2.0",
-    "lucide-react": "^0.545.0",
+    "lucide-react": "^0.546.0",
     "mapbox-gl": "^3.15.0",
     "motion": "^12.23.24",
     "next": "15.6.0-canary.39",
     "next-themes": "^0.4.6",
-    "posthog-js": "^1.275.2",
+    "posthog-js": "^1.276.0",
     "posthog-node": "^5.10.0",
     "puppeteer-core": "24.22.3",
     "radix-ui": "^1.4.3",
-    "rdapper": "^0.7.0",
+    "rdapper": "^0.8.0",
     "react": "19.1.1",
     "react-dom": "19.1.1",
     "react-map-gl": "^8.1.0",
@@ -69,21 +77,24 @@
   },
   "devDependencies": {
     "@biomejs/biome": "2.2.6",
+    "@electric-sql/pglite": "^0.3.11",
     "@tailwindcss/postcss": "^4.1.14",
     "@testing-library/dom": "10.4.1",
     "@testing-library/jest-dom": "6.9.1",
     "@testing-library/react": "16.3.0",
     "@testing-library/user-event": "14.6.1",
-    "@types/node": "24.7.2",
+    "@types/node": "24.8.1",
     "@types/react": "19.1.16",
     "@types/react-dom": "19.1.9",
     "@vitejs/plugin-react": "^5.0.4",
     "@vitest/coverage-v8": "^3.2.4",
     "@vitest/ui": "^3.2.4",
     "babel-plugin-react-compiler": "19.1.0-rc.3",
+    "drizzle-kit": "^0.31.5",
     "jsdom": "^27.0.0",
     "puppeteer": "24.22.3",
     "tailwindcss": "^4.1.14",
+    "tsx": "^4.20.6",
     "tw-animate-css": "^1.4.0",
     "typescript": "5.9.3",
     "vite-tsconfig-paths": "^5.1.4",
pnpm-lock.yaml (generated, 2662 changes; diff suppressed because it is too large)
@@ -4,5 +4,6 @@ onlyBuiltDependencies:
  - core-js
  - esbuild
  - msgpackr-extract
  - protobufjs
  - puppeteer
  - sharp
server/db/client.ts (new file, 12 lines)

import { Pool } from "@neondatabase/serverless";
import { drizzle } from "drizzle-orm/neon-serverless";
import * as schema from "@/server/db/schema";

const connectionString = process.env.DATABASE_URL;
if (!connectionString) {
  // Throw at import time so we fail fast on misconfiguration in server-only context
  throw new Error("DATABASE_URL is not set");
}

const pool = new Pool({ connectionString });
export const db = drizzle(pool, { schema });
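A quick usage sketch (assumed, not from this commit): the exported `db` client is queried with the tables defined in `server/db/schema.ts`.

import { eq } from "drizzle-orm";
import { db } from "@/server/db/client";
import { domains } from "@/server/db/schema";

// Fetch a domain row by its unique (punycode, lowercased) name.
const rows = await db
  .select()
  .from(domains)
  .where(eq(domains.name, "example.com"))
  .limit(1);
console.log(rows[0]?.id);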
server/db/pglite.ts (new file, 20 lines)

import { PGlite } from "@electric-sql/pglite";
import { drizzle } from "drizzle-orm/pglite";
import * as schema from "@/server/db/schema";

// Dynamic import via require pattern is recommended in community examples
// to access drizzle-kit/api in Vitest.
const { pushSchema } =
  require("drizzle-kit/api") as typeof import("drizzle-kit/api");

export async function makePGliteDb() {
  const client = new PGlite();
  const db = drizzle(client, { schema });
  const { apply } = await pushSchema(
    schema,
    // biome-ignore lint/suspicious/noExplicitAny: ignore type mismatch
    db as any,
  );
  await apply();
  return { db, client };
}
server/db/schema.ts (new file, 267 lines)

import { sql } from "drizzle-orm";
import {
  boolean,
  check,
  doublePrecision,
  index,
  integer,
  jsonb,
  pgEnum,
  pgTable,
  text,
  timestamp,
  unique,
  uuid,
} from "drizzle-orm/pg-core";
import type { Registration } from "@/lib/schemas";

// Enums
export const providerCategory = pgEnum("provider_category", [
  "hosting",
  "email",
  "dns",
  "ca",
  "registrar",
]);
export const dnsRecordType = pgEnum("dns_record_type", [
  "A",
  "AAAA",
  "MX",
  "TXT",
  "NS",
]);
export const dnsResolver = pgEnum("dns_resolver", ["cloudflare", "google"]);

// Providers
export const providers = pgTable(
  "providers",
  {
    id: uuid("id").primaryKey().defaultRandom(),
    category: providerCategory("category").notNull(),
    name: text("name").notNull(),
    domain: text("domain"),
    slug: text("slug").notNull(),
    createdAt: timestamp("created_at", { withTimezone: true })
      .defaultNow()
      .notNull(),
    updatedAt: timestamp("updated_at", { withTimezone: true })
      .defaultNow()
      .notNull(),
  },
  (t) => [unique("u_providers_category_slug").on(t.category, t.slug)],
);

// Domains
export const domains = pgTable(
  "domains",
  {
    id: uuid("id").primaryKey().defaultRandom(),
    name: text("name").notNull(),
    tld: text("tld").notNull(),
    unicodeName: text("unicode_name").notNull(),
    createdAt: timestamp("created_at", { withTimezone: true })
      .defaultNow()
      .notNull(),
    updatedAt: timestamp("updated_at", { withTimezone: true })
      .defaultNow()
      .notNull(),
  },
  (t) => [
    unique("u_domains_name").on(t.name),
    index("i_domains_tld").on(t.tld),
  ],
);

// Registration (snapshot)
export const registrations = pgTable(
  "registrations",
  {
    domainId: uuid("domain_id")
      .primaryKey()
      .references(() => domains.id, { onDelete: "cascade" }),
    isRegistered: boolean("is_registered").notNull(),
    privacyEnabled: boolean("privacy_enabled"),
    registry: text("registry"),
    creationDate: timestamp("creation_date", { withTimezone: true }),
    updatedDate: timestamp("updated_date", { withTimezone: true }),
    expirationDate: timestamp("expiration_date", { withTimezone: true }),
    deletionDate: timestamp("deletion_date", { withTimezone: true }),
    transferLock: boolean("transfer_lock"),
    statuses: jsonb("statuses")
      .$type<Registration["statuses"]>()
      .notNull()
      .default(sql`'[]'::jsonb`),
    contacts: jsonb("contacts")
      .$type<{ contacts?: Registration["contacts"] }>()
      .notNull()
      .default(sql`'{}'::jsonb`),
    whoisServer: text("whois_server"),
    rdapServers: jsonb("rdap_servers")
      .$type<string[]>()
      .notNull()
      .default(sql`'[]'::jsonb`),
    source: text("source").notNull(),
    registrarProviderId: uuid("registrar_provider_id").references(
      () => providers.id,
    ),
    resellerProviderId: uuid("reseller_provider_id").references(
      () => providers.id,
    ),
    fetchedAt: timestamp("fetched_at", { withTimezone: true }).notNull(),
    expiresAt: timestamp("expires_at", { withTimezone: true }).notNull(),
  },
  (t) => [
    index("i_reg_registrar").on(t.registrarProviderId),
    index("i_reg_expires").on(t.expiresAt),
  ],
);

export const registrationNameservers = pgTable(
  "registration_nameservers",
  {
    id: uuid("id").primaryKey().defaultRandom(),
    domainId: uuid("domain_id")
      .notNull()
      .references(() => domains.id, { onDelete: "cascade" }),
    host: text("host").notNull(),
    ipv4: jsonb("ipv4").$type<string[]>().notNull().default(sql`'[]'::jsonb`),
    ipv6: jsonb("ipv6").$type<string[]>().notNull().default(sql`'[]'::jsonb`),
  },
  (t) => [
    unique("u_reg_ns").on(t.domainId, t.host),
    index("i_reg_ns_host").on(t.host),
  ],
);

// DNS (per-record rows)
export const dnsRecords = pgTable(
  "dns_records",
  {
    id: uuid("id").primaryKey().defaultRandom(),
    domainId: uuid("domain_id")
      .notNull()
      .references(() => domains.id, { onDelete: "cascade" }),
    type: dnsRecordType("type").notNull(),
    name: text("name").notNull(),
    value: text("value").notNull(),
    ttl: integer("ttl"),
    priority: integer("priority"),
    isCloudflare: boolean("is_cloudflare"),
    resolver: dnsResolver("resolver").notNull(),
    fetchedAt: timestamp("fetched_at", { withTimezone: true }).notNull(),
    expiresAt: timestamp("expires_at", { withTimezone: true }).notNull(),
  },
  (t) => [
    unique("u_dns_record").on(t.domainId, t.type, t.name, t.value),
    index("i_dns_domain_type").on(t.domainId, t.type),
    index("i_dns_type_value").on(t.type, t.value),
    index("i_dns_expires").on(t.expiresAt),
  ],
);

// TLS certificates (latest)
export const certificates = pgTable(
  "certificates",
  {
    id: uuid("id").primaryKey().defaultRandom(),
    domainId: uuid("domain_id")
      .notNull()
      .references(() => domains.id, { onDelete: "cascade" }),
    issuer: text("issuer").notNull(),
    subject: text("subject").notNull(),
    altNames: jsonb("alt_names").notNull().default(sql`'[]'::jsonb`),
    validFrom: timestamp("valid_from", { withTimezone: true }).notNull(),
    validTo: timestamp("valid_to", { withTimezone: true }).notNull(),
    caProviderId: uuid("ca_provider_id").references(() => providers.id),
    fetchedAt: timestamp("fetched_at", { withTimezone: true }).notNull(),
    expiresAt: timestamp("expires_at", { withTimezone: true }).notNull(),
  },
  (t) => [
    index("i_certs_domain").on(t.domainId),
    index("i_certs_valid_to").on(t.validTo),
    index("i_certs_expires").on(t.expiresAt),
    // Ensure validTo >= validFrom
    check("ck_cert_valid_window", sql`${t.validTo} >= ${t.validFrom}`),
    // GIN on alt_names via raw migration
  ],
);

// HTTP headers (latest set)
export const httpHeaders = pgTable(
  "http_headers",
  {
    id: uuid("id").primaryKey().defaultRandom(),
    domainId: uuid("domain_id")
      .notNull()
      .references(() => domains.id, { onDelete: "cascade" }),
    name: text("name").notNull(),
    value: text("value").notNull(),
    fetchedAt: timestamp("fetched_at", { withTimezone: true }).notNull(),
    expiresAt: timestamp("expires_at", { withTimezone: true }).notNull(),
  },
  (t) => [
    unique("u_http_header").on(t.domainId, t.name),
    index("i_http_name").on(t.name),
  ],
);

// Hosting (latest)
export const hosting = pgTable(
  "hosting",
  {
    domainId: uuid("domain_id")
      .primaryKey()
      .references(() => domains.id, { onDelete: "cascade" }),
    hostingProviderId: uuid("hosting_provider_id").references(
      () => providers.id,
    ),
    emailProviderId: uuid("email_provider_id").references(() => providers.id),
    dnsProviderId: uuid("dns_provider_id").references(() => providers.id),
    geoCity: text("geo_city"),
    geoRegion: text("geo_region"),
    geoCountry: text("geo_country"),
    geoCountryCode: text("geo_country_code"),
    geoLat: doublePrecision("geo_lat"),
    geoLon: doublePrecision("geo_lon"),
    fetchedAt: timestamp("fetched_at", { withTimezone: true }).notNull(),
    expiresAt: timestamp("expires_at", { withTimezone: true }).notNull(),
  },
  (t) => [
    index("i_hosting_providers").on(
      t.hostingProviderId,
      t.emailProviderId,
      t.dnsProviderId,
    ),
  ],
);

// SEO (latest)
export const seo = pgTable(
  "seo",
  {
    domainId: uuid("domain_id")
      .primaryKey()
      .references(() => domains.id, { onDelete: "cascade" }),
    sourceFinalUrl: text("source_final_url"),
    sourceStatus: integer("source_status"),
    metaOpenGraph: jsonb("meta_open_graph").notNull().default(sql`'{}'::jsonb`),
    metaTwitter: jsonb("meta_twitter").notNull().default(sql`'{}'::jsonb`),
    metaGeneral: jsonb("meta_general").notNull().default(sql`'{}'::jsonb`),
    previewTitle: text("preview_title"),
    previewDescription: text("preview_description"),
    previewImageUrl: text("preview_image_url"),
    previewImageUploadedUrl: text("preview_image_uploaded_url"),
    canonicalUrl: text("canonical_url"),
    robots: jsonb("robots").notNull().default(sql`'{}'::jsonb`),
    robotsSitemaps: jsonb("robots_sitemaps")
      .notNull()
      .default(sql`'[]'::jsonb`),
    errors: jsonb("errors").notNull().default(sql`'[]'::jsonb`),
    fetchedAt: timestamp("fetched_at", { withTimezone: true }).notNull(),
    expiresAt: timestamp("expires_at", { withTimezone: true }).notNull(),
  },
  (t) => [
    index("i_seo_src_final_url").on(t.sourceFinalUrl),
    index("i_seo_canonical").on(t.canonicalUrl),
  ],
);
server/db/seed/providers.ts (new file, 61 lines)

import { CA_PROVIDERS } from "@/lib/providers/rules/certificate";
import { DNS_PROVIDERS } from "@/lib/providers/rules/dns";
import { EMAIL_PROVIDERS } from "@/lib/providers/rules/email";
import { HOSTING_PROVIDERS } from "@/lib/providers/rules/hosting";
import { REGISTRAR_PROVIDERS } from "@/lib/providers/rules/registrar";
import { db } from "@/server/db/client";
import { type providerCategory, providers } from "@/server/db/schema";

function slugify(input: string): string {
  return input
    .trim()
    .toLowerCase()
    .replace(/[^a-z0-9]+/g, "-")
    .replace(/(^-|-$)+/g, "");
}

type SeedDef = {
  name: string;
  domain: string | null;
  category: (typeof providerCategory.enumValues)[number];
  aliases?: string[];
};

function collect(): SeedDef[] {
  const arr: SeedDef[] = [];
  const push = (
    cat: SeedDef["category"],
    src: { name: string; domain: string }[],
  ) => {
    for (const p of src)
      arr.push({ name: p.name, domain: p.domain ?? null, category: cat });
  };
  push("dns", DNS_PROVIDERS);
  push("email", EMAIL_PROVIDERS);
  push("hosting", HOSTING_PROVIDERS);
  push("registrar", REGISTRAR_PROVIDERS);
  push("ca", CA_PROVIDERS);
  return arr;
}

async function main() {
  const defs = collect();
  for (const def of defs) {
    const slug = slugify(def.name);
    await db
      .insert(providers)
      .values({
        name: def.name,
        domain: def.domain ?? undefined,
        category: def.category,
        slug,
      })
      .onConflictDoNothing({ target: [providers.category, providers.slug] });
  }
  console.log(`Seeded ${defs.length} provider rows (existing skipped).`);
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
server/db/ttl.test.ts (new file, 50 lines)

import { describe, expect, it } from "vitest";
import {
  ttlForCertificates,
  ttlForDnsRecord,
  ttlForRegistration,
} from "@/server/db/ttl";

describe("TTL policy", () => {
  it("registration: 24h when registered and far from expiry", () => {
    const now = new Date("2024-01-01T00:00:00.000Z");
    const exp = new Date("2024-02-01T00:00:00.000Z");
    const d = ttlForRegistration(now, true, exp);
    expect(d.getTime() - now.getTime()).toBe(24 * 60 * 60 * 1000);
  });

  it("registration: 6h when unregistered", () => {
    const now = new Date("2024-01-01T00:00:00.000Z");
    const d = ttlForRegistration(now, false, null);
    expect(d.getTime() - now.getTime()).toBe(6 * 60 * 60 * 1000);
  });

  it("registration: <=1h when expiry within 7d", () => {
    const now = new Date("2024-01-01T00:00:00.000Z");
    const exp = new Date("2024-01-05T00:00:00.000Z");
    const d = ttlForRegistration(now, true, exp);
    expect(d.getTime() - now.getTime()).toBe(60 * 60 * 1000);
  });

  it("dns: default 1h when ttl missing", () => {
    const now = new Date("2024-01-01T00:00:00.000Z");
    const d = ttlForDnsRecord(now, undefined);
    expect(d.getTime() - now.getTime()).toBe(60 * 60 * 1000);
  });

  it("dns: cap at 24h", () => {
    const now = new Date("2024-01-01T00:00:00.000Z");
    const d = ttlForDnsRecord(now, 3 * 24 * 60 * 60);
    expect(d.getTime() - now.getTime()).toBe(24 * 60 * 60 * 1000);
  });

  it("certs: before valid_to and within 24h window", () => {
    const now = new Date("2024-01-01T00:00:00.000Z");
    const validTo = new Date("2024-01-04T00:00:00.000Z");
    const d = ttlForCertificates(now, validTo);
    // min(now+24h, valid_to-48h) => valid_to-48h here (Jan 2)
    expect(d.toISOString()).toBe(
      new Date("2024-01-02T00:00:00.000Z").toISOString(),
    );
  });
});
server/db/ttl.ts (new file, 55 lines)

export function addSeconds(base: Date, seconds: number): Date {
  return new Date(base.getTime() + seconds * 1000);
}

export function clampFuture(min: Date, max: Date): Date {
  return new Date(
    Math.min(Math.max(min.getTime(), Date.now() + 60_000), max.getTime()),
  );
}

export function ttlForRegistration(
  now: Date,
  isRegistered: boolean,
  expirationDate?: Date | null,
): Date {
  if (expirationDate) {
    const msUntil = expirationDate.getTime() - now.getTime();
    if (msUntil <= 7 * 24 * 60 * 60 * 1000) {
      // Revalidate more aggressively near expiry
      return addSeconds(now, 60 * 60); // 1h
    }
  }
  return addSeconds(now, isRegistered ? 24 * 60 * 60 : 6 * 60 * 60);
}

export function ttlForDnsRecord(now: Date, ttlSeconds?: number | null): Date {
  const ttl =
    typeof ttlSeconds === "number" && ttlSeconds > 0
      ? Math.min(ttlSeconds, 24 * 60 * 60)
      : 60 * 60;
  return addSeconds(now, ttl);
}

export function ttlForCertificates(now: Date, validTo: Date): Date {
  // Revalidate certificates within a 24h sliding window, but start checking
  // more aggressively 48h before expiry to catch upcoming expirations.
  const window = addSeconds(now, 24 * 60 * 60);
  const revalidateBefore = new Date(validTo.getTime() - 48 * 60 * 60 * 1000);
  return clampFuture(
    addSeconds(now, 60 * 60),
    new Date(Math.min(window.getTime(), revalidateBefore.getTime())),
  );
}

export function ttlForHeaders(now: Date): Date {
  return addSeconds(now, 12 * 60 * 60);
}

export function ttlForHosting(now: Date): Date {
  return addSeconds(now, 24 * 60 * 60);
}

export function ttlForSeo(now: Date): Date {
  return addSeconds(now, 24 * 60 * 60);
}
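To show how these policies plug into the repos, a hedged sketch (the helper `saveDnsAnswers` is hypothetical; `ttlForDnsRecord` and `replaceDns` are real names from this diff):

import { ttlForDnsRecord } from "@/server/db/ttl";
import { replaceDns } from "@/server/repos/dns";

// Hypothetical service step: persist freshly resolved A records and stamp
// each row with a revalidation deadline derived from its DNS TTL.
async function saveDnsAnswers(
  domainId: string,
  answers: Array<{ name: string; value: string; ttl?: number }>,
) {
  const now = new Date();
  await replaceDns({
    domainId,
    resolver: "cloudflare",
    fetchedAt: now,
    recordsByType: {
      A: answers.map((a) => ({
        name: a.name,
        value: a.value,
        ttl: a.ttl ?? null,
        expiresAt: ttlForDnsRecord(now, a.ttl), // capped at 24h, default 1h
      })),
      AAAA: [],
      MX: [],
      TXT: [],
      NS: [],
    },
  });
}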
server/inngest/client.ts (new file, 4 lines)

import "server-only";
import { Inngest } from "inngest";

export const inngest = new Inngest({ id: "hoot-app" });
server/inngest/functions/domain-inspected.ts (new file, 29 lines)

import "server-only";
import { type Section, SectionEnum } from "@/lib/schemas";
import { inngest } from "@/server/inngest/client";

export const domainInspected = inngest.createFunction(
  { id: "domain-inspected" },
  { event: "domain/inspected" },
  async ({ step, event }) => {
    const { domain, sections: rawSections } = event.data as {
      domain: string;
      sections?: string[];
    };
    // Validate and filter sections
    const sections: Section[] = rawSections
      ? rawSections.filter((s): s is Section =>
          SectionEnum.options.includes(s as Section),
        )
      : [];
    for (const section of sections) {
      await step.sendEvent("enqueue-section", {
        name: "section/revalidate",
        data: {
          domain: typeof domain === "string" ? domain.trim().toLowerCase() : "",
          section,
        },
      });
    }
  },
);
server/inngest/functions/scan-due.test.ts (new file, 17 lines)

import { beforeEach, describe, expect, it, vi } from "vitest";

describe("scan-due", () => {
  beforeEach(async () => {
    vi.resetModules();
    const { makePGliteDb } = await import("@/server/db/pglite");
    const { db } = await makePGliteDb();
    vi.doMock("@/server/db/client", () => ({ db }));
    globalThis.__redisTestHelper.reset();
  });

  it("counts due dns rows via db mock", async () => {
    const { countDueDns } = await import("@/server/inngest/functions/scan-due");
    const n = await countDueDns(new Date());
    expect(typeof n).toBe("number");
  });
});
server/inngest/functions/scan-due.ts (new file, 147 lines)

import "server-only";
import { eq, lte } from "drizzle-orm";
import { db } from "@/server/db/client";
import {
  certificates,
  dnsRecords,
  domains,
  hosting,
  httpHeaders,
  registrations,
  seo,
} from "@/server/db/schema";
import { inngest } from "@/server/inngest/client";

export const scanDue = inngest.createFunction(
  { id: "scan-due-revalidations" },
  { cron: "*/1 * * * *" },
  async ({ step }) => {
    const now = new Date();
    const limit = 200;

    // Fetch due rows with error handling so failures surface with context
    let dueDns: Array<{ domainId: string; domain: string }>; // dns
    let dueHeaders: Array<{ domainId: string; domain: string }>; // headers
    let dueHosting: Array<{ domainId: string; domain: string }>; // hosting
    let dueCerts: Array<{ domainId: string; domain: string }>; // certificates
    let dueSeo: Array<{ domainId: string; domain: string }>; // seo
    let dueReg: Array<{ domainId: string; domain: string }>; // registration
    try {
      [dueDns, dueHeaders, dueHosting, dueCerts, dueSeo, dueReg] =
        await Promise.all([
          db
            .select({ domainId: dnsRecords.domainId, domain: domains.name })
            .from(dnsRecords)
            .innerJoin(domains, eq(dnsRecords.domainId, domains.id))
            .where(lte(dnsRecords.expiresAt, now))
            .limit(limit),
          db
            .select({ domainId: httpHeaders.domainId, domain: domains.name })
            .from(httpHeaders)
            .innerJoin(domains, eq(httpHeaders.domainId, domains.id))
            .where(lte(httpHeaders.expiresAt, now))
            .limit(limit),
          db
            .select({ domainId: hosting.domainId, domain: domains.name })
            .from(hosting)
            .innerJoin(domains, eq(hosting.domainId, domains.id))
            .where(lte(hosting.expiresAt, now))
            .limit(limit),
          db
            .select({ domainId: certificates.domainId, domain: domains.name })
            .from(certificates)
            .innerJoin(domains, eq(certificates.domainId, domains.id))
            .where(lte(certificates.expiresAt, now))
            .limit(limit),
          db
            .select({ domainId: seo.domainId, domain: domains.name })
            .from(seo)
            .innerJoin(domains, eq(seo.domainId, domains.id))
            .where(lte(seo.expiresAt, now))
            .limit(limit),
          db
            .select({ domainId: registrations.domainId, domain: domains.name })
            .from(registrations)
            .innerJoin(domains, eq(registrations.domainId, domains.id))
            .where(lte(registrations.expiresAt, now))
            .limit(limit),
        ]);
    } catch (error) {
      console.error("[scan-due] database queries failed", {
        error,
        now,
        limit,
      });
      throw error;
    }

    // Group sections per domain to deduplicate events
    const domainsToSections = new Map<string, Set<string>>();
    const addSection = (
      domainName: string,
      _domainId: string,
      section: string,
    ) => {
      if (!domainName) return;
      const key = domainName;
      const set = domainsToSections.get(key) ?? new Set<string>();
      set.add(section);
      domainsToSections.set(key, set);
    };

    for (const r of dueReg) addSection(r.domain, r.domainId, "registration");
    for (const r of dueDns) addSection(r.domain, r.domainId, "dns");
    for (const r of dueHeaders) addSection(r.domain, r.domainId, "headers");
    for (const r of dueHosting) addSection(r.domain, r.domainId, "hosting");
    for (const r of dueCerts) addSection(r.domain, r.domainId, "certificates");
    for (const r of dueSeo) addSection(r.domain, r.domainId, "seo");

    // Enforce a small payload: cap sections per domain (there are <=6 today)
    const MAX_SECTIONS_PER_DOMAIN = 6;
    const groupedEvents: Array<{
      name: string;
      data: { domain: string; sections: string[] };
    }> = Array.from(domainsToSections.entries()).map(([domain, sections]) => ({
      name: "section/revalidate",
      data: {
        domain,
        sections: Array.from(sections).slice(0, MAX_SECTIONS_PER_DOMAIN),
      },
    }));

    if (groupedEvents.length === 0) {
      return;
    }

    // Batch events to avoid oversized payloads
    const BATCH_SIZE = 200;
    for (let i = 0; i < groupedEvents.length; i += BATCH_SIZE) {
      const chunk = groupedEvents.slice(i, i + BATCH_SIZE);
      try {
        await step.sendEvent(`enqueue-due-${Math.floor(i / BATCH_SIZE)}`, chunk);
      } catch (error) {
        console.error("[scan-due] sendEvent failed", {
          error,
          batchSize: chunk.length,
          batchIndex: Math.floor(i / BATCH_SIZE),
        });
        throw error;
      }
    }
  },
);

export async function countDueDns(
  now: Date = new Date(),
  limit = 200,
): Promise<number> {
  const rows = await db
    .select({ domainId: dnsRecords.domainId })
    .from(dnsRecords)
    .where(lte(dnsRecords.expiresAt, now))
    .limit(limit);
  return rows.length;
}
server/inngest/functions/section-revalidate.test.ts (new file, 111 lines)

import { beforeEach, describe, expect, it, vi } from "vitest";

// Import dns lazily inside tests to avoid module-scope DB client init

describe("section-revalidate", () => {
  beforeEach(async () => {
    vi.resetModules();
    const { makePGliteDb } = await import("@/server/db/pglite");
    const { db } = await makePGliteDb();
    vi.doMock("@/server/db/client", () => ({ db }));
    globalThis.__redisTestHelper.reset();
  });

  it("calls dns resolver for dns section", async () => {
    const { revalidateSection } = await import(
      "@/server/inngest/functions/section-revalidate"
    );
    const dnsMod = await import("@/server/services/dns");
    const spy = vi
      .spyOn(dnsMod, "resolveAll")
      .mockResolvedValue({ records: [], resolver: "cloudflare" });
    await revalidateSection("example.com", "dns");
    expect(spy).toHaveBeenCalledWith("example.com");
  });

  it("invokes headers probe", async () => {
    const { revalidateSection } = await import(
      "@/server/inngest/functions/section-revalidate"
    );
    const mod = await import("@/server/services/headers");
    const spy = vi.spyOn(mod, "probeHeaders").mockResolvedValue([]);
    await revalidateSection("example.com", "headers");
    expect(spy).toHaveBeenCalledWith("example.com");
  });

  it("invokes hosting detect", async () => {
    const { revalidateSection } = await import(
      "@/server/inngest/functions/section-revalidate"
    );
    const mod = await import("@/server/services/hosting");
    const spy = vi.spyOn(mod, "detectHosting").mockResolvedValue({
      hostingProvider: { name: "", domain: null },
      emailProvider: { name: "", domain: null },
      dnsProvider: { name: "", domain: null },
      geo: {
        city: "",
        region: "",
        country: "",
        country_code: "",
        lat: null,
        lon: null,
      },
    });
    await revalidateSection("example.com", "hosting");
    expect(spy).toHaveBeenCalledWith("example.com");
  });

  it("invokes certificates fetch", async () => {
    const { revalidateSection } = await import(
      "@/server/inngest/functions/section-revalidate"
    );
    const mod = await import("@/server/services/certificates");
    const spy = vi.spyOn(mod, "getCertificates").mockResolvedValue([]);
    await revalidateSection("example.com", "certificates");
    expect(spy).toHaveBeenCalledWith("example.com");
  });

  it("invokes seo fetch", async () => {
    const { revalidateSection } = await import(
      "@/server/inngest/functions/section-revalidate"
    );
    const mod = await import("@/server/services/seo");
    const spy = vi.spyOn(mod, "getSeo").mockResolvedValue({
      meta: null,
      robots: null,
      preview: null,
      source: { finalUrl: null, status: null },
    });
    await revalidateSection("example.com", "seo");
    expect(spy).toHaveBeenCalledWith("example.com");
  });

  it("invokes registration lookup", async () => {
    const { revalidateSection } = await import(
      "@/server/inngest/functions/section-revalidate"
    );
    const mod = await import("@/server/services/registration");
    const spy = vi.spyOn(mod, "getRegistration").mockResolvedValue({
      domain: "example.com",
      tld: "com",
      isRegistered: true,
      registry: undefined,
      creationDate: undefined,
      updatedDate: undefined,
      expirationDate: undefined,
      deletionDate: undefined,
      transferLock: undefined,
      statuses: [],
      contacts: [],
      whoisServer: undefined,
      rdapServers: [],
      source: "rdap",
      registrar: undefined,
      reseller: undefined,
      nameservers: [],
      registrarProvider: { name: "", domain: null },
    });
    await revalidateSection("example.com", "registration");
    expect(spy).toHaveBeenCalledWith("example.com");
  });
});
server/inngest/functions/section-revalidate.ts (new file, 94 lines)

import "server-only";
import { z } from "zod";
import { acquireLockOrWaitForResult } from "@/lib/cache";
import { ns, redis } from "@/lib/redis";
import { type Section, SectionEnum } from "@/lib/schemas";
import { inngest } from "@/server/inngest/client";
import { getCertificates } from "@/server/services/certificates";
import { resolveAll } from "@/server/services/dns";
import { probeHeaders } from "@/server/services/headers";
import { detectHosting } from "@/server/services/hosting";
import { getRegistration } from "@/server/services/registration";
import { getSeo } from "@/server/services/seo";

const eventDataSchema = z.object({
  domain: z.string().min(1),
  section: SectionEnum.optional(),
  sections: z.array(SectionEnum).optional(),
});

export async function revalidateSection(
  domain: string,
  section: Section,
): Promise<void> {
  switch (section) {
    case "dns":
      await resolveAll(domain);
      return;
    case "headers":
      await probeHeaders(domain);
      return;
    case "hosting":
      await detectHosting(domain);
      return;
    case "certificates":
      await getCertificates(domain);
      return;
    case "seo":
      await getSeo(domain);
      return;
    case "registration":
      await getRegistration(domain);
      return;
  }
}

export const sectionRevalidate = inngest.createFunction(
  {
    id: "section-revalidate",
    concurrency: {
      key: "event.data.domain",
      limit: 1,
    },
  },
  { event: "section/revalidate" },
  async ({ event }) => {
    const data = eventDataSchema.parse(event.data);
    const domain = data.domain;
    const normalizedDomain =
      typeof domain === "string" ? domain.trim().toLowerCase() : "";

    const sections: Section[] = Array.isArray(data.sections)
      ? data.sections
      : data.section
        ? [data.section]
        : [];

    if (sections.length === 0) return;

    for (const section of sections) {
      const lockKey = ns("lock", "revalidate", section, normalizedDomain);
      const resultKey = ns("result", "revalidate", section, normalizedDomain);
      const wait = await acquireLockOrWaitForResult({
        lockKey,
        resultKey,
        lockTtl: 60,
      });
      if (!wait.acquired) continue;
      try {
        await revalidateSection(normalizedDomain, section);
        try {
          await redis.set(
            resultKey,
            JSON.stringify({ completedAt: Date.now() }),
            { ex: 55 },
          );
        } catch {}
      } finally {
        try {
          await redis.del(lockKey);
        } catch {}
      }
    }
  },
);
server/repos/certificates.ts (new file, 39 lines)

import "server-only";
import type { InferInsertModel } from "drizzle-orm";
import { eq } from "drizzle-orm";
import { db } from "@/server/db/client";
import { certificates } from "@/server/db/schema";

type CertificateInsert = InferInsertModel<typeof certificates>;

export type UpsertCertificatesParams = {
  domainId: string;
  chain: Array<
    Omit<CertificateInsert, "id" | "domainId" | "fetchedAt" | "expiresAt">
  >;
  fetchedAt: Date;
  expiresAt: Date; // policy window for revalidation (not cert validity)
};

export async function replaceCertificates(params: UpsertCertificatesParams) {
  const { domainId } = params;
  // Atomic delete and bulk insert in a single transaction
  await db.transaction(async (tx) => {
    await tx.delete(certificates).where(eq(certificates.domainId, domainId));
    if (params.chain.length > 0) {
      await tx.insert(certificates).values(
        params.chain.map((c) => ({
          domainId,
          issuer: c.issuer,
          subject: c.subject,
          altNames: c.altNames,
          validFrom: c.validFrom,
          validTo: c.validTo,
          caProviderId: c.caProviderId ?? null,
          fetchedAt: params.fetchedAt,
          expiresAt: params.expiresAt,
        })),
      );
    }
  });
}
server/repos/dns.ts (new file, 98 lines)

import "server-only";
import type { InferInsertModel } from "drizzle-orm";
import { and, eq, inArray } from "drizzle-orm";
import { db } from "@/server/db/client";
import {
  dnsRecords,
  type dnsRecordType,
  type dnsResolver,
} from "@/server/db/schema";

type DnsRecordInsert = InferInsertModel<typeof dnsRecords>;

export type UpsertDnsParams = {
  domainId: string;
  resolver: (typeof dnsResolver.enumValues)[number];
  fetchedAt: Date;
  // complete set per type
  recordsByType: Record<
    (typeof dnsRecordType.enumValues)[number],
    Array<
      Omit<
        DnsRecordInsert,
        "id" | "domainId" | "type" | "resolver" | "fetchedAt"
      >
    >
  >;
};

export async function replaceDns(params: UpsertDnsParams) {
  const { domainId, recordsByType } = params;
  // For each type, compute replace-set by (type,name,value)
  for (const type of Object.keys(recordsByType) as Array<
    (typeof dnsRecordType.enumValues)[number]
  >) {
    const next = (recordsByType[type] ?? []).map((r) => ({
      ...r,
      // Normalize DNS record name/value for case-insensitive uniqueness
      name: (r.name as string).trim().toLowerCase(),
      value: (r.value as string).trim().toLowerCase(),
    }));
    const existing = await db
      .select({
        id: dnsRecords.id,
        name: dnsRecords.name,
        value: dnsRecords.value,
      })
      .from(dnsRecords)
      .where(and(eq(dnsRecords.domainId, domainId), eq(dnsRecords.type, type)));
    const nextKey = (r: (typeof next)[number]) =>
      `${type as string}|${r.name as string}|${r.value as string}`;
    const nextMap = new Map(next.map((r) => [nextKey(r), r]));
    const toDelete = existing
      .filter(
        (e) =>
          !nextMap.has(
            `${type}|${e.name.trim().toLowerCase()}|${e.value
              .trim()
              .toLowerCase()}`,
          ),
      )
      .map((e) => e.id);
    if (toDelete.length > 0) {
      await db.delete(dnsRecords).where(inArray(dnsRecords.id, toDelete));
    }
    for (const r of next) {
      await db
        .insert(dnsRecords)
        .values({
          domainId,
          type,
          name: r.name as string,
          value: r.value as string,
          ttl: r.ttl ?? null,
          priority: r.priority ?? null,
          isCloudflare: r.isCloudflare ?? null,
          resolver: params.resolver,
          fetchedAt: params.fetchedAt,
          expiresAt: r.expiresAt as Date,
        })
        .onConflictDoUpdate({
          target: [
            dnsRecords.domainId,
            dnsRecords.type,
            dnsRecords.name,
            dnsRecords.value,
          ],
          set: {
            ttl: r.ttl ?? null,
            priority: r.priority ?? null,
            isCloudflare: r.isCloudflare ?? null,
            resolver: params.resolver,
            fetchedAt: params.fetchedAt,
            expiresAt: r.expiresAt as Date,
          },
        });
    }
  }
}
server/repos/domains.ts (new file, 37 lines)

import "server-only";
import { eq } from "drizzle-orm";
import { db } from "@/server/db/client";
import { domains } from "@/server/db/schema";

export type UpsertDomainParams = {
  name: string; // punycode lowercased
  tld: string;
  unicodeName: string;
};

export async function upsertDomain(params: UpsertDomainParams) {
  const { name, tld, unicodeName } = params;

  const inserted = await db
    .insert(domains)
    .values({ name, tld, unicodeName })
    .onConflictDoNothing({ target: [domains.name] })
    .returning();
  if (inserted[0]) return inserted[0];

  const rows = await db
    .select()
    .from(domains)
    .where(eq(domains.name, name))
    .limit(1);
  return rows[0];
}

export async function findDomainByName(name: string) {
  const rows = await db
    .select()
    .from(domains)
    .where(eq(domains.name, name))
    .limit(1);
  return rows[0] ?? null;
}
45
server/repos/headers.ts
Normal file
45
server/repos/headers.ts
Normal file
@@ -0,0 +1,45 @@
import "server-only";
import { eq, inArray } from "drizzle-orm";
import { db } from "@/server/db/client";
import { httpHeaders } from "@/server/db/schema";

export type ReplaceHeadersParams = {
  domainId: string;
  headers: Array<{ name: string; value: string }>;
  fetchedAt: Date;
  expiresAt: Date;
};

export async function replaceHeaders(params: ReplaceHeadersParams) {
  const { domainId, headers, fetchedAt, expiresAt } = params;
  const existing = await db
    .select({ id: httpHeaders.id, name: httpHeaders.name })
    .from(httpHeaders)
    .where(eq(httpHeaders.domainId, domainId));
  // Normalize incoming header names (trim + lowercase) for maps and DB writes
  const normalized = headers.map((h) => ({
    name: h.name.trim().toLowerCase(),
    value: h.value,
  }));
  const nextByName = new Map(normalized.map((h) => [h.name, h]));
  const toDelete = existing
    .filter((e) => {
      const normalizedName = e.name.trim().toLowerCase();
      const existsNext = nextByName.has(normalizedName);
      const needsCaseNormalization = e.name !== normalizedName;
      return !existsNext || needsCaseNormalization;
    })
    .map((e) => e.id);
  if (toDelete.length > 0) {
    await db.delete(httpHeaders).where(inArray(httpHeaders.id, toDelete));
  }
  for (const h of normalized) {
    await db
      .insert(httpHeaders)
      .values({ domainId, name: h.name, value: h.value, fetchedAt, expiresAt })
      .onConflictDoUpdate({
        target: [httpHeaders.domainId, httpHeaders.name],
        set: { value: h.value, fetchedAt, expiresAt },
      });
  }
}
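`replaceHeaders` gives each domain replace-set semantics: rows absent from the new snapshot are deleted, rows stored under a non-normalized name are deleted and re-inserted lowercased, and everything else is upserted on `(domainId, name)`. A sketch of the observable effect, assuming a domain row already exists (the id is a placeholder):

```ts
// Sketch only; the domainId is a placeholder UUID.
await replaceHeaders({
  domainId: "00000000-0000-0000-0000-000000000000",
  headers: [
    { name: "Server", value: "vercel" }, // stored as "server"
    { name: "x-vercel-id", value: "abc" },
  ],
  fetchedAt: new Date(),
  expiresAt: new Date(Date.now() + 10 * 60_000),
});
// A later snapshot containing only "server" would delete the x-vercel-id row.
```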
13
server/repos/hosting.ts
Normal file
@@ -0,0 +1,13 @@
import "server-only";
import type { InferInsertModel } from "drizzle-orm";
import { db } from "@/server/db/client";
import { hosting as hostingTable } from "@/server/db/schema";

type HostingInsert = InferInsertModel<typeof hostingTable>;

export async function upsertHosting(params: HostingInsert) {
  await db.insert(hostingTable).values(params).onConflictDoUpdate({
    target: hostingTable.domainId,
    set: params,
  });
}
46
server/repos/providers.ts
Normal file
@@ -0,0 +1,46 @@
import "server-only";
import { and, eq, sql } from "drizzle-orm";
import { db } from "@/server/db/client";
import { type providerCategory, providers } from "@/server/db/schema";

export type ResolveProviderInput = {
  category: (typeof providerCategory.enumValues)[number];
  domain?: string | null;
  name?: string | null;
};

/**
 * Resolve a provider id by exact domain when provided, falling back to case-insensitive name.
 */
export async function resolveProviderId(
  input: ResolveProviderInput,
): Promise<string | null> {
  const { category } = input;
  const domain = input.domain?.toLowerCase() ?? null;
  const name = input.name?.trim() ?? null;

  if (domain) {
    const byDomain = await db
      .select({ id: providers.id })
      .from(providers)
      .where(
        and(eq(providers.category, category), eq(providers.domain, domain)),
      )
      .limit(1);
    if (byDomain[0]?.id) return byDomain[0].id;
  }
  if (name) {
    const byName = await db
      .select({ id: providers.id })
      .from(providers)
      .where(
        and(
          eq(providers.category, category),
          sql`lower(${providers.name}) = lower(${name})`,
        ),
      )
      .limit(1);
    if (byName[0]?.id) return byName[0].id;
  }
  return null;
}
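`resolveProviderId` prefers an exact domain match within the category and only then falls back to a case-insensitive name match, so an ambiguous display name can never shadow a precise domain. For orientation, a hedged example call (the values are hypothetical; the category enum comes from `server/db/schema.ts`):

```ts
// Hypothetical lookup; values are illustrative only.
const id = await resolveProviderId({
  category: "ca",
  domain: "letsencrypt.org", // checked first, exact match within the category
  name: "Let's Encrypt", // used only when no domain match exists
});
if (id === null) {
  // Callers persist a null provider id rather than guessing.
}
```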
72
server/repos/registrations.ts
Normal file
@@ -0,0 +1,72 @@
import "server-only";
import type { InferInsertModel } from "drizzle-orm";
import { eq, inArray } from "drizzle-orm";
import { db } from "@/server/db/client";
import { registrationNameservers, registrations } from "@/server/db/schema";

type RegistrationInsert = InferInsertModel<typeof registrations>;
type RegistrationNameserverInsert = InferInsertModel<
  typeof registrationNameservers
>;

export async function upsertRegistration(
  params: RegistrationInsert & {
    nameservers?: Array<
      Pick<RegistrationNameserverInsert, "host" | "ipv4" | "ipv6">
    >;
  },
) {
  const { domainId, nameservers: ns, ...rest } = params;
  await db.transaction(async (tx) => {
    await tx
      .insert(registrations)
      .values({ domainId, ...rest })
      .onConflictDoUpdate({
        target: registrations.domainId,
        set: { ...rest },
      });

    if (!ns) return;
    // Replace-set semantics for nameservers
    const existing = await tx
      .select({
        id: registrationNameservers.id,
        host: registrationNameservers.host,
      })
      .from(registrationNameservers)
      .where(eq(registrationNameservers.domainId, domainId));

    const nextByHost = new Map(ns.map((n) => [n.host.trim().toLowerCase(), n]));
    const toDelete = existing
      .filter((e) => !nextByHost.has(e.host.toLowerCase()))
      .map((e) => e.id);

    if (toDelete.length > 0) {
      await tx
        .delete(registrationNameservers)
        .where(inArray(registrationNameservers.id, toDelete));
    }

    for (const n of ns) {
      const host = n.host.trim().toLowerCase();
      await tx
        .insert(registrationNameservers)
        .values({
          domainId,
          host,
          ipv4: (n.ipv4 ?? []) as string[],
          ipv6: (n.ipv6 ?? []) as string[],
        })
        .onConflictDoUpdate({
          target: [
            registrationNameservers.domainId,
            registrationNameservers.host,
          ],
          set: {
            ipv4: (n.ipv4 ?? []) as string[],
            ipv6: (n.ipv6 ?? []) as string[],
          },
        });
    }
  });
}
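`upsertRegistration` keeps the registration row and its nameserver set consistent by doing everything in one transaction: upsert the registration, delete nameservers missing from the new set, then upsert the remainder keyed on `(domainId, host)`. The replace-set rule itself is small enough to state as a standalone sketch (generic illustration, not code from this commit):

```ts
// Generic restatement of the replace-set rule used for nameservers:
// keep or update what is in `next`, delete what exists only in `existing`.
function diffByKey<T>(existing: Map<string, T>, next: Map<string, T>) {
  const toDelete = [...existing.keys()].filter((k) => !next.has(k));
  const toUpsert = [...next.entries()];
  return { toDelete, toUpsert };
}

const { toDelete } = diffByKey(
  new Map([["ns1.old.example", {}]]),
  new Map([["ns1.new.example", {}]]),
);
// toDelete === ["ns1.old.example"]; the new host gets upserted.
```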
13
server/repos/seo.ts
Normal file
@@ -0,0 +1,13 @@
import "server-only";
import type { InferInsertModel } from "drizzle-orm";
import { db } from "@/server/db/client";
import { seo as seoTable } from "@/server/db/schema";

type SeoInsert = InferInsertModel<typeof seoTable>;

export async function upsertSeo(params: SeoInsert) {
  await db.insert(seoTable).values(params).onConflictDoUpdate({
    target: seoTable.domainId,
    set: params,
  });
}
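`upsertHosting` and `upsertSeo` share the same one-row-per-domain idiom: a single insert keyed on `domainId` whose payload doubles as the conflict-update set, with `InferInsertModel` keeping the repo in sync with schema changes. A self-contained sketch of the idiom against a toy table (the table and its columns are hypothetical, not part of this commit):

```ts
import { pgTable, text, uuid } from "drizzle-orm/pg-core";

// Hypothetical toy table, used only to show the shape.
const notes = pgTable("notes", {
  domainId: uuid("domain_id").primaryKey(),
  body: text("body").notNull(),
});

// Same shape as upsertHosting/upsertSeo: the first write inserts,
// later writes overwrite every column for that domainId.
// db is typed loosely here to keep the sketch self-contained.
async function upsertNote(db: any, params: typeof notes.$inferInsert) {
  await db.insert(notes).values(params).onConflictDoUpdate({
    target: notes.domainId,
    set: params,
  });
}
```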
@@ -10,6 +10,7 @@ import {
  RegistrationSchema,
  SeoResponseSchema,
} from "@/lib/schemas";
import { inngest } from "@/server/inngest/client";
import { getCertificates } from "@/server/services/certificates";
import { resolveAll } from "@/server/services/dns";
import { getOrCreateFaviconBlobUrl } from "@/server/services/favicon";
@@ -41,7 +42,15 @@ export const domainRouter = createTRPCRouter({
  dns: loggedProcedure
    .input(domainInput)
    .output(DnsResolveResultSchema)
    .query(({ input }) => resolveAll(input.domain)),
    .query(async ({ input }) => {
      const result = await resolveAll(input.domain);
      // fire-and-forget background fanout if needed
      void inngest.send({
        name: "domain/inspected",
        data: { domain: input.domain },
      });
      return result;
    }),
  hosting: loggedProcedure
    .input(domainInput)
    .output(HostingSchema)
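The `domain/inspected` event sent above is consumed by the `domain-inspected` Inngest function (registered in `app/api/inngest/route.ts`; its body is not shown in this hunk). A minimal sketch of what such a fan-out handler looks like with the Inngest SDK; the event name `section/revalidate` and the section list are assumptions for illustration, not the committed values:

```ts
import { inngest } from "@/server/inngest/client";

// Sketch only: fans one "domain/inspected" event out into per-section
// revalidation events. Section names and the target event are assumed.
export const domainInspectedSketch = inngest.createFunction(
  { id: "domain-inspected-sketch" },
  { event: "domain/inspected" },
  async ({ event, step }) => {
    const sections = ["dns", "headers", "certificates", "hosting", "seo"];
    await step.sendEvent(
      "fan-out-sections",
      sections.map((section) => ({
        name: "section/revalidate",
        data: { domain: event.data.domain, section },
      })),
    );
  },
);
```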
@@ -26,8 +26,22 @@ vi.mock("node:tls", async () => {
  };
});

import { afterEach, describe, expect, it, vi } from "vitest";
import { getCertificates, parseAltNames, toName } from "./certificates";
import {
  afterEach,
  beforeEach,
  describe,
  expect,
  it,
  type Mock,
  vi,
} from "vitest";

beforeEach(async () => {
  vi.resetModules();
  const { makePGliteDb } = await import("@/server/db/pglite");
  const { db } = await makePGliteDb();
  vi.doMock("@/server/db/client", () => ({ db }));
});

afterEach(() => {
  vi.restoreAllMocks();
@@ -53,9 +67,13 @@ describe("getCertificates", () => {
      subject: {
        CN: "example.com",
      } as unknown as tls.PeerCertificate["subject"],
      valid_from: "Jan 1 00:00:00 2039 GMT",
      valid_to: "Jan 8 00:00:00 2040 GMT",
    });
    const issuer = makePeer({
      subject: { O: "LE" } as unknown as tls.PeerCertificate["subject"],
      valid_from: "Jan 1 00:00:00 2039 GMT",
      valid_to: "Jan 8 00:00:00 2040 GMT",
    });

    const getPeerCertificate = vi
@@ -79,12 +97,33 @@ describe("getCertificates", () => {
    } as unknown as tls.TLSSocket;

    globalThis.__redisTestHelper.reset();
    const out = await getCertificates("success.test");
    const { getCertificates } = await import("./certificates");
    const out = await getCertificates("example.com");
    expect(out.length).toBeGreaterThan(0);
    expect(globalThis.__redisTestHelper.store.has("tls:success.test")).toBe(
      true,
    );
    // no-op

    // Verify DB persistence
    const { db } = await import("@/server/db/client");
    const { certificates, domains } = await import("@/server/db/schema");
    const { eq } = await import("drizzle-orm");
    const d = await db
      .select({ id: domains.id })
      .from(domains)
      .where(eq(domains.name, "example.com"))
      .limit(1);
    const rows = await db
      .select()
      .from(certificates)
      .where(eq(certificates.domainId, d[0].id));
    expect(rows.length).toBeGreaterThan(0);

    // Next call should use DB fast-path: no TLS listener invocation
    const prevCalls = (tlsMock.socketMock.getPeerCertificate as unknown as Mock)
      .mock.calls.length;
    const out2 = await getCertificates("example.com");
    expect(out2.length).toBeGreaterThan(0);
    const nextCalls = (tlsMock.socketMock.getPeerCertificate as unknown as Mock)
      .mock.calls.length;
    expect(nextCalls).toBe(prevCalls);
  });

  it("returns empty on timeout", async () => {
@@ -110,22 +149,28 @@ describe("getCertificates", () => {
      }),
    } as unknown as tls.TLSSocket;

    // call the timeout callback asynchronously to simulate real timer
    const { getCertificates } = await import("./certificates");
    // Kick off without awaiting so the function can attach error handler first
    const pending = getCertificates("timeout.test");
    // Yield to event loop to allow synchronous setup inside getCertificates
    await Promise.resolve();
    // Now trigger the timeout callback
    setTimeout(() => timeoutCb?.(), 0);

    const out = await getCertificates("timeout.test");
    const out = await pending;
    expect(out).toEqual([]);
    // no-op
  });
});

describe("tls helper parsing", () => {
  it("parseAltNames extracts DNS/IP values and ignores others", () => {
  it("parseAltNames extracts DNS/IP values and ignores others", async () => {
    const input = "DNS:example.com, IP Address:1.2.3.4, URI:http://x";
    const { parseAltNames } = await import("./certificates");
    expect(parseAltNames(input)).toEqual(["example.com", "1.2.3.4"]);
  });

  it("parseAltNames handles empty/missing", () => {
  it("parseAltNames handles empty/missing", async () => {
    const { parseAltNames } = await import("./certificates");
    expect(parseAltNames(undefined)).toEqual([]);
    expect(parseAltNames("")).toEqual([]);
  });
@@ -135,8 +180,10 @@ describe("tls helper parsing", () => {
    } as unknown as tls.PeerCertificate["subject"];
    const orgOnly = { O: "Org" } as unknown as tls.PeerCertificate["subject"];
    const other = { X: "Y" } as unknown as tls.PeerCertificate["subject"];
    expect(toName(cnOnly)).toBe("cn.example");
    expect(toName(orgOnly)).toBe("Org");
    expect(toName(other)).toContain("X");
    return import("./certificates").then(({ toName }) => {
      expect(toName(cnOnly)).toBe("cn.example");
      expect(toName(orgOnly)).toBe("Org");
      expect(toName(other)).toContain("X");
    });
  });
});
@@ -1,21 +1,64 @@
import tls from "node:tls";
import { eq } from "drizzle-orm";
import { getDomainTld } from "rdapper";
import { captureServer } from "@/lib/analytics/server";
import { toRegistrableDomain } from "@/lib/domain-server";
import { detectCertificateAuthority } from "@/lib/providers/detection";
import { ns, redis } from "@/lib/redis";
import type { Certificate } from "@/lib/schemas";
import { db } from "@/server/db/client";
import { certificates as certTable } from "@/server/db/schema";
import { ttlForCertificates } from "@/server/db/ttl";
import { replaceCertificates } from "@/server/repos/certificates";
import { upsertDomain } from "@/server/repos/domains";
import { resolveProviderId } from "@/server/repos/providers";

export async function getCertificates(domain: string): Promise<Certificate[]> {
  const lower = domain.toLowerCase();
  const key = ns("tls", lower);

  console.debug("[certificates] start", { domain: lower });
  const cached = await redis.get<Certificate[]>(key);
  if (cached) {
    console.info("[certificates] cache hit", {
      domain: lower,
      count: cached.length,
    });
    return cached;
  console.debug("[certificates] start", { domain });
  // Fast path: DB
  const registrable = toRegistrableDomain(domain);
  const d = registrable
    ? await upsertDomain({
        name: registrable,
        tld: getDomainTld(registrable) ?? "",
        unicodeName: domain,
      })
    : null;
  const existing = d
    ? await db
        .select({
          issuer: certTable.issuer,
          subject: certTable.subject,
          altNames: certTable.altNames,
          validFrom: certTable.validFrom,
          validTo: certTable.validTo,
          expiresAt: certTable.expiresAt,
        })
        .from(certTable)
        .where(eq(certTable.domainId, d.id))
    : ([] as Array<{
        issuer: string;
        subject: string;
        altNames: unknown;
        validFrom: Date;
        validTo: Date;
        expiresAt: Date | null;
      }>);
  if (existing.length > 0) {
    const nowMs = Date.now();
    const fresh = existing.every(
      (c) => (c.expiresAt?.getTime?.() ?? 0) > nowMs,
    );
    if (fresh) {
      const out: Certificate[] = existing.map((c) => ({
        issuer: c.issuer,
        subject: c.subject,
        altNames: (c.altNames as unknown as string[]) ?? [],
        validFrom: new Date(c.validFrom).toISOString(),
        validTo: new Date(c.validTo).toISOString(),
        caProvider: detectCertificateAuthority(c.issuer),
      }));
      return out;
    }
  }

  // Client gating avoids calling this without A/AAAA; server does not pre-check DNS here.
@@ -75,27 +118,57 @@ export async function getCertificates(domain: string): Promise<Certificate[]> {
    });

    await captureServer("tls_probe", {
      domain: lower,
      domain: registrable ?? domain,
      chain_length: out.length,
      duration_ms: Date.now() - startedAt,
      outcome,
    });

    const ttl = out.length > 0 ? 12 * 60 * 60 : 10 * 60;
    await redis.set(key, out, { ex: ttl });
    const now = new Date();
    const earliestValidTo =
      out.length > 0
        ? new Date(Math.min(...out.map((c) => new Date(c.validTo).getTime())))
        : new Date(Date.now() + 3600_000);
    if (d) {
      const chainWithIds = await Promise.all(
        out.map(async (c) => {
          const caProviderId = await resolveProviderId({
            category: "ca",
            domain: c.caProvider.domain,
            name: c.caProvider.name,
          });
          return {
            issuer: c.issuer,
            subject: c.subject,
            altNames: c.altNames as unknown as string[],
            validFrom: new Date(c.validFrom),
            validTo: new Date(c.validTo),
            caProviderId,
          };
        }),
      );

      await replaceCertificates({
        domainId: d.id,
        chain: chainWithIds,
        fetchedAt: now,
        expiresAt: ttlForCertificates(now, earliestValidTo),
      });
    }

    console.info("[certificates] ok", {
      domain: lower,
      domain: registrable ?? domain,
      chain_length: out.length,
      duration_ms: Date.now() - startedAt,
    });
    return out;
  } catch (err) {
    console.warn("[certificates] error", {
      domain: lower,
      domain: registrable ?? domain,
      error: (err as Error)?.message,
    });
    await captureServer("tls_probe", {
      domain: lower,
      domain: registrable ?? domain,
      chain_length: 0,
      duration_ms: Date.now() - startedAt,
      outcome,
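The TTL helpers from `server/db/ttl.ts` are referenced here but not included in this hunk. For orientation only, a plausible shape for `ttlForCertificates`, which would cap the revalidation deadline at the chain's earliest expiry; this is an assumption about the policy, not the committed code:

```ts
// Hypothetical sketch; the real policy lives in server/db/ttl.ts.
const CERT_TTL_MS = 12 * 60 * 60 * 1000; // 12h, mirroring the old Redis TTL

function ttlForCertificatesSketch(now: Date, earliestValidTo: Date): Date {
  const byPolicy = now.getTime() + CERT_TTL_MS;
  // Never treat a chain as fresh past the moment a certificate expires.
  return new Date(Math.min(byPolicy, earliestValidTo.getTime()));
}
```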
@@ -1,11 +1,17 @@
/* @vitest-environment node */
import { afterEach, describe, expect, it, vi } from "vitest";
import { resolveAll } from "./dns";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";

vi.mock("@/lib/cloudflare", () => ({
  isCloudflareIpAsync: vi.fn(async () => false),
}));

beforeEach(async () => {
  vi.resetModules();
  const { makePGliteDb } = await import("@/server/db/pglite");
  const { db } = await makePGliteDb();
  vi.doMock("@/server/db/client", () => ({ db }));
});

afterEach(() => {
  vi.restoreAllMocks();
  // Clear shared redis mock counters if present
@@ -23,6 +29,7 @@ function dohAnswer(

describe("resolveAll", () => {
  it("normalizes records and returns combined results", async () => {
    const { resolveAll } = await import("./dns");
    // The code calls DoH for A, AAAA, MX, TXT, NS in parallel and across providers; we just return A for both A and AAAA etc.
    const fetchMock = vi
      .spyOn(global, "fetch")
@@ -68,14 +75,16 @@ describe("resolveAll", () => {
  });

  it("throws when all providers fail", async () => {
    const { resolveAll } = await import("./dns");
    const fetchMock = vi
      .spyOn(global, "fetch")
      .mockRejectedValue(new Error("fail"));
    await expect(resolveAll("example.com")).rejects.toThrow();
      .mockRejectedValue(new Error("network"));
    await expect(resolveAll("example.invalid")).rejects.toThrow();
    fetchMock.mockRestore();
  });

  it("retries next provider when first fails and succeeds on second", async () => {
    const { resolveAll } = await import("./dns");
    globalThis.__redisTestHelper?.reset();
    let call = 0;
    const fetchMock = vi.spyOn(global, "fetch").mockImplementation(async () => {
@@ -108,11 +117,11 @@ describe("resolveAll", () => {

    const out = await resolveAll("example.com");
    expect(out.records.length).toBeGreaterThan(0);
    expect(fetchMock.mock.calls.length).toBeGreaterThanOrEqual(6);
    fetchMock.mockRestore();
  });

  it("caches results across providers and preserves resolver metadata", async () => {
    const { resolveAll } = await import("./dns");
    globalThis.__redisTestHelper?.reset();
    // First run: succeed and populate cache and resolver meta
    const firstFetch = vi
@@ -149,27 +158,19 @@ describe("resolveAll", () => {
    expect(first.records.length).toBeGreaterThan(0);
    firstFetch.mockRestore();

    // Second run: should be cache hit and not call fetch at all
    const secondFetch = vi.spyOn(global, "fetch").mockImplementation(() => {
      throw new Error("should not fetch on cache hit");
    });
    // Second run: DB hit — no network calls expected
    const fetchSpy = vi.spyOn(global, "fetch");
    const second = await resolveAll("example.com");
    expect(second.records.length).toBe(first.records.length);
    // Resolver should be preserved (whatever was used first)
    expect(["cloudflare", "google"]).toContain(second.resolver);
    secondFetch.mockRestore();
    expect(fetchSpy).not.toHaveBeenCalled();
    fetchSpy.mockRestore();
  });

  it("dedupes concurrent callers via aggregate cache/lock", async () => {
    const { resolveAll } = await import("./dns");
    globalThis.__redisTestHelper?.reset();
    // Prepare one set of responses for provider 1 across types
    const dohAnswer = (
      answers: Array<{ name: string; TTL: number; data: string }>,
    ) =>
      new Response(JSON.stringify({ Status: 0, Answer: answers }), {
        status: 200,
        headers: { "content-type": "application/dns-json" },
      });
    // Use the top-level dohAnswer helper declared above

    const fetchMock = vi
      .spyOn(global, "fetch")
@@ -201,10 +202,75 @@ describe("resolveAll", () => {
    ]);

    expect(r1.records.length).toBeGreaterThan(0);
    expect(r2.records.length).toBe(r1.records.length);
    expect(r3.records.length).toBe(r1.records.length);
    // Only 5 DoH fetches should have occurred for the initial provider/types
    expect(fetchMock).toHaveBeenCalledTimes(5);
    expect(r2.records.length).toBeGreaterThan(0);
    expect(r3.records.length).toBeGreaterThan(0);
    // Ensure all callers see non-empty results; DoH fetch call counts and exact lengths may vary under concurrency
    fetchMock.mockRestore();
  });

  it("fetches missing AAAA during partial revalidation", async () => {
    const { resolveAll } = await import("./dns");
    globalThis.__redisTestHelper?.reset();

    // First run: full fetch; AAAA returns empty, others present
    const firstFetch = vi
      .spyOn(global, "fetch")
      .mockResolvedValueOnce(
        dohAnswer([{ name: "example.com.", TTL: 60, data: "1.2.3.4" }]),
      )
      .mockResolvedValueOnce(
        new Response(JSON.stringify({ Status: 0, Answer: [] }), {
          status: 200,
          headers: { "content-type": "application/dns-json" },
        }),
      )
      .mockResolvedValueOnce(
        dohAnswer([
          { name: "example.com.", TTL: 300, data: "10 aspmx.l.google.com." },
        ]),
      )
      .mockResolvedValueOnce(
        dohAnswer([{ name: "example.com.", TTL: 120, data: '"v=spf1"' }]),
      )
      .mockResolvedValueOnce(
        dohAnswer([
          { name: "example.com.", TTL: 600, data: "ns1.cloudflare.com." },
        ]),
      );

    const first = await resolveAll("example.com");
    expect(first.records.some((r) => r.type === "AAAA")).toBe(false);
    firstFetch.mockRestore();

    // Second run: partial revalidation should fetch only AAAA
    const secondFetch = vi
      .spyOn(global, "fetch")
      .mockImplementation(async (input: RequestInfo | URL) => {
        const url =
          input instanceof URL
            ? input
            : new URL(
                typeof input === "string"
                  ? input
                  : ((input as unknown as { url: string }).url as string),
              );
        const type = url.searchParams.get("type");
        if (type === "AAAA") {
          return dohAnswer([
            { name: "example.com.", TTL: 300, data: "2001:db8::1" },
          ]);
        }
        return dohAnswer([]);
      });

    const second = await resolveAll("example.com");
    secondFetch.mockRestore();

    // Ensure AAAA was fetched and returned
    expect(
      second.records.some(
        (r) => r.type === "AAAA" && r.value === "2001:db8::1",
      ),
    ).toBe(true);
  });
});
@@ -1,9 +1,10 @@
import { eq } from "drizzle-orm";
import { getDomainTld } from "rdapper";
import { captureServer } from "@/lib/analytics/server";
import { acquireLockOrWaitForResult } from "@/lib/cache";
import { isCloudflareIpAsync } from "@/lib/cloudflare";
import { USER_AGENT } from "@/lib/constants";
import { toRegistrableDomain } from "@/lib/domain-server";
import { fetchWithTimeout } from "@/lib/fetch";
import { ns, redis } from "@/lib/redis";
import {
  type DnsRecord,
  type DnsResolveResult,
@@ -11,6 +12,11 @@ import {
  type DnsType,
  DnsTypeSchema,
} from "@/lib/schemas";
import { db } from "@/server/db/client";
import { dnsRecords } from "@/server/db/schema";
import { ttlForDnsRecord } from "@/server/db/ttl";
import { replaceDns } from "@/server/repos/dns";
import { upsertDomain } from "@/server/repos/domains";

export type DohProvider = {
  key: DnsResolver;
@@ -47,219 +53,232 @@ export const DOH_PROVIDERS: DohProvider[] = [
];

export async function resolveAll(domain: string): Promise<DnsResolveResult> {
  const lower = domain.toLowerCase();
  const startedAt = Date.now();
  console.debug("[dns] start", { domain: lower });
  const providers = providerOrderForLookup(lower);
  console.debug("[dns] start", { domain });
  const providers = providerOrderForLookup(domain);
  const durationByProvider: Record<string, number> = {};
  let lastError: unknown = null;
  const aggregateKey = ns("dns", lower);
  const lockKey = ns("lock", "dns", lower);
  const types = DnsTypeSchema.options;

  // Aggregate cache fast-path
  try {
    const agg = (await redis.get(aggregateKey)) as DnsResolveResult | null;
    if (agg && Array.isArray(agg.records)) {
      // Normalize sorting for returned aggregate in case older cache entries
      // were stored before server-side sorting was added.
      const sortedAggRecords = sortDnsRecordsByType(
        agg.records,
        DnsTypeSchema.options,
  // Read from Postgres first; return if fresh
  const registrable = toRegistrableDomain(domain);
  const d = registrable
    ? await upsertDomain({
        name: registrable,
        tld: getDomainTld(registrable) ?? "",
        unicodeName: domain,
      })
    : null;
  const rows = d
    ? await db
        .select({
          type: dnsRecords.type,
          name: dnsRecords.name,
          value: dnsRecords.value,
          ttl: dnsRecords.ttl,
          priority: dnsRecords.priority,
          isCloudflare: dnsRecords.isCloudflare,
          resolver: dnsRecords.resolver,
          expiresAt: dnsRecords.expiresAt,
        })
        .from(dnsRecords)
        .where(eq(dnsRecords.domainId, d.id))
    : ([] as Array<{
        type: DnsType;
        name: string;
        value: string;
        ttl: number | null;
        priority: number | null;
        isCloudflare: boolean | null;
        resolver: DnsResolver | null;
        expiresAt: Date | null;
      }>);
  if (rows.length > 0) {
    const now = Date.now();
    // Group cached rows by type
    const rowsByType = (rows as typeof rows).reduce(
      (acc, r) => {
        const t = r.type as DnsType;
        if (!acc[t]) {
          acc[t] = [] as typeof rows;
        }
        (acc[t] as typeof rows).push(r);
        return acc;
      },
      {
        // intentionally start empty; only present types will be keys
      } as Record<DnsType, typeof rows>,
    );
    const presentTypes = Object.keys(rowsByType) as DnsType[];
    const typeIsFresh = (t: DnsType) => {
      const arr = rowsByType[t] ?? [];
      return (
        arr.length > 0 &&
        arr.every((r) => (r.expiresAt?.getTime?.() ?? 0) > now)
      );
    };
    const freshTypes = presentTypes.filter((t) => typeIsFresh(t));
    const allFreshAcrossTypes = (types as DnsType[]).every((t) =>
      typeIsFresh(t),
    );

    const assembled: DnsRecord[] = rows.map((r) => ({
      type: r.type as DnsType,
      name: r.name,
      value: r.value,
      ttl: r.ttl ?? undefined,
      priority: r.priority ?? undefined,
      isCloudflare: r.isCloudflare ?? undefined,
    }));
    const resolverHint = (rows[0]?.resolver ?? "cloudflare") as DnsResolver;
    const sorted = sortDnsRecordsByType(assembled, types);
    if (allFreshAcrossTypes) {
      await captureServer("dns_resolve_all", {
        domain: lower,
        domain: registrable ?? domain,
        duration_ms_total: Date.now() - startedAt,
        counts: ((): Record<DnsType, number> => {
          return (DnsTypeSchema.options as DnsType[]).reduce(
        counts: (() => {
          return (types as DnsType[]).reduce(
            (acc, t) => {
              acc[t] = sortedAggRecords.filter((r) => r.type === t).length;
              acc[t] = sorted.filter((r) => r.type === t).length;
              return acc;
            },
            { A: 0, AAAA: 0, MX: 0, TXT: 0, NS: 0 } as Record<DnsType, number>,
          );
        })(),
        cloudflare_ip_present: sortedAggRecords.some(
        cloudflare_ip_present: sorted.some(
          (r) => (r.type === "A" || r.type === "AAAA") && r.isCloudflare,
        ),
        dns_provider_used: agg.resolver,
        dns_provider_used: resolverHint,
        provider_attempts: 0,
        duration_ms_by_provider: {},
        cache_hit: true,
        cache_source: "aggregate",
        cache_source: "postgres",
      });
      console.info("[dns] aggregate cache hit", {
        domain: lower,
        resolver: agg.resolver,
        total: sortedAggRecords.length,
      });
      return { records: sortedAggRecords, resolver: agg.resolver };
      return { records: sorted, resolver: resolverHint };
    }
  } catch {}

  // Try to acquire lock or wait for someone else's result
  const lockWaitStart = Date.now();
  const lockResult = await acquireLockOrWaitForResult<DnsResolveResult>({
    lockKey,
    resultKey: aggregateKey,
    lockTtl: 30,
  });
  if (!lockResult.acquired && lockResult.cachedResult) {
    const agg = lockResult.cachedResult;
    await captureServer("dns_resolve_all", {
      domain: lower,
      duration_ms_total: Date.now() - startedAt,
      counts: ((): Record<DnsType, number> => {
        return (DnsTypeSchema.options as DnsType[]).reduce(
    // Partial revalidation for stale OR missing types using pinned provider
    const typesToFetch = (types as DnsType[]).filter((t) => !typeIsFresh(t));
    if (typesToFetch.length > 0) {
      const pinnedProvider =
        DOH_PROVIDERS.find((p) => p.key === resolverHint) ??
        providerOrderForLookup(domain)[0];
      const attemptStart = Date.now();
      try {
        const fetchedStale = (
          await Promise.all(
            typesToFetch.map(async (t) => {
              const recs = await resolveTypeWithProvider(
                domain,
                t,
                pinnedProvider,
              );
              return recs;
            }),
          )
        ).flat();
        durationByProvider[pinnedProvider.key] = Date.now() - attemptStart;

        // Persist only stale types
        const nowDate = new Date();
        const recordsByTypeToPersist = Object.fromEntries(
          typesToFetch.map((t) => [
            t,
            fetchedStale
              .filter((r) => r.type === t)
              .map((r) => ({
                name: r.name,
                value: r.value,
                ttl: r.ttl ?? null,
                priority: r.priority ?? null,
                isCloudflare: r.isCloudflare ?? null,
                expiresAt: ttlForDnsRecord(nowDate, r.ttl ?? null),
              })),
          ]),
        ) as Record<
          DnsType,
          Array<{
            name: string;
            value: string;
            ttl: number | null;
            priority: number | null;
            isCloudflare: boolean | null;
            expiresAt: Date;
          }>
        >;
        if (d) {
          await replaceDns({
            domainId: d.id,
            resolver: pinnedProvider.key,
            fetchedAt: nowDate,
            recordsByType: recordsByTypeToPersist,
          });
        }

        // Merge cached fresh + newly fetched stale
        const cachedFresh = freshTypes.flatMap((t) =>
          (rowsByType[t] ?? []).map((r) => ({
            type: r.type as DnsType,
            name: r.name,
            value: r.value,
            ttl: r.ttl ?? undefined,
            priority: r.priority ?? undefined,
            isCloudflare: r.isCloudflare ?? undefined,
          })),
        );
        const merged = sortDnsRecordsByType(
          [...cachedFresh, ...fetchedStale],
          types,
        );
        const counts = (types as DnsType[]).reduce(
          (acc, t) => {
            acc[t] = agg.records.filter((r) => r.type === t).length;
            acc[t] = merged.filter((r) => r.type === t).length;
            return acc;
          },
          { A: 0, AAAA: 0, MX: 0, TXT: 0, NS: 0 } as Record<DnsType, number>,
        );
      })(),
      cloudflare_ip_present: agg.records.some(
        (r) => (r.type === "A" || r.type === "AAAA") && r.isCloudflare,
      ),
      dns_provider_used: agg.resolver,
      provider_attempts: 0,
      duration_ms_by_provider: {},
      cache_hit: true,
      cache_source: "aggregate_wait",
      lock_acquired: false,
      lock_waited_ms: Date.now() - lockWaitStart,
    });
    console.info("[dns] waited for aggregate", { domain: lower });
    const sortedAggRecords = sortDnsRecordsByType(
      agg.records,
      DnsTypeSchema.options,
    );
    return { records: sortedAggRecords, resolver: agg.resolver };
  }
  const acquiredLock = lockResult.acquired;
  if (!acquiredLock && !lockResult.cachedResult) {
    // Manual short wait/poll for aggregate result in test envs where
    // acquireLockOrWaitForResult does not poll. Keeps callers from duplicating work.
    const start = Date.now();
    const maxWaitMs = 1500;
    const intervalMs = 25;
    // eslint-disable-next-line no-constant-condition
    while (Date.now() - start < maxWaitMs) {
      const agg = (await redis.get(aggregateKey)) as DnsResolveResult | null;
      if (agg && Array.isArray(agg.records)) {
        await captureServer("dns_resolve_all", {
          domain: lower,
          duration_ms_total: Date.now() - startedAt,
          counts: ((): Record<DnsType, number> => {
            return (DnsTypeSchema.options as DnsType[]).reduce(
              (acc, t) => {
                acc[t] = agg.records.filter((r) => r.type === t).length;
                return acc;
              },
              { A: 0, AAAA: 0, MX: 0, TXT: 0, NS: 0 } as Record<
                DnsType,
                number
              >,
            );
          })(),
          cloudflare_ip_present: agg.records.some(
            (r) => (r.type === "A" || r.type === "AAAA") && r.isCloudflare,
          ),
          dns_provider_used: agg.resolver,
          provider_attempts: 0,
          duration_ms_by_provider: {},
          cache_hit: true,
          cache_source: "aggregate_wait",
          lock_acquired: false,
          lock_waited_ms: Date.now() - start,
        });
        const sortedAggRecords = sortDnsRecordsByType(
          agg.records,
          DnsTypeSchema.options,
        const cloudflareIpPresent = merged.some(
          (r) => (r.type === "A" || r.type === "AAAA") && r.isCloudflare,
        );
        return { records: sortedAggRecords, resolver: agg.resolver };
        await captureServer("dns_resolve_all", {
          domain: registrable ?? domain,
          duration_ms_total: Date.now() - startedAt,
          counts,
          cloudflare_ip_present: cloudflareIpPresent,
          dns_provider_used: pinnedProvider.key,
          provider_attempts: 1,
          duration_ms_by_provider: durationByProvider,
          cache_hit: false,
          cache_source: "partial",
        });
        console.info("[dns] ok (partial)", {
          domain: registrable,
          counts,
          resolver: pinnedProvider.key,
          duration_ms_total: Date.now() - startedAt,
        });
        return {
          records: merged,
          resolver: pinnedProvider.key,
        } as DnsResolveResult;
      } catch (err) {
        console.warn("[dns] partial refresh failed; falling back", {
          domain: registrable,
          provider: pinnedProvider.key,
          error: (err as Error)?.message,
        });
        // Fall through to full provider loop below
      }
      await new Promise((r) => setTimeout(r, intervalMs));
    }
  }

  // Provider-agnostic cache check: if all types are cached, return immediately
  const types = DnsTypeSchema.options;
  const cachedByType = await Promise.all(
    types.map(async (type) =>
      redis.get<DnsRecord[]>(ns("dns", `${lower}:${type}`)),
    ),
  );
  const allCached = cachedByType.every((arr) => Array.isArray(arr));
  if (allCached) {
    // Ensure per-type cached arrays are normalized for sorting
    const sortedByType = (cachedByType as DnsRecord[][]).map((arr, idx) =>
      sortDnsRecordsForType(arr.slice(), types[idx] as DnsType),
    );
    const flat = (sortedByType as DnsRecord[][]).flat();
    const counts = types.reduce(
      (acc, t) => {
        acc[t] = flat.filter((r) => r.type === t).length;
        return acc;
      },
      { A: 0, AAAA: 0, MX: 0, TXT: 0, NS: 0 } as Record<DnsType, number>,
    );
    const cloudflareIpPresent = flat.some(
      (r) => (r.type === "A" || r.type === "AAAA") && r.isCloudflare,
    );
    const resolverUsed =
      ((await redis.get(ns("dns", lower, "resolver"))) as DnsResolver | null) ||
      "cloudflare";
    try {
      await redis.set(
        aggregateKey,
        { records: flat, resolver: resolverUsed },
        {
          ex: 5 * 60,
        },
      );
    } catch {}
    await captureServer("dns_resolve_all", {
      domain: lower,
      duration_ms_total: Date.now() - startedAt,
      counts,
      cloudflare_ip_present: cloudflareIpPresent,
      dns_provider_used: resolverUsed,
      provider_attempts: 0,
      duration_ms_by_provider: {},
      cache_hit: true,
      cache_source: "per_type",
      lock_acquired: acquiredLock,
      lock_waited_ms: acquiredLock ? 0 : Date.now() - lockWaitStart,
    });
    console.info("[dns] cache hit", {
      domain: lower,
      counts,
      resolver: resolverUsed,
    });
    if (acquiredLock) {
      try {
        await redis.del(lockKey);
      } catch {}
    }
    return { records: flat, resolver: resolverUsed } as DnsResolveResult;
  }

  for (let attemptIndex = 0; attemptIndex < providers.length; attemptIndex++) {
    const provider = providers[attemptIndex] as DohProvider;
    const attemptStart = Date.now();
    try {
      let usedFresh = false;
      const results = await Promise.all(
        types.map(async (type) => {
          const key = ns("dns", lower, type);
          const cached = await redis.get<DnsRecord[]>(key);
          if (cached) {
            return sortDnsRecordsForType(cached.slice(), type as DnsType);
          }
          const fresh = await resolveTypeWithProvider(domain, type, provider);
          await redis.set(key, fresh, { ex: 5 * 60 });
          usedFresh = usedFresh || true;
          return fresh;
          return await resolveTypeWithProvider(domain, type, provider);
        }),
      );
      const flat = results.flat();
@@ -275,54 +294,69 @@ export async function resolveAll(domain: string): Promise<DnsResolveResult> {
      const cloudflareIpPresent = flat.some(
        (r) => (r.type === "A" || r.type === "AAAA") && r.isCloudflare,
      );
      // Persist the resolver metadata only when we actually fetched fresh data
      if (usedFresh) {
        await redis.set(ns("dns", `${lower}:resolver`), provider.key, {
          ex: 5 * 60,
      const resolverUsed = provider.key;

      // Persist to Postgres
      const now = new Date();
      const recordsByType: Record<DnsType, DnsRecord[]> = {
        A: [],
        AAAA: [],
        MX: [],
        TXT: [],
        NS: [],
      };
      for (const r of flat) recordsByType[r.type].push(r);
      if (d) {
        await replaceDns({
          domainId: d.id,
          resolver: resolverUsed,
          fetchedAt: now,
          recordsByType: Object.fromEntries(
            (Object.keys(recordsByType) as DnsType[]).map((t) => [
              t,
              (recordsByType[t] as DnsRecord[]).map((r) => ({
                name: r.name,
                value: r.value,
                ttl: r.ttl ?? null,
                priority: r.priority ?? null,
                isCloudflare: r.isCloudflare ?? null,
                expiresAt: ttlForDnsRecord(now, r.ttl ?? null),
              })),
            ]),
          ) as Record<
            DnsType,
            Array<{
              name: string;
              value: string;
              ttl: number | null;
              priority: number | null;
              isCloudflare: boolean | null;
              expiresAt: Date;
            }>
          >,
        });
      }
      const resolverUsed = usedFresh
        ? provider.key
        : ((await redis.get(
            ns("dns", lower, "resolver"),
          )) as DnsResolver | null) || provider.key;
      try {
        await redis.set(
          aggregateKey,
          { records: flat, resolver: resolverUsed },
          {
            ex: 5 * 60,
          },
        );
      } catch {}
      await captureServer("dns_resolve_all", {
        domain: lower,
        domain: registrable ?? domain,
        duration_ms_total: Date.now() - startedAt,
        counts,
        cloudflare_ip_present: cloudflareIpPresent,
        dns_provider_used: resolverUsed,
        provider_attempts: attemptIndex + 1,
        duration_ms_by_provider: durationByProvider,
        cache_hit: !usedFresh,
        cache_source: usedFresh ? "fresh" : "per_type",
        lock_acquired: acquiredLock,
        lock_waited_ms: acquiredLock ? 0 : Date.now() - lockWaitStart,
        cache_hit: false,
        cache_source: "fresh",
      });
      console.info("[dns] ok", {
        domain: lower,
        domain: registrable,
        counts,
        resolver: resolverUsed,
        duration_ms_total: Date.now() - startedAt,
      });
      if (acquiredLock) {
        try {
          await redis.del(lockKey);
        } catch {}
      }
      return { records: flat, resolver: resolverUsed } as DnsResolveResult;
    } catch (err) {
      console.warn("[dns] provider attempt failed", {
        domain: lower,
        domain: registrable,
        provider: provider.key,
        error: (err as Error)?.message,
      });
@@ -334,18 +368,18 @@ export async function resolveAll(domain: string): Promise<DnsResolveResult> {

  // All providers failed
  await captureServer("dns_resolve_all", {
    domain: lower,
    domain: registrable ?? domain,
    duration_ms_total: Date.now() - startedAt,
    failure: true,
    provider_attempts: providers.length,
  });
  console.error("[dns] all providers failed", {
    domain: lower,
    domain: registrable,
    providers: providers.map((p) => p.key),
    error: String(lastError),
  });
  throw new Error(
    `All DoH providers failed for ${lower}: ${String(lastError)}`,
    `All DoH providers failed for ${registrable ?? domain}: ${String(lastError)}`,
  );
}
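The control flow in `resolveAll` reduces to a single predicate: a record type is fresh iff it has rows and none is past its `expiresAt`. Fully fresh returns straight from Postgres, partially fresh refetches only the stale types from the pinned resolver, and anything else falls through to the full provider loop. Restated as a standalone helper for clarity:

```ts
// Standalone restatement of the freshness rule used in resolveAll.
type Row = { expiresAt: Date | null };

function typeIsFresh(rows: Row[], nowMs: number): boolean {
  return (
    rows.length > 0 &&
    rows.every((r) => (r.expiresAt?.getTime() ?? 0) > nowMs)
  );
}

// One expired row makes the whole type stale, triggering a partial
// refetch of just that record type.
const stale = !typeIsFresh(
  [{ expiresAt: new Date(Date.now() - 1_000) }],
  Date.now(),
); // true
```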
@@ -1,6 +1,12 @@
/* @vitest-environment node */
import { afterEach, describe, expect, it, vi } from "vitest";
import { probeHeaders } from "./headers";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";

beforeEach(async () => {
  vi.resetModules();
  const { makePGliteDb } = await import("@/server/db/pglite");
  const { db } = await makePGliteDb();
  vi.doMock("@/server/db/client", () => ({ db }));
});

afterEach(() => {
  vi.restoreAllMocks();
@@ -8,52 +14,34 @@ afterEach(() => {
});

describe("probeHeaders", () => {
  it("uses HEAD when available and caches result", async () => {
    const head = new Response(null, {
      status: 200,
      headers: {
        server: "vercel",
        "x-vercel-id": "abc",
      },
    });
    const fetchMock = vi
      .spyOn(global, "fetch")
      .mockImplementation(async (_url, init?: RequestInit) => {
        if ((init?.method || "HEAD") === "HEAD") return head;
        return new Response(null, { status: 500 });
      });

    const out = await probeHeaders("example.com");
    expect(out.length).toBeGreaterThan(0);
    expect(globalThis.__redisTestHelper.store.has("headers:example.com")).toBe(
      true,
    );
    fetchMock.mockRestore();
  });

  it("falls back to GET when HEAD fails", async () => {
  it("uses GET and caches result", async () => {
    const get = new Response(null, {
      status: 200,
      headers: { server: "cloudflare", "cf-ray": "id" },
      headers: {
        server: "vercel",
        "x-vercel-id": "abc",
      },
    });
    const fetchMock = vi
      .spyOn(global, "fetch")
      .mockImplementation(async (_url, init?: RequestInit) => {
        if ((init?.method || "HEAD") === "HEAD")
          return new Response(null, { status: 500 });
        return get;
        if ((init?.method || "GET") === "GET") return get;
        return new Response(null, { status: 500 });
      });

    const out = await probeHeaders("example.com");
    expect(out.find((h) => h.name === "server")).toBeTruthy();
    expect(globalThis.__redisTestHelper.store.has("headers:example.com")).toBe(
      true,
    );
    const { probeHeaders } = await import("./headers");
    const out1 = await probeHeaders("example.com");
    expect(out1.length).toBeGreaterThan(0);
    const fetchSpy = vi.spyOn(global, "fetch");
    const out2 = await probeHeaders("example.com");
    expect(out2.length).toBe(out1.length);
    expect(fetchSpy).not.toHaveBeenCalled();
    fetchSpy.mockRestore();
    fetchMock.mockRestore();
  });

  it("dedupes concurrent callers via lock/wait", async () => {
    const head = new Response(null, {
  it("handles concurrent callers and returns consistent results", async () => {
    const get = new Response(null, {
      status: 200,
      headers: {
        server: "vercel",
@@ -63,10 +51,11 @@ describe("probeHeaders", () => {
    const fetchMock = vi
      .spyOn(global, "fetch")
      .mockImplementation(async (_url, init?: RequestInit) => {
        if ((init?.method || "HEAD") === "HEAD") return head;
        if ((init?.method || "GET") === "GET") return get;
        return new Response(null, { status: 500 });
      });

    const { probeHeaders } = await import("./headers");
    const [a, b, c] = await Promise.all([
      probeHeaders("example.com"),
      probeHeaders("example.com"),
@@ -75,8 +64,7 @@ describe("probeHeaders", () => {
    expect(a.length).toBeGreaterThan(0);
    expect(b.length).toBe(a.length);
    expect(c.length).toBe(a.length);
    // HEAD called once; no GETs should be needed after first completes
    expect(fetchMock).toHaveBeenCalledTimes(1);
    // Only assert that all calls returned equivalent results; caching is validated elsewhere
    fetchMock.mockRestore();
  });

@@ -84,11 +72,9 @@ describe("probeHeaders", () => {
    const fetchMock = vi.spyOn(global, "fetch").mockImplementation(async () => {
      throw new Error("network");
    });
    const out = await probeHeaders("example.com");
    expect(out).toEqual([]);
    expect(globalThis.__redisTestHelper.store.has("headers:example.com")).toBe(
      false,
    );
    const { probeHeaders } = await import("./headers");
    const out = await probeHeaders("fail.example");
    expect(out.length).toBe(0);
    fetchMock.mockRestore();
  });
});
@@ -1,58 +1,58 @@
import { eq } from "drizzle-orm";
import { getDomainTld } from "rdapper";
import { captureServer } from "@/lib/analytics/server";
import { acquireLockOrWaitForResult } from "@/lib/cache";
import { headThenGet } from "@/lib/fetch";
import { ns, redis } from "@/lib/redis";
import { toRegistrableDomain } from "@/lib/domain-server";
import { fetchWithTimeout } from "@/lib/fetch";
import type { HttpHeader } from "@/lib/schemas";
import { db } from "@/server/db/client";
import { httpHeaders } from "@/server/db/schema";
import { ttlForHeaders } from "@/server/db/ttl";
import { upsertDomain } from "@/server/repos/domains";
import { replaceHeaders } from "@/server/repos/headers";

export async function probeHeaders(domain: string): Promise<HttpHeader[]> {
  const lower = domain.toLowerCase();
  const url = `https://${domain}/`;
  const key = ns("headers", lower);
  const lockKey = ns("lock", "headers", lower);

  console.debug("[headers] start", { domain: lower });
  const cached = await redis.get<HttpHeader[]>(key);
  if (cached) {
    console.info("[headers] cache hit", {
      domain: lower,
      count: cached.length,
    });
    return cached;
  }

  // Try to acquire lock or wait for someone else's result
  const lockWaitStart = Date.now();
  const lockResult = await acquireLockOrWaitForResult<HttpHeader[]>({
    lockKey,
    resultKey: key,
    lockTtl: 30,
  });
  if (!lockResult.acquired && Array.isArray(lockResult.cachedResult)) {
    return lockResult.cachedResult;
  }
  const acquiredLock = lockResult.acquired;
  if (!acquiredLock && !lockResult.cachedResult) {
    // Short poll for cached result to avoid duplicate external requests when the
    // helper cannot poll in the current environment
    const start = Date.now();
    const maxWaitMs = 1500;
    const intervalMs = 25;
    while (Date.now() - start < maxWaitMs) {
      const result = (await redis.get<HttpHeader[]>(key)) as
        | HttpHeader[]
        | null;
      if (Array.isArray(result)) {
        return result;
      }
      await new Promise((r) => setTimeout(r, intervalMs));
  console.debug("[headers] start", { domain });
  // Fast path: read from Postgres if fresh
  const registrable = toRegistrableDomain(domain);
  const d = registrable
    ? await upsertDomain({
        name: registrable,
        tld: getDomainTld(registrable) ?? "",
        unicodeName: domain,
      })
    : null;
  const existing = d
    ? await db
        .select({
          name: httpHeaders.name,
          value: httpHeaders.value,
          expiresAt: httpHeaders.expiresAt,
        })
        .from(httpHeaders)
        .where(eq(httpHeaders.domainId, d.id))
    : ([] as Array<{ name: string; value: string; expiresAt: Date | null }>);
  if (existing.length > 0) {
    const now = Date.now();
    const fresh = existing.every((h) => (h.expiresAt?.getTime?.() ?? 0) > now);
    if (fresh) {
      const normalized = normalize(
        existing.map((h) => ({ name: h.name, value: h.value })),
      );
      console.info("[headers] db hit", {
        domain: registrable,
        count: normalized.length,
      });
      return normalized;
    }
  }

  const REQUEST_TIMEOUT_MS = 5000;
  try {
    const { response: final, usedMethod } = await headThenGet(
    // Use GET to ensure provider-identifying headers are present on first load.
    const final = await fetchWithTimeout(
      url,
      {},
      { method: "GET", redirect: "follow" },
      { timeoutMs: REQUEST_TIMEOUT_MS },
    );

@@ -63,52 +63,46 @@ export async function probeHeaders(domain: string): Promise<HttpHeader[]> {
    const normalized = normalize(headers);

    await captureServer("headers_probe", {
      domain: lower,
      domain: registrable ?? domain,
      status: final.status,
      used_method: usedMethod,
      used_method: "GET",
      final_url: final.url,
      lock_acquired: acquiredLock,
      lock_waited_ms: acquiredLock ? 0 : Date.now() - lockWaitStart,
    });

    await redis.set(key, normalized, { ex: 10 * 60 });
    // Persist to Postgres
    const now = new Date();
    if (d) {
      await replaceHeaders({
        domainId: d.id,
        headers: normalized,
        fetchedAt: now,
        expiresAt: ttlForHeaders(now),
      });
    }
    console.info("[headers] ok", {
      domain: lower,
      domain: registrable,
      status: final.status,
      count: normalized.length,
    });
    if (acquiredLock) {
      try {
        await redis.del(lockKey);
      } catch {}
    }
    return normalized;
  } catch (err) {
    console.warn("[headers] error", {
      domain: lower,
      domain: registrable ?? domain,
      error: (err as Error)?.message,
    });
    await captureServer("headers_probe", {
      domain: lower,
      domain: registrable ?? domain,
      status: -1,
      used_method: "ERROR",
      final_url: url,
      error: String(err),
      lock_acquired: acquiredLock,
      lock_waited_ms: acquiredLock ? 0 : Date.now() - lockWaitStart,
    });
    // Return empty on failure without caching to avoid long-lived negatives
    if (acquiredLock) {
      try {
        await redis.del(lockKey);
      } catch {}
    }
    return [];
  }
}

function normalize(h: HttpHeader[]): HttpHeader[] {
  // sort important first
  // Normalize header names (trim + lowercase) then sort important first
  const important = new Set([
    "strict-transport-security",
    "content-security-policy",
@@ -120,7 +114,11 @@ function normalize(h: HttpHeader[]): HttpHeader[] {
    "cache-control",
    "permissions-policy",
  ]);
  return [...h].sort(
  const normalized = h.map((hdr) => ({
    name: hdr.name.trim().toLowerCase(),
    value: hdr.value,
  }));
  return normalized.sort(
    (a, b) =>
      Number(important.has(b.name)) - Number(important.has(a.name)) ||
      a.name.localeCompare(b.name),
@@ -1,24 +1,18 @@
/* @vitest-environment node */
import type { Mock } from "vitest";
import { afterEach, describe, expect, it, vi } from "vitest";
import { detectHosting } from "./hosting";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";

// Import lazily inside tests after DB injection to avoid importing the client early

// Mocks for dependencies used by detectHosting
vi.mock("@/server/services/dns", () => ({
  resolveAll: vi.fn(async (_domain: string) => ({
    records: [],
    source: "mock",
  })),
  resolveAll: vi.fn(async () => ({ records: [], source: "mock" })),
}));

vi.mock("@/server/services/headers", () => ({
  probeHeaders: vi.fn(
    async (_domain: string) => [] as { name: string; value: string }[],
  ),
  probeHeaders: vi.fn(async () => []),
}));

vi.mock("@/server/services/ip", () => ({
  lookupIpMeta: vi.fn(async (_ip: string) => ({
  lookupIpMeta: vi.fn(async () => ({
    geo: {
      city: "",
      region: "",
@@ -32,6 +26,13 @@ vi.mock("@/server/services/ip", () => ({
  })),
}));

beforeEach(async () => {
  vi.resetModules();
  const { makePGliteDb } = await import("@/server/db/pglite");
  const { db } = await makePGliteDb();
  vi.doMock("@/server/db/client", () => ({ db }));
});

afterEach(() => {
  vi.restoreAllMocks();
  globalThis.__redisTestHelper?.reset();
@@ -43,6 +44,7 @@ describe("detectHosting", () => {
    const { resolveAll } = await import("@/server/services/dns");
    const { probeHeaders } = await import("@/server/services/headers");
    const { lookupIpMeta } = await import("@/server/services/ip");
    const { detectHosting } = await import("@/server/services/hosting");

    (resolveAll as unknown as Mock).mockResolvedValue({
      records: [
@@ -93,7 +95,8 @@ describe("detectHosting", () => {
  });

  it("sets hosting to none when no A record is present", async () => {
    const { resolveAll } = await import("./dns");
    const { resolveAll } = await import("@/server/services/dns");
    const { detectHosting } = await import("@/server/services/hosting");
    (resolveAll as unknown as Mock).mockResolvedValue({
      records: [
        {
@@ -119,9 +122,10 @@ describe("detectHosting", () => {
  });

  it("falls back to IP owner when hosting is unknown and IP owner exists", async () => {
    const { resolveAll } = await import("./dns");
    const { probeHeaders } = await import("./headers");
    const { lookupIpMeta } = await import("./ip");
    const { resolveAll } = await import("@/server/services/dns");
    const { probeHeaders } = await import("@/server/services/headers");
    const { lookupIpMeta } = await import("@/server/services/ip");
    const { detectHosting } = await import("@/server/services/hosting");

    (resolveAll as unknown as Mock).mockResolvedValue({
      records: [{ type: "A", name: "x", value: "9.9.9.9", ttl: 60 }],
@@ -147,8 +151,9 @@ describe("detectHosting", () => {
  });

  it("falls back to root domains for email and DNS when unknown", async () => {
    const { resolveAll } = await import("./dns");
    const { probeHeaders } = await import("./headers");
    const { resolveAll } = await import("@/server/services/dns");
    const { probeHeaders } = await import("@/server/services/headers");
    const { detectHosting } = await import("@/server/services/hosting");
    (resolveAll as unknown as Mock).mockResolvedValue({
      records: [
        { type: "A", name: "example.com", value: "1.1.1.1", ttl: 60 },
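The tests above swap the real Drizzle client for an in-memory PGlite database before each case. `server/db/pglite.ts` is not part of this diff; a plausible minimal shape, assuming the drizzle-orm PGlite driver and the repo's `drizzle/` migrations folder:

```ts
// server/db/pglite.ts — hypothetical sketch, not the committed file.
import { PGlite } from "@electric-sql/pglite";
import { drizzle } from "drizzle-orm/pglite";
import { migrate } from "drizzle-orm/pglite/migrator";
import * as schema from "@/server/db/schema";

export async function makePGliteDb() {
  // In-memory Postgres; each call returns a fresh, isolated database.
  const client = new PGlite();
  const db = drizzle(client, { schema });
  // Apply the repo's SQL migrations so tables match production.
  await migrate(db, { migrationsFolder: "drizzle" });
  return { db, client };
}
```

Combined with `vi.resetModules()` and `vi.doMock("@/server/db/client", ...)`, every test starts from an empty schema without touching Neon.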
|
@@ -1,3 +1,6 @@
import { eq } from "drizzle-orm";
import { alias } from "drizzle-orm/pg-core";
import { getDomainTld } from "rdapper";
import { captureServer } from "@/lib/analytics/server";
import { toRegistrableDomain } from "@/lib/domain-server";
import {
@@ -5,8 +8,16 @@ import {
  detectEmailProvider,
  detectHostingProvider,
} from "@/lib/providers/detection";
import { ns, redis } from "@/lib/redis";
import type { Hosting } from "@/lib/schemas";
import { db } from "@/server/db/client";
import {
  hosting as hostingTable,
  providers as providersTable,
} from "@/server/db/schema";
import { ttlForHosting } from "@/server/db/ttl";
import { upsertDomain } from "@/server/repos/domains";
import { upsertHosting } from "@/server/repos/hosting";
import { resolveProviderId } from "@/server/repos/providers";
import { resolveAll } from "@/server/services/dns";
import { probeHeaders } from "@/server/services/headers";
import { lookupIpMeta } from "@/server/services/ip";
@@ -15,11 +26,106 @@ export async function detectHosting(domain: string): Promise<Hosting> {
  const startedAt = Date.now();
  console.debug("[hosting] start", { domain });

  const key = ns("hosting", domain.toLowerCase());
  const cached = await redis.get<Hosting>(key);
  if (cached) {
    console.info("[hosting] cache hit", { domain });
    return cached;
  // Fast path: DB
  const registrable = toRegistrableDomain(domain);
  const d = registrable
    ? await upsertDomain({
        name: registrable,
        tld: getDomainTld(registrable) ?? "",
        unicodeName: domain,
      })
    : null;
  const existing = d
    ? await db
        .select({
          hostingProviderId: hostingTable.hostingProviderId,
          emailProviderId: hostingTable.emailProviderId,
          dnsProviderId: hostingTable.dnsProviderId,
          geoCity: hostingTable.geoCity,
          geoRegion: hostingTable.geoRegion,
          geoCountry: hostingTable.geoCountry,
          geoCountryCode: hostingTable.geoCountryCode,
          geoLat: hostingTable.geoLat,
          geoLon: hostingTable.geoLon,
          expiresAt: hostingTable.expiresAt,
        })
        .from(hostingTable)
        .where(eq(hostingTable.domainId, d.id))
    : ([] as Array<{
        hostingProviderId: string | null;
        emailProviderId: string | null;
        dnsProviderId: string | null;
        geoCity: string | null;
        geoRegion: string | null;
        geoCountry: string | null;
        geoCountryCode: string | null;
        geoLat: number | null;
        geoLon: number | null;
        expiresAt: Date | null;
      }>);
  if (
    d &&
    existing[0] &&
    (existing[0].expiresAt?.getTime?.() ?? 0) > Date.now()
  ) {
    // Fast path: return hydrated providers from DB when TTL is valid
    const hp = alias(providersTable, "hp");
    const ep = alias(providersTable, "ep");
    const dp = alias(providersTable, "dp");
    const hydrated = await db
      .select({
        hostingProviderName: hp.name,
        hostingProviderDomain: hp.domain,
        emailProviderName: ep.name,
        emailProviderDomain: ep.domain,
        dnsProviderName: dp.name,
        dnsProviderDomain: dp.domain,
        geoCity: hostingTable.geoCity,
        geoRegion: hostingTable.geoRegion,
        geoCountry: hostingTable.geoCountry,
        geoCountryCode: hostingTable.geoCountryCode,
        geoLat: hostingTable.geoLat,
        geoLon: hostingTable.geoLon,
      })
      .from(hostingTable)
      .leftJoin(hp, eq(hp.id, hostingTable.hostingProviderId))
      .leftJoin(ep, eq(ep.id, hostingTable.emailProviderId))
      .leftJoin(dp, eq(dp.id, hostingTable.dnsProviderId))
      .where(eq(hostingTable.domainId, d.id))
      .limit(1);
    const row = hydrated[0];
    if (row) {
      const info: Hosting = {
        hostingProvider: {
          name: row.hostingProviderName ?? "Unknown",
          domain: row.hostingProviderDomain ?? null,
        },
        emailProvider: {
          name: row.emailProviderName ?? "Unknown",
          domain: row.emailProviderDomain ?? null,
        },
        dnsProvider: {
          name: row.dnsProviderName ?? "Unknown",
          domain: row.dnsProviderDomain ?? null,
        },
        geo: {
          city: row.geoCity ?? "",
          region: row.geoRegion ?? "",
          country: row.geoCountry ?? "",
          country_code: row.geoCountryCode ?? "",
          lat: row.geoLat ?? null,
          lon: row.geoLon ?? null,
        },
      };
      console.info("[hosting] cache", {
        domain,
        hosting: info.hostingProvider.name,
        email: info.emailProvider.name,
        dns_provider: info.dnsProvider.name,
        duration_ms: Date.now() - startedAt,
      });
      return info;
    }
  }

  const { records: dns } = await resolveAll(domain);
@@ -101,7 +207,7 @@ export async function detectHosting(domain: string): Promise<Hosting> {
    geo,
  };
  await captureServer("hosting_detected", {
    domain,
    domain: registrable ?? domain,
    hosting: hostingName,
    email: emailName,
    dns_provider: dnsName,
@@ -109,9 +215,44 @@ export async function detectHosting(domain: string): Promise<Hosting> {
    geo_country: geo.country || "",
    duration_ms: Date.now() - startedAt,
  });
  await redis.set(key, info, { ex: 24 * 60 * 60 });
  // Persist to Postgres
  const now = new Date();
  if (d) {
    const [hostingProviderId, emailProviderId, dnsProviderId] =
      await Promise.all([
        resolveProviderId({
          category: "hosting",
          domain: hostingIconDomain,
          name: hostingName,
        }),
        resolveProviderId({
          category: "email",
          domain: emailIconDomain,
          name: emailName,
        }),
        resolveProviderId({
          category: "dns",
          domain: dnsIconDomain,
          name: dnsName,
        }),
      ]);
    await upsertHosting({
      domainId: d.id,
      hostingProviderId,
      emailProviderId,
      dnsProviderId,
      geoCity: geo.city,
      geoRegion: geo.region,
      geoCountry: geo.country,
      geoCountryCode: geo.country_code,
      geoLat: geo.lat ?? null,
      geoLon: geo.lon ?? null,
      fetchedAt: now,
      expiresAt: ttlForHosting(now),
    });
  }
  console.info("[hosting] ok", {
    domain,
    domain: registrable ?? domain,
    hosting: hostingName,
    email: emailName,
    dns_provider: dnsName,
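`upsertHosting` and `resolveProviderId` live in `server/repos/*` and are only referenced here. A hedged sketch of the one-row-per-domain upsert pattern they likely follow, using Drizzle's `onConflictDoUpdate` (the conflict target and column set are assumptions based on the call site above):

```ts
// server/repos/hosting.ts — hypothetical sketch, not the committed file.
import { db } from "@/server/db/client";
import { hosting } from "@/server/db/schema";

type UpsertHostingInput = typeof hosting.$inferInsert;

export async function upsertHosting(values: UpsertHostingInput) {
  // One hosting snapshot per domain: insert, or refresh the row in place
  // (assumes a unique constraint on hosting.domainId).
  await db
    .insert(hosting)
    .values(values)
    .onConflictDoUpdate({
      target: hosting.domainId,
      set: values,
    });
}
```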
|
@@ -1,3 +1,4 @@
import { getDomainTld } from "rdapper";
import { acquireLockOrWaitForResult } from "@/lib/cache";
import { ns, redis } from "@/lib/redis";
import type { Pricing } from "@/lib/schemas";
@@ -15,7 +16,10 @@ type DomainPricingResponse = {
 * Individual TLD lookups read from the cached payload.
 */
export async function getPricingForTld(domain: string): Promise<Pricing> {
  const tld = domain.split(".").slice(1).join(".").toLowerCase();
  const input = (domain ?? "").trim().toLowerCase();
  // Ignore single-label hosts like "localhost" or invalid inputs
  if (!input.includes(".")) return { tld: null, price: null };
  const tld = getDomainTld(input)?.toLowerCase() ?? "";
  if (!tld) return { tld: null, price: null };

  const resultKey = ns("pricing");
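The old `split(".")` derivation breaks as soon as the input has a subdomain, which is why the rewrite goes through rdapper's `getDomainTld`. A quick illustration (the multi-label expectation assumes public-suffix-aware behavior in rdapper):

```ts
// Naive: "the TLD is everything after the first label" — wrong for subdomains.
"www.example.com".split(".").slice(1).join("."); // => "example.com", not "com"

// Public-suffix-aware lookup, as used above:
// getDomainTld("www.example.com") -> "com"
// getDomainTld("shop.example.co.uk") would plausibly yield "co.uk",
// assuming rdapper resolves multi-label public suffixes.
```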
|
@@ -1,12 +1,7 @@
/* @vitest-environment node */
import { afterEach, describe, expect, it, vi } from "vitest";
import { getRegistration } from "./registration";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";

vi.mock("@/lib/domain-server", () => ({
  toRegistrableDomain: (d: string) => (d ? d.toLowerCase() : null),
}));

vi.mock("rdapper", () => ({
const hoisted = vi.hoisted(() => ({
  lookupDomain: vi.fn(async (_domain: string) => ({
    ok: true,
    error: null,
@@ -18,45 +13,127 @@ vi.mock("rdapper", () => ({
  })),
}));

vi.mock("rdapper", async (importOriginal) => {
  const actual = await importOriginal<typeof import("rdapper")>();
  return {
    ...actual,
    lookupDomain: hoisted.lookupDomain,
  };
});

vi.mock("@/lib/domain-server", async (importOriginal) => {
  const actual = await importOriginal<typeof import("@/lib/domain-server")>();
  return {
    ...actual,
    toRegistrableDomain: (input: string) => {
      const v = (input ?? "").trim().toLowerCase();
      if (!v) return null;
      return v;
    },
  };
});

describe("getRegistration", () => {
  beforeEach(async () => {
    vi.resetModules();
    const { makePGliteDb } = await import("@/server/db/pglite");
    const { db } = await makePGliteDb();
    vi.doMock("@/server/db/client", () => ({ db }));
  });

  afterEach(() => {
    vi.restoreAllMocks();
    globalThis.__redisTestHelper.reset();
  });
  it("returns cached record when present", async () => {
    globalThis.__redisTestHelper.store.set("reg:example.com", {
      isRegistered: true,
      source: "rdap",

  it("returns cached record when present (DB fast-path, rdapper not called)", async () => {
    const { upsertDomain } = await import("@/server/repos/domains");
    const { upsertRegistration } = await import("@/server/repos/registrations");
    const { lookupDomain } = await import("rdapper");
    const spy = lookupDomain as unknown as import("vitest").Mock;
    spy.mockClear();

    const d = await upsertDomain({
      name: "example.com",
      tld: "com",
      unicodeName: "example.com",
    });
    await upsertRegistration({
      domainId: d.id,
      isRegistered: true,
      registry: "verisign",
      statuses: [],
      contacts: { contacts: [] },
      whoisServer: null,
      rdapServers: [],
      source: "rdap",
      fetchedAt: new Date("2024-01-01T00:00:00.000Z"),
      expiresAt: new Date("2099-01-01T00:00:00.000Z"),
      transferLock: null,
      creationDate: null,
      updatedDate: null,
      expirationDate: null,
      deletionDate: null,
      registrarProviderId: null,
      resellerProviderId: null,
      nameservers: [],
    });

    const { getRegistration } = await import("./registration");
    const rec = await getRegistration("example.com");
    expect(rec.isRegistered).toBe(true);
    expect(spy).not.toHaveBeenCalled();
  });

  it("loads via rdapper and caches on miss", async () => {
    globalThis.__redisTestHelper.reset();
    const { getRegistration } = await import("./registration");
    const rec = await getRegistration("example.com");
    expect(rec.isRegistered).toBe(true);
    expect(rec.registrarProvider?.name).toBe("GoDaddy");
    expect(globalThis.__redisTestHelper.store.has("reg:example.com")).toBe(
      true,
    );
  });

  it("sets shorter TTL for unregistered domains (observed via second call)", async () => {
    globalThis.__redisTestHelper.reset();
    // Swap rdapper mock to return unregistered on next call
    const { lookupDomain } = await import("rdapper");
    (lookupDomain as unknown as import("vitest").Mock).mockResolvedValueOnce({
      ok: true,
      error: null,
      record: { isRegistered: false, source: "rdap" },
    });
    const rec = await getRegistration("unregistered.test");
    expect(rec.isRegistered).toBe(false);
  });
    // Freeze time for deterministic TTL checks
    vi.useFakeTimers();
    try {
      const fixedNow = new Date("2024-01-01T00:00:00.000Z");
      vi.setSystemTime(fixedNow);

  it("throws on invalid input", async () => {
    // our mock toRegistrableDomain returns null for empty
    await expect(getRegistration("")).rejects.toThrow("Invalid domain");
      const { getRegistration } = await import("./registration");
      const rec = await getRegistration("unregistered.test");
      expect(rec.isRegistered).toBe(false);

      // Verify stored TTL is 6h from now for unregistered
      const { db } = await import("@/server/db/client");
      const { domains, registrations } = await import("@/server/db/schema");
      const { eq } = await import("drizzle-orm");
      const d = await db
        .select({ id: domains.id })
        .from(domains)
        .where(eq(domains.name, "unregistered.test"))
        .limit(1);
      const row = (
        await db
          .select()
          .from(registrations)
          .where(eq(registrations.domainId, d[0].id))
          .limit(1)
      )[0];
      expect(row).toBeTruthy();
      expect(row.isRegistered).toBe(false);
      expect(row.expiresAt.getTime() - fixedNow.getTime()).toBe(
        6 * 60 * 60 * 1000,
      );
    } finally {
      vi.useRealTimers();
    }
  });
});
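One detail worth calling out from the mock rewrite above: `vi.mock` factories are hoisted above imports, so shared mock state must be created with `vi.hoisted`, as the new test does. A minimal, self-contained illustration of the pitfall this avoids:

```ts
import { vi } from "vitest";

// vi.mock factories run before top-level `const` declarations are
// initialized, so closing over a plain const would throw at runtime.
// vi.hoisted evaluates its callback early, making the value safe to share.
const state = vi.hoisted(() => ({ lookups: 0 }));

vi.mock("rdapper", () => ({
  lookupDomain: async () => {
    state.lookups++; // safe: `state` was created by vi.hoisted
    return { ok: true, error: null, record: { isRegistered: true } };
  },
}));
```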
|
@@ -1,9 +1,19 @@
import { lookupDomain } from "rdapper";
import { eq } from "drizzle-orm";
import { getDomainTld, lookupDomain } from "rdapper";
import { captureServer } from "@/lib/analytics/server";
import { toRegistrableDomain } from "@/lib/domain-server";
import { detectRegistrar } from "@/lib/providers/detection";
import { ns, redis } from "@/lib/redis";
import type { Registration } from "@/lib/schemas";
import { db } from "@/server/db/client";
import {
  providers,
  registrationNameservers,
  registrations,
} from "@/server/db/schema";
import { ttlForRegistration } from "@/server/db/ttl";
import { upsertDomain } from "@/server/repos/domains";
import { resolveProviderId } from "@/server/repos/providers";
import { upsertRegistration } from "@/server/repos/registrations";

/**
 * Fetch domain registration using rdapper and cache the normalized DomainRecord.
@@ -14,27 +24,111 @@ export async function getRegistration(domain: string): Promise<Registration> {
  const startedAt = Date.now();
  console.debug("[registration] start", { domain });

  // Try current snapshot
  const registrable = toRegistrableDomain(domain);
  if (!registrable) throw new Error("Invalid domain");
  const d = registrable
    ? await upsertDomain({
        name: registrable,
        tld: getDomainTld(registrable) ?? "",
        unicodeName: domain,
      })
    : null;
  if (d) {
    const existing = await db
      .select()
      .from(registrations)
      .where(eq(registrations.domainId, d.id))
      .limit(1);
    const now = new Date();
    if (existing[0] && existing[0].expiresAt > now) {
      const row = existing[0];
      // Resolve registrar provider details if present
      let registrarProvider = {
        name: "Unknown",
        domain: null as string | null,
      };
      if (row.registrarProviderId) {
        const prov = await db
          .select({ name: providers.name, domain: providers.domain })
          .from(providers)
          .where(eq(providers.id, row.registrarProviderId))
          .limit(1);
        if (prov[0]) {
          registrarProvider = {
            name: prov[0].name,
            domain: prov[0].domain ?? null,
          };
        }
      }

  const key = ns("reg", registrable.toLowerCase());
  const cached = await redis.get<Registration>(key);
  if (cached) {
    console.info("[registration] cache hit", { domain: registrable });
    return cached;
      // Load nameservers for this domain
      const ns = await db
        .select({
          host: registrationNameservers.host,
          ipv4: registrationNameservers.ipv4,
          ipv6: registrationNameservers.ipv6,
        })
        .from(registrationNameservers)
        .where(eq(registrationNameservers.domainId, d.id));

      const contactsArray: Registration["contacts"] =
        row.contacts?.contacts ?? [];

      const response: Registration = {
        domain: registrable as string,
        tld: d.tld,
        isRegistered: row.isRegistered,
        privacyEnabled: row.privacyEnabled ?? false,
        unicodeName: d.unicodeName,
        punycodeName: d.name,
        registry: row.registry ?? undefined,
        // registrar object is optional; we don't persist its full details, so omit
        statuses: row.statuses ?? undefined,
        creationDate: row.creationDate?.toISOString(),
        updatedDate: row.updatedDate?.toISOString(),
        expirationDate: row.expirationDate?.toISOString(),
        deletionDate: row.deletionDate?.toISOString(),
        transferLock: row.transferLock ?? undefined,
        nameservers:
          ns.length > 0
            ? ns.map((n) => ({ host: n.host, ipv4: n.ipv4, ipv6: n.ipv6 }))
            : undefined,
        contacts: contactsArray,
        whoisServer: row.whoisServer ?? undefined,
        rdapServers: row.rdapServers ?? undefined,
        source: row.source as Registration["source"],
        registrarProvider,
      };

      await captureServer("registration_lookup", {
        domain: registrable ?? domain,
        outcome: row.isRegistered ? "ok" : "unregistered",
        cached: true,
        duration_ms: Date.now() - startedAt,
        source: row.source,
      });
      console.info("[registration] ok (cached)", {
        domain: registrable ?? domain,
        registered: row.isRegistered,
        registrar: registrarProvider.name,
        duration_ms: Date.now() - startedAt,
      });

      return response;
    }
  }

  const { ok, record, error } = await lookupDomain(registrable, {
  const { ok, record, error } = await lookupDomain(registrable ?? domain, {
    timeoutMs: 5000,
  });

  if (!ok || !record) {
    console.warn("[registration] error", {
      domain: registrable,
      domain: registrable ?? domain,
      error: error || "unknown",
    });
    await captureServer("registration_lookup", {
      domain: registrable,
      domain: registrable ?? domain,
      outcome: "error",
      cached: false,
      error: error || "unknown",
@@ -47,7 +141,6 @@ export async function getRegistration(domain: string): Promise<Registration> {
    ...record,
  });

  const ttl = record.isRegistered ? 24 * 60 * 60 : 60 * 60;
  let registrarName = (record.registrar?.name || "").toString();
  let registrarDomain: string | null = null;
  const det = detectRegistrar(registrarName);
@@ -71,16 +164,56 @@ export async function getRegistration(domain: string): Promise<Registration> {
    },
  };

  await redis.set(key, withProvider, { ex: ttl });
  // Persist snapshot
  if (d) {
    const fetchedAt = new Date();
    const registrarProviderId = await resolveProviderId({
      category: "registrar",
      domain: registrarDomain,
      name: registrarName,
    });
    const expiresAt = ttlForRegistration(
      fetchedAt,
      record.isRegistered,
      record.expirationDate ? new Date(record.expirationDate) : null,
    );
    await upsertRegistration({
      domainId: d.id,
      isRegistered: record.isRegistered,
      privacyEnabled: record.privacyEnabled ?? false,
      registry: record.registry ?? null,
      creationDate: record.creationDate ? new Date(record.creationDate) : null,
      updatedDate: record.updatedDate ? new Date(record.updatedDate) : null,
      expirationDate: record.expirationDate
        ? new Date(record.expirationDate)
        : null,
      deletionDate: record.deletionDate ? new Date(record.deletionDate) : null,
      transferLock: record.transferLock ?? null,
      statuses: record.statuses ?? [],
      contacts: { contacts: record.contacts ?? [] },
      whoisServer: record.whoisServer ?? null,
      rdapServers: record.rdapServers ?? [],
      source: record.source,
      registrarProviderId,
      resellerProviderId: null,
      fetchedAt,
      expiresAt,
      nameservers: (record.nameservers ?? []).map((n) => ({
        host: n.host,
        ipv4: n.ipv4 ?? [],
        ipv6: n.ipv6 ?? [],
      })),
    });
  }
  await captureServer("registration_lookup", {
    domain: registrable,
    domain: registrable ?? domain,
    outcome: record.isRegistered ? "ok" : "unregistered",
    cached: false,
    duration_ms: Date.now() - startedAt,
    source: record.source,
  });
  console.info("[registration] ok", {
    domain: registrable,
    domain: registrable ?? domain,
    registered: record.isRegistered,
    registrar: withProvider.registrarProvider.name,
    duration_ms: Date.now() - startedAt,
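The row-level expirations come from `server/db/ttl.ts`, which this diff only imports. A hedged sketch of its likely shape: the 6-hour unregistered window is taken from the test above, the 24-hour registered window mirrors the Redis TTL the old code used, and everything else is an assumption:

```ts
// server/db/ttl.ts — hypothetical sketch, not the committed file.
const HOURS = 60 * 60 * 1000;

export function ttlForRegistration(
  fetchedAt: Date,
  isRegistered: boolean,
  expirationDate: Date | null,
): Date {
  // Unregistered domains re-check sooner (6h per the test expectation above).
  if (!isRegistered) return new Date(fetchedAt.getTime() + 6 * HOURS);
  // Registered: assume a daily refresh, tightened when expiry is imminent.
  const base = new Date(fetchedAt.getTime() + 24 * HOURS);
  if (expirationDate && expirationDate < base) return expirationDate;
  return base;
}

export function ttlForHosting(now: Date): Date {
  return new Date(now.getTime() + 24 * HOURS); // assumed; matches old Redis TTL
}

export function ttlForSeo(now: Date): Date {
  return new Date(now.getTime() + 12 * HOURS); // assumed
}
```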
|
@@ -21,7 +21,11 @@ vi.mock("uploadthing/server", async () => {
  };
});

beforeEach(() => {
beforeEach(async () => {
  vi.resetModules();
  const { makePGliteDb } = await import("@/server/db/pglite");
  const { db } = await makePGliteDb();
  vi.doMock("@/server/db/client", () => ({ db }));
  globalThis.__redisTestHelper.reset();
});

@@ -59,20 +63,37 @@ function textResponse(text: string, contentType = "text/plain") {

describe("getSeo", () => {
  it("uses cached response when meta exists in cache", async () => {
    const { ns, redis } = await import("@/lib/redis");
    const metaKey = ns("seo", "example.com", "meta");
    await redis.set(metaKey, {
      meta: null,
      robots: null,
      preview: null,
      source: { finalUrl: `https://example.com/`, status: 200 },
    const { upsertDomain } = await import("@/server/repos/domains");
    const { upsertSeo } = await import("@/server/repos/seo");
    const { ttlForSeo } = await import("@/server/db/ttl");

    const now = new Date();
    const d = await upsertDomain({
      name: "example.com",
      tld: "com",
      unicodeName: "example.com",
    });
    await upsertSeo({
      domainId: d.id,
      sourceFinalUrl: "https://example.com/",
      sourceStatus: 200,
      metaOpenGraph: {},
      metaTwitter: {},
      metaGeneral: {},
      previewTitle: null,
      previewDescription: null,
      previewImageUrl: null,
      previewImageUploadedUrl: null,
      canonicalUrl: null,
      robots: { fetched: true, groups: [], sitemaps: [] },
      robotsSitemaps: [],
      errors: {},
      fetchedAt: now,
      expiresAt: ttlForSeo(now),
    });

    const fetchSpy = vi.spyOn(global, "fetch");
    const out = await getSeo("example.com");
    expect(out).toBeTruthy();
    expect(fetchSpy).not.toHaveBeenCalled();
    fetchSpy.mockRestore();
  });

  it("sets html error when non-HTML content-type returned", async () => {
@@ -87,8 +108,7 @@ describe("getSeo", () => {
      } as unknown as Response)
      .mockResolvedValueOnce(textResponse("", "text/plain"));

    const out = await getSeo("example.com");
    expect(out.meta).toBeNull();
    const out = await getSeo("nonhtml.invalid");
    expect(out.errors?.html).toMatch(/Non-HTML content-type/i);
    fetchMock.mockRestore();
  });
@@ -99,8 +119,8 @@ describe("getSeo", () => {
      .mockResolvedValueOnce(htmlResponse("<html></html>", "https://x/"))
      .mockResolvedValueOnce(textResponse("{}", "application/json"));

    const out = await getSeo("example.com");
    expect(out.errors?.robots).toMatch(/Unexpected robots content-type/i);
    const out = await getSeo("robots-content.invalid");
    expect(out.errors?.robots ?? "").toMatch(/Unexpected robots content-type/i);
    fetchMock.mockRestore();
  });

@@ -127,35 +147,11 @@ describe("getSeo", () => {
      url: "",
    } as unknown as Response);

    const out = await getSeo("example.com");
    const out = await getSeo("img-fail.invalid");
    // original image remains for Meta Tags display
    expect(out.preview?.image).toBe("https://example.com/og.png");
    expect(out.preview?.image ?? "").toContain("/og.png");
    // uploaded url is null on failure for privacy-safe rendering
    expect(out.preview?.imageUploaded ?? null).toBeNull();
    fetchMock.mockRestore();
  });

  it("uses cached robots when present and avoids second fetch", async () => {
    const { ns, redis } = await import("@/lib/redis");
    const robotsKey = ns("seo", "example.com", "robots");
    await redis.set(robotsKey, {
      fetched: true,
      groups: [{ userAgents: ["*"], rules: [{ type: "allow", value: "/" }] }],
      sitemaps: [],
    });

    const fetchMock = vi
      .spyOn(global, "fetch")
      .mockResolvedValueOnce(
        htmlResponse(
          "<html><head><title>x</title></head></html>",
          "https://example.com/",
        ),
      );

    await getSeo("example.com");
    // Only HTML fetch should have occurred
    expect(fetchMock).toHaveBeenCalledTimes(1);
    fetchMock.mockRestore();
  });
});
|
@@ -1,35 +1,118 @@
import { eq } from "drizzle-orm";
import { getDomainTld } from "rdapper";
import { captureServer } from "@/lib/analytics/server";
import { acquireLockOrWaitForResult } from "@/lib/cache";
import { SOCIAL_PREVIEW_TTL_SECONDS, USER_AGENT } from "@/lib/constants";
import { toRegistrableDomain } from "@/lib/domain-server";
import { fetchWithTimeout } from "@/lib/fetch";
import { optimizeImageCover } from "@/lib/image";
import { ns, redis } from "@/lib/redis";
import type { SeoResponse } from "@/lib/schemas";
import type {
  GeneralMeta,
  OpenGraphMeta,
  RobotsTxt,
  SeoResponse,
  TwitterMeta,
} from "@/lib/schemas";
import { parseHtmlMeta, parseRobotsTxt, selectPreview } from "@/lib/seo";
import { makeImageFileName, uploadImage } from "@/lib/storage";
import { db } from "@/server/db/client";
import { seo as seoTable } from "@/server/db/schema";
import { ttlForSeo } from "@/server/db/ttl";
import { upsertDomain } from "@/server/repos/domains";
import { upsertSeo } from "@/server/repos/seo";

const HTML_TTL_SECONDS = 1 * 60 * 60; // 1 hour
const ROBOTS_TTL_SECONDS = 12 * 60 * 60; // 12 hours
const SOCIAL_WIDTH = 1200;
const SOCIAL_HEIGHT = 630;

export async function getSeo(domain: string): Promise<SeoResponse> {
  const lower = domain.toLowerCase();
  const metaKey = ns("seo", lower, "meta");
  const robotsKey = ns("seo", lower, "robots");

  console.debug("[seo] start", { domain: lower });
  const cached = await redis.get<SeoResponse>(metaKey);
  if (cached) {
    console.info("[seo] cache hit", {
      domain: lower,
      has_meta: !!cached.meta,
      has_robots: !!cached.robots,
    });
    return cached;
  console.debug("[seo] start", { domain });
  // Fast path: DB
  const registrable = toRegistrableDomain(domain);
  const d = registrable
    ? await upsertDomain({
        name: registrable,
        tld: getDomainTld(registrable) ?? "",
        unicodeName: domain,
      })
    : null;
  const existing = d
    ? await db
        .select({
          sourceFinalUrl: seoTable.sourceFinalUrl,
          sourceStatus: seoTable.sourceStatus,
          metaOpenGraph: seoTable.metaOpenGraph,
          metaTwitter: seoTable.metaTwitter,
          metaGeneral: seoTable.metaGeneral,
          previewTitle: seoTable.previewTitle,
          previewDescription: seoTable.previewDescription,
          previewImageUrl: seoTable.previewImageUrl,
          previewImageUploadedUrl: seoTable.previewImageUploadedUrl,
          canonicalUrl: seoTable.canonicalUrl,
          robots: seoTable.robots,
          errors: seoTable.errors,
          expiresAt: seoTable.expiresAt,
        })
        .from(seoTable)
        .where(eq(seoTable.domainId, d.id))
    : ([] as Array<{
        sourceFinalUrl: string | null;
        sourceStatus: number | null;
        metaOpenGraph: OpenGraphMeta;
        metaTwitter: TwitterMeta;
        metaGeneral: GeneralMeta;
        previewTitle: string | null;
        previewDescription: string | null;
        previewImageUrl: string | null;
        previewImageUploadedUrl: string | null;
        canonicalUrl: string | null;
        robots: RobotsTxt;
        errors: Record<string, unknown>;
        expiresAt: Date | null;
      }>);
  if (existing[0] && (existing[0].expiresAt?.getTime?.() ?? 0) > Date.now()) {
    const preview = existing[0].canonicalUrl
      ? {
          title: existing[0].previewTitle ?? null,
          description: existing[0].previewDescription ?? null,
          image: existing[0].previewImageUrl ?? null,
          imageUploaded: existing[0].previewImageUploadedUrl ?? null,
          canonicalUrl: existing[0].canonicalUrl,
        }
      : null;
    // Ensure uploaded image URL is still valid; refresh via Redis-backed cache
    if (preview?.image) {
      try {
        const refreshed = await getOrCreateSocialPreviewImageUrl(
          registrable ?? domain,
          preview.image,
        );
        preview.imageUploaded = refreshed?.url ?? preview.imageUploaded ?? null;
      } catch {
        // keep as-is on transient errors
      }
    }
    const response: SeoResponse = {
      meta: {
        openGraph: existing[0].metaOpenGraph as OpenGraphMeta,
        twitter: existing[0].metaTwitter as TwitterMeta,
        general: existing[0].metaGeneral as GeneralMeta,
      },
      robots: existing[0].robots as RobotsTxt,
      preview,
      source: {
        finalUrl: existing[0].sourceFinalUrl ?? null,
        status: existing[0].sourceStatus ?? null,
      },
      errors: existing[0].errors as Record<string, unknown> as {
        html?: string;
        robots?: string;
      },
    };
    return response;
  }

  let finalUrl: string = `https://${lower}/`;
  let finalUrl: string = `https://${registrable ?? domain}/`;
  let status: number | null = null;
  let htmlError: string | undefined;
  let robotsError: string | undefined;
@@ -51,12 +134,12 @@ export async function getSeo(domain: string): Promise<SeoResponse> {
        "User-Agent": USER_AGENT,
      },
    },
    { timeoutMs: 10000 },
    { timeoutMs: 10000, retries: 1, backoffMs: 200 },
  );
  status = res.status;
  finalUrl = res.url;
  const contentType = res.headers.get("content-type") ?? "";
  if (!contentType.includes("text/html")) {
  if (!/^(text\/html|application\/xhtml\+xml)\b/i.test(contentType)) {
    htmlError = `Non-HTML content-type: ${contentType}`;
  } else {
    const text = await res.text();
@@ -68,34 +151,27 @@ export async function getSeo(domain: string): Promise<SeoResponse> {
    htmlError = String(err);
  }

  // robots.txt fetch (with cache)
  // robots.txt fetch (no Redis cache; stored in Postgres with row TTL)
  try {
    const cachedRobots =
      await redis.get<ReturnType<typeof parseRobotsTxt>>(robotsKey);
    if (cachedRobots) {
      robots = cachedRobots;
    } else {
      const robotsUrl = `https://${lower}/robots.txt`;
      const res = await fetchWithTimeout(
        robotsUrl,
        {
          method: "GET",
          headers: { Accept: "text/plain", "User-Agent": USER_AGENT },
        },
        { timeoutMs: 8000 },
      );
      if (res.ok) {
        const ct = res.headers.get("content-type") ?? "";
        if (ct.includes("text/plain") || ct.includes("text/")) {
          const txt = await res.text();
          robots = parseRobotsTxt(txt, { baseUrl: robotsUrl });
          await redis.set(robotsKey, robots, { ex: ROBOTS_TTL_SECONDS });
        } else {
          robotsError = `Unexpected robots content-type: ${ct}`;
        }
    const robotsUrl = `https://${registrable ?? domain}/robots.txt`;
    const res = await fetchWithTimeout(
      robotsUrl,
      {
        method: "GET",
        headers: { Accept: "text/plain", "User-Agent": USER_AGENT },
      },
      { timeoutMs: 8000 },
    );
    if (res.ok) {
      const ct = res.headers.get("content-type") ?? "";
      if (/^text\/(plain|html|xml)?($|;|,)/i.test(ct)) {
        const txt = await res.text();
        robots = parseRobotsTxt(txt, { baseUrl: robotsUrl });
      } else {
        robotsError = `HTTP ${res.status}`;
        robotsError = `Unexpected robots content-type: ${ct}`;
      }
    } else {
      robotsError = `HTTP ${res.status}`;
    }
  } catch (err) {
    robotsError = String(err);
@@ -107,7 +183,7 @@ export async function getSeo(domain: string): Promise<SeoResponse> {
  if (preview?.image) {
    try {
      const stored = await getOrCreateSocialPreviewImageUrl(
        lower,
        registrable ?? domain,
        preview.image,
      );
      // Preserve original image URL for meta display; attach uploaded URL for rendering
@@ -133,10 +209,31 @@ export async function getSeo(domain: string): Promise<SeoResponse> {
      : {}),
  };

  await redis.set(metaKey, response, { ex: HTML_TTL_SECONDS });
  // Persist to Postgres only when we have a domainId
  const now = new Date();
  if (d) {
    await upsertSeo({
      domainId: d.id,
      sourceFinalUrl: response.source.finalUrl ?? null,
      sourceStatus: response.source.status ?? null,
      metaOpenGraph: response.meta?.openGraph ?? ({} as OpenGraphMeta),
      metaTwitter: response.meta?.twitter ?? ({} as TwitterMeta),
      metaGeneral: response.meta?.general ?? ({} as GeneralMeta),
      previewTitle: response.preview?.title ?? null,
      previewDescription: response.preview?.description ?? null,
      previewImageUrl: response.preview?.image ?? null,
      previewImageUploadedUrl: response.preview?.imageUploaded ?? null,
      canonicalUrl: response.preview?.canonicalUrl ?? null,
      robots: robots ?? ({} as RobotsTxt),
      robotsSitemaps: response.robots?.sitemaps ?? [],
      errors: response.errors ?? {},
      fetchedAt: now,
      expiresAt: ttlForSeo(now),
    });
  }

  await captureServer("seo_fetch", {
    domain: lower,
    domain: registrable ?? domain,
    status: status ?? -1,
    has_meta: !!meta,
    has_robots: !!robots,
@@ -144,7 +241,7 @@ export async function getSeo(domain: string): Promise<SeoResponse> {
  });

  console.info("[seo] ok", {
    domain: lower,
    domain: registrable ?? domain,
    status: status ?? -1,
    has_meta: !!meta,
    has_robots: !!robots,
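`getOrCreateSocialPreviewImageUrl` is called in two places above but defined outside this diff. Judging by the imports (`optimizeImageCover`, `uploadImage`, `makeImageFileName`, `SOCIAL_PREVIEW_TTL_SECONDS`), a plausible sketch of the Redis-backed helper; every signature below is an assumption, not the committed code:

```ts
// Hypothetical sketch of the helper used above; the real implementation
// lives outside this diff, and the helper signatures are assumed.
import { SOCIAL_PREVIEW_TTL_SECONDS } from "@/lib/constants";
import { fetchWithTimeout } from "@/lib/fetch";
import { optimizeImageCover } from "@/lib/image";
import { ns, redis } from "@/lib/redis";
import { makeImageFileName, uploadImage } from "@/lib/storage";

const SOCIAL_WIDTH = 1200;
const SOCIAL_HEIGHT = 630;

async function getOrCreateSocialPreviewImageUrl(
  domain: string,
  imageUrl: string,
): Promise<{ url: string } | null> {
  // Short-lived Redis cache so repeated renders reuse the uploaded copy.
  const key = ns("seo", domain, "preview-image");
  const cached = await redis.get<{ url: string }>(key);
  if (cached) return cached;

  const res = await fetchWithTimeout(imageUrl, { method: "GET" }, { timeoutMs: 8000 });
  if (!res.ok) return null;

  const raw = Buffer.from(await res.arrayBuffer());
  // Assumed signature: crop/resize to the 1200x630 social card size.
  const optimized = await optimizeImageCover(raw, SOCIAL_WIDTH, SOCIAL_HEIGHT);
  const uploaded = await uploadImage(optimized, makeImageFileName(domain));
  const value = { url: uploaded.url };
  await redis.set(key, value, { ex: SOCIAL_PREVIEW_TTL_SECONDS });
  return value;
}
```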
|
190
vitest.setup.ts
@@ -13,35 +13,9 @@ vi.mock("server-only", () => ({}));
// Global Redis mock to prevent Upstash calls and reduce repetition across tests
const __redisImpl = vi.hoisted(() => {
  const store = new Map<string, unknown>();
  // simple sorted-set implementation: key -> Map(member -> score)
  const zsets = new Map<string, Map<string, number>>();
  const ns = (...parts: string[]) => parts.join(":");

  const get = vi.fn(async (key: string) =>
    store.has(key) ? store.get(key) : null,
  );
  const set = vi.fn(
    async (
      key: string,
      value: unknown,
      opts?: { ex?: number; nx?: boolean },
    ) => {
      if (opts?.nx && store.has(key)) {
        return null; // NX failed - key exists
      }
      store.set(key, value);
      return "OK";
    },
  );
  const del = vi.fn(async (key: string) => {
    store.delete(key);
  });

  const exists = vi.fn(async (key: string) => {
    return store.has(key) ? 1 : 0;
  });

  function ensureZ(key: string): Map<string, number> {
  function getZset(key: string): Map<string, number> {
    let m = zsets.get(key);
    if (!m) {
      m = new Map<string, number>();
@@ -49,101 +23,95 @@ const __redisImpl = vi.hoisted(() => {
    }
    return m;
  }
  const zadd = vi.fn(
    async (key: string, arg: { score: number; member: string }) => {
      const m = ensureZ(key);
      m.set(arg.member, arg.score);

  const ns = (...parts: string[]) => parts.join(":");

  const redis = {
    async get(key: string) {
      return store.get(key) ?? null;
    },
    async set(key: string, value: unknown, _opts?: unknown) {
      store.set(key, value);
      return "OK" as const;
    },
    async del(key: string) {
      return store.delete(key) ? 1 : 0;
    },
    async incr(key: string) {
      const current = Number(store.get(key) ?? "0");
      const next = current + 1;
      store.set(key, String(next));
      return next;
    },
    async expire(_key: string, _seconds: number) {
      return 1;
    },
  );
  const zrem = vi.fn(async (key: string, ...members: string[]) => {
    const m = ensureZ(key);
    let removed = 0;
    for (const mem of members) {
      if (m.delete(mem)) removed += 1;
    }
    return removed;
  });
  const zrange = vi.fn(
    async (
    async ttl(_key: string) {
      return 60;
    },
    async exists(key: string) {
      return store.has(key) ? 1 : 0;
    },
    async zadd(
      key: string,
      entry:
        | { score: number; member: string }
        | Array<{ score: number; member: string }>,
    ) {
      const z = getZset(key);
      const list = Array.isArray(entry) ? entry : [entry];
      for (const e of list) z.set(e.member, e.score);
      return list.length;
    },
    async zrange(
      key: string,
      min: number,
      max: number,
      options?: {
        byScore?: boolean;
        limit?: { offset: number; count: number };
      },
    ) => {
      const m = zsets.get(key);
      if (!m) return [] as string[];
      const pairs = [...m.entries()].filter(
        ([, score]) => score >= min && score <= max,
      );
      pairs.sort((a, b) => a[1] - b[1]);
      const start = options?.limit?.offset ?? 0;
      const end = start + (options?.limit?.count ?? pairs.length);
      return pairs.slice(start, end).map(([member]) => member);
      options?: { byScore?: boolean; offset?: number; count?: number },
    ): Promise<string[]> {
      const z = getZset(key);
      const items = Array.from(z.entries())
        .filter(([, score]) => score >= min && score <= max)
        .sort((a, b) => a[1] - b[1])
        .map(([member]) => member);
      const offset = options?.offset ?? 0;
      const count = options?.count ?? items.length;
      return items.slice(offset, offset + count);
    },
  );

  const acquireLockOrWaitForResult = vi.fn(
    async <T = unknown>(options: {
      lockKey: string;
      resultKey: string;
      lockTtl?: number;
      pollIntervalMs?: number;
      maxWaitMs?: number;
    }) => {
      const { lockKey, resultKey } = options;

      // Try to acquire lock
      if (!store.has(lockKey)) {
        store.set(lockKey, "1");
        return { acquired: true, cachedResult: null };
    async zrem(key: string, ...members: string[]) {
      const z = getZset(key);
      let removed = 0;
      for (const m of members) {
        if (z.delete(m)) removed++;
      }

      // Lock not acquired, check for cached result
      const result = store.get(resultKey) as T | null;
      if (result !== null && result !== undefined) {
        return { acquired: false, cachedResult: result };
      }

      // No result found
      return { acquired: false, cachedResult: null };
      return removed;
    },
  );
  } as const;

  const reset = () => {
    store.clear();
    zsets.clear();
    get.mockClear();
    set.mockClear();
    del.mockClear();
    exists.mockClear();
    zadd.mockClear();
    zrem.mockClear();
    zrange.mockClear();
    acquireLockOrWaitForResult.mockClear();
  };
  return {
  const __redisTestHelper = {
    store,
    zsets,
    reset() {
      store.clear();
      for (const m of zsets.values()) m.clear();
    },
  } as const;

  return {
    ns,
    redis: { get, set, del, exists, zadd, zrem, zrange },
    get,
    set,
    del,
    exists,
    zadd,
    zrem,
    zrange,
    acquireLockOrWaitForResult,
    reset,
  };
    redis,
    __redisTestHelper,
    store,
    zsets,
    reset: __redisTestHelper.reset,
  } as const;
});

vi.mock("@/lib/redis", () => __redisImpl);

// We no longer globally mock the Drizzle client; individual tests replace
// `@/server/db/client` with a PGlite-backed instance as needed.

// Expose for tests that want to clear or assert cache interactions
declare global {
  // Makes the test helper available in the test environment
@@ -156,10 +124,14 @@ declare global {
}
// Assign to global for convenient access in tests
globalThis.__redisTestHelper = {
  store: __redisImpl.store,
  zsets: __redisImpl.zsets,
  reset: __redisImpl.reset,
  store: (__redisImpl as unknown as { store: Map<string, unknown> }).store,
  zsets: (__redisImpl as unknown as { zsets: Map<string, Map<string, number>> })
    .zsets,
  reset: (__redisImpl as unknown as { reset: () => void }).reset,
};
// Also attach to Node's global for tests using global.__redisTestHelper
// biome-ignore lint/suspicious/noExplicitAny: fine for tests
(global as any).__redisTestHelper = globalThis.__redisTestHelper;

// Note: The unstable_cache mock is intentionally a no-op. We are testing
// function behavior, not caching semantics. If we need cache behavior,