webupdatev1
@@ -1,19 +1,45 @@
# Environment Variables for Backend
# Copy this file to .env and fill in your values
# SECURITY: Never commit .env to version control

# Server
PORT=3000
PORT=5000
NODE_ENV=development

# Database
DATABASE_URL="postgresql://user:password@localhost:5432/skyartshop?schema=public"
# Database Configuration
DB_HOST=localhost
DB_PORT=5432
DB_NAME=skyartshop
DB_USER=skyartapp
DB_PASSWORD=CHANGE_THIS_STRONG_PASSWORD

# JWT
JWT_SECRET=your-super-secret-jwt-key-change-this-in-production
JWT_EXPIRES_IN=7d
# Session Security (CRITICAL: Generate strong random secrets)
# Generate with: node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
SESSION_SECRET=CHANGE_THIS_64_CHARACTER_HEX_STRING
JWT_SECRET=CHANGE_THIS_64_CHARACTER_HEX_STRING

# CORS
CORS_ORIGIN=http://localhost:5173
# CORS Configuration
CORS_ORIGIN=http://localhost:3000

# Upload
# File Upload Settings
MAX_FILE_SIZE=5242880
ALLOWED_FILE_TYPES=image/jpeg,image/png,image/gif,image/webp

# Rate Limiting
RATE_LIMIT_WINDOW_MS=900000
RATE_LIMIT_MAX_REQUESTS=100

# Logging
LOG_LEVEL=info

# Security Headers
FORCE_HTTPS=false

# ⚠️ SECURITY CHECKLIST:
# [ ] Change SESSION_SECRET to 64-character random hex
# [ ] Change JWT_SECRET to 64-character random hex
# [ ] Set strong DB_PASSWORD (12+ chars, mixed case, numbers, symbols)
# [ ] Update CORS_ORIGIN for production domain
# [ ] Set NODE_ENV=production in production
# [ ] Set FORCE_HTTPS=true in production
# [ ] Review all settings before deploying
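For reference, a minimal helper (not part of this commit; the filename is hypothetical) that applies the generator command from the comments above to emit both secrets the checklist asks for:

// generate-secrets.js — hypothetical helper based on the generator command shown above
const crypto = require("crypto");

// 32 random bytes → a 64-character hex string, matching the checklist requirement
console.log(`SESSION_SECRET=${crypto.randomBytes(32).toString("hex")}`);
console.log(`JWT_SECRET=${crypto.randomBytes(32).toString("hex")}`);

Running it once with node and pasting the output into .env satisfies the first two checklist items.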
165  backend/analyze-queries.js  Normal file
@@ -0,0 +1,165 @@
#!/usr/bin/env node
const { pool, query } = require("./config/database");

async function analyzeQueryPatterns() {
  console.log("🔍 Analyzing Query Patterns...\n");

  try {
    // 1. Check for missing indexes on frequently queried columns
    console.log("1️⃣ Checking Query Performance:");

    // Test products query (most common)
    const productsExplain = await query(`
      EXPLAIN ANALYZE
      SELECT p.id, p.name, p.slug, p.price, p.category, p.createdat
      FROM products p
      WHERE p.isactive = true
      ORDER BY p.createdat DESC
      LIMIT 20
    `);
    console.log(" Products listing:");
    productsExplain.rows.forEach((row) => {
      if (
        row["QUERY PLAN"].includes("Index") ||
        row["QUERY PLAN"].includes("Seq Scan")
      ) {
        console.log(` ${row["QUERY PLAN"]}`);
      }
    });

    // Test portfolio query
    const portfolioExplain = await query(`
      EXPLAIN ANALYZE
      SELECT id, title, category, displayorder, createdat
      FROM portfolioprojects
      WHERE isactive = true
      ORDER BY displayorder ASC, createdat DESC
    `);
    console.log("\n Portfolio listing:");
    portfolioExplain.rows.slice(0, 3).forEach((row) => {
      console.log(` ${row["QUERY PLAN"]}`);
    });

    // Test product with images (JOIN query)
    const productWithImagesExplain = await query(`
      EXPLAIN ANALYZE
      SELECT p.*, pi.image_url, pi.color_variant
      FROM products p
      LEFT JOIN product_images pi ON pi.product_id = p.id
      WHERE p.isactive = true
      LIMIT 10
    `);
    console.log("\n Products with images (JOIN):");
    productWithImagesExplain.rows.slice(0, 5).forEach((row) => {
      console.log(` ${row["QUERY PLAN"]}`);
    });

    // 2. Check for slow queries
    console.log("\n2️⃣ Checking Table Statistics:");
    const stats = await query(`
      SELECT
        schemaname,
        relname as tablename,
        n_live_tup as row_count,
        n_dead_tup as dead_rows,
        CASE
          WHEN n_live_tup > 0 THEN round(100.0 * n_dead_tup / n_live_tup, 2)
          ELSE 0
        END as bloat_pct,
        last_vacuum,
        last_analyze
      FROM pg_stat_user_tables
      WHERE schemaname = 'public'
        AND relname IN ('products', 'product_images', 'portfolioprojects', 'blogposts', 'pages')
      ORDER BY n_live_tup DESC
    `);

    console.log(" Table health:");
    stats.rows.forEach((row) => {
      console.log(
        ` ${row.tablename.padEnd(20)} ${String(row.row_count).padStart(
          6
        )} rows, ${String(row.dead_rows).padStart(4)} dead (${String(
          row.bloat_pct
        ).padStart(5)}% bloat)`
      );
    });

    // 3. Check index usage
    console.log("\n3️⃣ Index Usage Statistics:");
    const indexUsage = await query(`
      SELECT
        schemaname,
        relname as tablename,
        indexrelname as indexname,
        idx_scan as scans,
        idx_tup_read as rows_read,
        idx_tup_fetch as rows_fetched
      FROM pg_stat_user_indexes
      WHERE schemaname = 'public'
        AND relname IN ('products', 'product_images', 'portfolioprojects', 'blogposts', 'pages')
        AND idx_scan > 0
      ORDER BY idx_scan DESC
      LIMIT 15
    `);

    console.log(" Most used indexes:");
    indexUsage.rows.forEach((row) => {
      console.log(
        ` ${row.indexname.padEnd(40)} ${String(row.scans).padStart(
          6
        )} scans`
      );
    });

    // 4. Check for unused indexes
    const unusedIndexes = await query(`
      SELECT
        schemaname,
        relname as tablename,
        indexrelname as indexname
      FROM pg_stat_user_indexes
      WHERE schemaname = 'public'
        AND relname IN ('products', 'product_images', 'portfolioprojects', 'blogposts', 'pages')
        AND idx_scan = 0
        AND indexrelname NOT LIKE '%_pkey'
      ORDER BY relname, indexrelname
    `);

    if (unusedIndexes.rows.length > 0) {
      console.log("\n4️⃣ Unused Indexes (consider removing):");
      unusedIndexes.rows.forEach((row) => {
        console.log(` ${row.tablename}.${row.indexname}`);
      });
    } else {
      console.log("\n4️⃣ ✅ All indexes are being used");
    }

    // 5. Check cache hit ratio
    console.log("\n5️⃣ Cache Hit Ratio:");
    const cacheHit = await query(`
      SELECT
        sum(heap_blks_read) as heap_read,
        sum(heap_blks_hit) as heap_hit,
        CASE
          WHEN sum(heap_blks_hit) + sum(heap_blks_read) > 0 THEN
            round(100.0 * sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)), 2)
          ELSE 0
        END as cache_hit_ratio
      FROM pg_statio_user_tables
      WHERE schemaname = 'public'
    `);

    const ratio = cacheHit.rows[0].cache_hit_ratio;
    const status = ratio > 99 ? "✅" : ratio > 95 ? "⚠️" : "❌";
    console.log(` ${status} ${ratio}% (target: >99%)`);

    console.log("\n✅ Analysis complete!");
  } catch (error) {
    console.error("❌ Error:", error.message);
  } finally {
    await pool.end();
  }
}

analyzeQueryPatterns();
152  backend/analyze-schema.js  Normal file
@@ -0,0 +1,152 @@
#!/usr/bin/env node
const { pool, query } = require("./config/database");

async function analyzeSchema() {
  console.log("🔬 Analyzing Database Schema...\n");

  try {
    // 1. Check products table columns
    console.log("1️⃣ Products Table Structure:");
    const productCols = await query(`
      SELECT column_name, data_type, is_nullable, column_default
      FROM information_schema.columns
      WHERE table_name = 'products'
      ORDER BY ordinal_position
    `);
    productCols.rows.forEach((col) => {
      const nullable = col.is_nullable === "YES" ? "(nullable)" : "(NOT NULL)";
      console.log(
        ` ${col.column_name.padEnd(20)} ${col.data_type.padEnd(
          25
        )} ${nullable}`
      );
    });

    // 2. Check products indexes
    console.log("\n2️⃣ Products Table Indexes:");
    const productIndexes = await query(`
      SELECT indexname, indexdef
      FROM pg_indexes
      WHERE tablename = 'products'
      ORDER BY indexname
    `);
    productIndexes.rows.forEach((idx) => {
      console.log(` ${idx.indexname}`);
      console.log(` ${idx.indexdef.substring(0, 80)}...`);
    });

    // 3. Check portfolio projects structure
    console.log("\n3️⃣ Portfolio Projects Structure:");
    const portfolioCols = await query(`
      SELECT column_name, data_type, is_nullable
      FROM information_schema.columns
      WHERE table_name = 'portfolioprojects'
      ORDER BY ordinal_position
    `);
    portfolioCols.rows.forEach((col) => {
      const nullable = col.is_nullable === "YES" ? "(nullable)" : "(NOT NULL)";
      console.log(
        ` ${col.column_name.padEnd(20)} ${col.data_type.padEnd(
          25
        )} ${nullable}`
      );
    });

    // 4. Check portfolio indexes
    console.log("\n4️⃣ Portfolio Projects Indexes:");
    const portfolioIndexes = await query(`
      SELECT indexname, indexdef
      FROM pg_indexes
      WHERE tablename = 'portfolioprojects'
    `);
    console.log(` Total: ${portfolioIndexes.rows.length} indexes`);
    portfolioIndexes.rows.forEach((idx) => {
      console.log(` - ${idx.indexname}`);
    });

    // 5. Check blogposts indexes
    console.log("\n5️⃣ Blog Posts Indexes:");
    const blogIndexes = await query(`
      SELECT indexname, indexdef
      FROM pg_indexes
      WHERE tablename = 'blogposts'
    `);
    blogIndexes.rows.forEach((idx) => {
      console.log(` - ${idx.indexname}`);
    });

    // 6. Check pages indexes
    console.log("\n6️⃣ Pages Indexes:");
    const pagesIndexes = await query(`
      SELECT indexname, indexdef
      FROM pg_indexes
      WHERE tablename = 'pages'
    `);
    pagesIndexes.rows.forEach((idx) => {
      console.log(` - ${idx.indexname}`);
    });

    // 7. Check product_images foreign key
    console.log("\n7️⃣ Product Images Foreign Keys:");
    const piFks = await query(`
      SELECT
        tc.constraint_name,
        kcu.column_name,
        ccu.table_name AS foreign_table,
        rc.delete_rule,
        rc.update_rule
      FROM information_schema.table_constraints AS tc
      JOIN information_schema.key_column_usage AS kcu
        ON tc.constraint_name = kcu.constraint_name
      JOIN information_schema.constraint_column_usage AS ccu
        ON ccu.constraint_name = tc.constraint_name
      JOIN information_schema.referential_constraints AS rc
        ON tc.constraint_name = rc.constraint_name
      WHERE tc.constraint_type = 'FOREIGN KEY'
        AND tc.table_name = 'product_images'
    `);
    if (piFks.rows.length === 0) {
      console.log(" ⚠️ No foreign keys found!");
    } else {
      piFks.rows.forEach((fk) => {
        console.log(
          ` ${fk.column_name} → ${fk.foreign_table} (DELETE: ${fk.delete_rule})`
        );
      });
    }

    // 8. Check unique constraints
    console.log("\n8️⃣ Unique Constraints:");
    const uniqueConstraints = await query(`
      SELECT
        tc.table_name,
        tc.constraint_name,
        kcu.column_name
      FROM information_schema.table_constraints tc
      JOIN information_schema.key_column_usage kcu
        ON tc.constraint_name = kcu.constraint_name
      WHERE tc.constraint_type = 'UNIQUE'
        AND tc.table_schema = 'public'
        AND tc.table_name IN ('products', 'blogposts', 'pages', 'portfolioprojects')
      ORDER BY tc.table_name, tc.constraint_name
    `);
    if (uniqueConstraints.rows.length === 0) {
      console.log(" ⚠️ No unique constraints on slug columns!");
    } else {
      uniqueConstraints.rows.forEach((uc) => {
        console.log(
          ` ${uc.table_name}.${uc.column_name} (${uc.constraint_name})`
        );
      });
    }

    console.log("\n✅ Analysis complete!");
  } catch (error) {
    console.error("❌ Error:", error.message);
    console.error(error);
  } finally {
    await pool.end();
  }
}

analyzeSchema();
64  backend/apply-db-fixes.js  Normal file
@@ -0,0 +1,64 @@
#!/usr/bin/env node
const { pool, query } = require("./config/database");
const fs = require("fs");
const path = require("path");

async function applyMigration() {
  console.log("🔧 Applying Database Fixes...\n");

  try {
    // Read the migration file
    const migrationPath = path.join(
      __dirname,
      "migrations",
      "006_database_fixes.sql"
    );
    const migrationSQL = fs.readFileSync(migrationPath, "utf8");

    console.log("📄 Running migration: 006_database_fixes.sql");
    console.log("─".repeat(60));

    // Execute the migration
    await query(migrationSQL);

    console.log("\n✅ Migration applied successfully!");
    console.log("\n📊 Verification:");
    console.log("─".repeat(60));

    // Verify the changes
    const fkResult = await query(`
      SELECT COUNT(*) as fk_count
      FROM information_schema.table_constraints
      WHERE constraint_type = 'FOREIGN KEY'
        AND table_schema = 'public'
    `);
    console.log(` Foreign keys: ${fkResult.rows[0].fk_count}`);

    const indexResult = await query(`
      SELECT COUNT(*) as index_count
      FROM pg_indexes
      WHERE schemaname = 'public'
        AND tablename IN ('products', 'product_images', 'portfolioprojects', 'blogposts', 'pages')
    `);
    console.log(` Indexes (main tables): ${indexResult.rows[0].index_count}`);

    const uniqueResult = await query(`
      SELECT COUNT(*) as unique_count
      FROM information_schema.table_constraints
      WHERE constraint_type = 'UNIQUE'
        AND table_schema = 'public'
        AND table_name IN ('products', 'blogposts', 'pages')
    `);
    console.log(` Unique constraints: ${uniqueResult.rows[0].unique_count}`);

    console.log("\n✅ Database fixes complete!");
  } catch (error) {
    console.error("❌ Error applying migration:", error.message);
    console.error(error);
    process.exit(1);
  } finally {
    await pool.end();
  }
}

applyMigration();
217  backend/apply-fixes-safe.js  Normal file
@@ -0,0 +1,217 @@
#!/usr/bin/env node
const { pool, query } = require("./config/database");

async function applyPartialFixes() {
  console.log("🔧 Applying Database Fixes (User-Level)...\n");

  try {
    console.log("1️⃣ Creating Indexes...");

    // Products indexes
    await query(
      `CREATE INDEX IF NOT EXISTS idx_products_isactive ON products(isactive) WHERE isactive = true`
    );
    console.log(" ✅ idx_products_isactive");

    await query(
      `CREATE INDEX IF NOT EXISTS idx_products_isfeatured ON products(isfeatured, createdat DESC) WHERE isfeatured = true AND isactive = true`
    );
    console.log(" ✅ idx_products_isfeatured");

    await query(
      `CREATE INDEX IF NOT EXISTS idx_products_isbestseller ON products(isbestseller, createdat DESC) WHERE isbestseller = true AND isactive = true`
    );
    console.log(" ✅ idx_products_isbestseller");

    await query(
      `CREATE INDEX IF NOT EXISTS idx_products_category ON products(category, createdat DESC) WHERE isactive = true AND category IS NOT NULL`
    );
    console.log(" ✅ idx_products_category");

    await query(
      `CREATE INDEX IF NOT EXISTS idx_products_createdat ON products(createdat DESC) WHERE isactive = true`
    );
    console.log(" ✅ idx_products_createdat");

    await query(
      `CREATE INDEX IF NOT EXISTS idx_products_price ON products(price) WHERE isactive = true`
    );
    console.log(" ✅ idx_products_price");

    // Portfolio indexes
    await query(
      `CREATE INDEX IF NOT EXISTS idx_portfolio_isactive ON portfolioprojects(isactive) WHERE isactive = true`
    );
    console.log(" ✅ idx_portfolio_isactive");

    await query(
      `CREATE INDEX IF NOT EXISTS idx_portfolio_category ON portfolioprojects(category) WHERE isactive = true`
    );
    console.log(" ✅ idx_portfolio_category");

    await query(
      `CREATE INDEX IF NOT EXISTS idx_portfolio_displayorder ON portfolioprojects(displayorder ASC, createdat DESC) WHERE isactive = true`
    );
    console.log(" ✅ idx_portfolio_displayorder");

    await query(
      `CREATE INDEX IF NOT EXISTS idx_portfolio_createdat ON portfolioprojects(createdat DESC) WHERE isactive = true`
    );
    console.log(" ✅ idx_portfolio_createdat");

    // Pages indexes
    await query(
      `CREATE INDEX IF NOT EXISTS idx_pages_slug ON pages(slug) WHERE isactive = true`
    );
    console.log(" ✅ idx_pages_slug");

    await query(
      `CREATE INDEX IF NOT EXISTS idx_pages_isactive ON pages(isactive) WHERE isactive = true`
    );
    console.log(" ✅ idx_pages_isactive");

    await query(
      `CREATE INDEX IF NOT EXISTS idx_pages_createdat ON pages(createdat DESC) WHERE isactive = true`
    );
    console.log(" ✅ idx_pages_createdat");

    // Product images indexes
    await query(
      `CREATE INDEX IF NOT EXISTS idx_product_images_color_variant ON product_images(color_variant) WHERE color_variant IS NOT NULL`
    );
    console.log(" ✅ idx_product_images_color_variant");

    await query(
      `CREATE INDEX IF NOT EXISTS idx_product_images_color_code ON product_images(color_code) WHERE color_code IS NOT NULL`
    );
    console.log(" ✅ idx_product_images_color_code");

    console.log("\n2️⃣ Adding Foreign Keys...");
    try {
      await query(`
        DO $$
        BEGIN
          IF NOT EXISTS (
            SELECT 1 FROM information_schema.table_constraints
            WHERE constraint_name = 'fk_product_images_product'
          ) THEN
            ALTER TABLE product_images
            ADD CONSTRAINT fk_product_images_product
            FOREIGN KEY (product_id) REFERENCES products(id)
            ON DELETE CASCADE;
          END IF;
        END $$;
      `);
      console.log(" ✅ product_images -> products");
    } catch (e) {
      console.log(" ⚠️ product_images FK:", e.message);
    }

    try {
      await query(`
        DO $$
        BEGIN
          UPDATE uploads SET folder_id = NULL
          WHERE folder_id NOT IN (SELECT id FROM media_folders);

          IF NOT EXISTS (
            SELECT 1 FROM information_schema.table_constraints
            WHERE constraint_name = 'fk_uploads_folder'
          ) THEN
            ALTER TABLE uploads
            ADD CONSTRAINT fk_uploads_folder
            FOREIGN KEY (folder_id) REFERENCES media_folders(id)
            ON DELETE SET NULL;
          END IF;
        END $$;
      `);
      console.log(" ✅ uploads -> media_folders");
    } catch (e) {
      console.log(" ⚠️ uploads FK:", e.message);
    }

    console.log("\n3️⃣ Adding Unique Constraints...");
    try {
      await query(`
        DO $$
        BEGIN
          IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'unique_products_slug') THEN
            WITH duplicates AS (
              SELECT slug, array_agg(id) as ids
              FROM products
              WHERE slug IS NOT NULL
              GROUP BY slug
              HAVING COUNT(*) > 1
            )
            UPDATE products p
            SET slug = p.slug || '-' || substring(p.id, 1, 8)
            WHERE p.id IN (SELECT unnest(ids[2:]) FROM duplicates);

            ALTER TABLE products ADD CONSTRAINT unique_products_slug UNIQUE(slug);
          END IF;
        END $$;
      `);
      console.log(" ✅ products.slug unique constraint");
    } catch (e) {
      console.log(" ⚠️ products.slug:", e.message);
    }

    try {
      await query(`
        DO $$
        BEGIN
          IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'unique_pages_slug') THEN
            WITH duplicates AS (
              SELECT slug, array_agg(id) as ids
              FROM pages
              WHERE slug IS NOT NULL
              GROUP BY slug
              HAVING COUNT(*) > 1
            )
            UPDATE pages p
            SET slug = p.slug || '-' || p.id::text
            WHERE p.id IN (SELECT unnest(ids[2:]) FROM duplicates);

            ALTER TABLE pages ADD CONSTRAINT unique_pages_slug UNIQUE(slug);
          END IF;
        END $$;
      `);
      console.log(" ✅ pages.slug unique constraint");
    } catch (e) {
      console.log(" ⚠️ pages.slug:", e.message);
    }

    console.log("\n4️⃣ Running ANALYZE...");
    await query("ANALYZE products");
    await query("ANALYZE product_images");
    await query("ANALYZE portfolioprojects");
    await query("ANALYZE blogposts");
    await query("ANALYZE pages");
    console.log(" ✅ Tables analyzed");

    console.log("\n📊 Final Status:");
    const indexCount = await query(`
      SELECT COUNT(*) as count
      FROM pg_indexes
      WHERE schemaname = 'public'
        AND tablename IN ('products', 'product_images', 'portfolioprojects', 'blogposts', 'pages')
    `);
    console.log(` Total indexes: ${indexCount.rows[0].count}`);

    const fkCount = await query(`
      SELECT COUNT(*) as count
      FROM information_schema.table_constraints
      WHERE constraint_type = 'FOREIGN KEY' AND table_schema = 'public'
    `);
    console.log(` Foreign keys: ${fkCount.rows[0].count}`);

    console.log("\n✅ Database fixes applied successfully!");
  } catch (error) {
    console.error("❌ Error:", error.message);
    process.exit(1);
  } finally {
    await pool.end();
  }
}

applyPartialFixes();
37  backend/check-db-schema.sql  Normal file
@@ -0,0 +1,37 @@
-- Get all tables
SELECT table_name FROM information_schema.tables
WHERE table_schema = 'public' AND table_type = 'BASE TABLE'
ORDER BY table_name;

-- Get columns for key tables
\echo '\n=== PRODUCTS TABLE ==='
SELECT column_name, data_type, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = 'products'
ORDER BY ordinal_position;

\echo '\n=== PRODUCT_IMAGES TABLE ==='
SELECT column_name, data_type, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = 'product_images'
ORDER BY ordinal_position;

\echo '\n=== UPLOADS TABLE ==='
SELECT column_name, data_type, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = 'uploads'
ORDER BY ordinal_position;

\echo '\n=== FOREIGN KEYS ==='
SELECT
  tc.table_name,
  kcu.column_name,
  ccu.table_name AS foreign_table_name,
  ccu.column_name AS foreign_column_name
FROM information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
  ON tc.constraint_name = kcu.constraint_name
JOIN information_schema.constraint_column_usage AS ccu
  ON ccu.constraint_name = tc.constraint_name
WHERE tc.constraint_type = 'FOREIGN KEY'
ORDER BY tc.table_name, kcu.column_name;
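Note: the \echo directives above are psql meta-commands, so this script is meant to be run through psql (for example, psql -d skyartshop -f check-db-schema.sql, with the database name taken from the .env defaults above) rather than through the node-postgres pool used by the other scripts.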
110  backend/check-db-status.js  Normal file
@@ -0,0 +1,110 @@
#!/usr/bin/env node
const { pool, query } = require("./config/database");

async function checkDatabase() {
  console.log("🔍 Checking Database Status...\n");

  try {
    // 1. Check connection
    console.log("1️⃣ Testing Connection...");
    const connResult = await query(
      "SELECT NOW() as time, current_database() as db"
    );
    console.log(`✅ Connected to: ${connResult.rows[0].db}`);
    console.log(`⏰ Server time: ${connResult.rows[0].time}\n`);

    // 2. List all tables
    console.log("2️⃣ Listing Tables...");
    const tablesResult = await query(`
      SELECT tablename
      FROM pg_tables
      WHERE schemaname = 'public'
      ORDER BY tablename
    `);
    console.log(`📋 Tables (${tablesResult.rows.length}):`);
    tablesResult.rows.forEach((row) => console.log(` - ${row.tablename}`));
    console.log();

    // 3. Check row counts
    console.log("3️⃣ Checking Row Counts...");
    const countResult = await query(`
      SELECT
        (SELECT COUNT(*) FROM products) as products,
        (SELECT COUNT(*) FROM product_images) as product_images,
        (SELECT COUNT(*) FROM portfolioprojects) as portfolioprojects,
        (SELECT COUNT(*) FROM blogposts) as blogposts,
        (SELECT COUNT(*) FROM pages) as pages,
        (SELECT COUNT(*) FROM adminusers) as adminusers,
        (SELECT COUNT(*) FROM uploads) as uploads,
        (SELECT COUNT(*) FROM media_folders) as media_folders,
        (SELECT COUNT(*) FROM site_settings) as site_settings
    `);
    console.log("📊 Row counts:");
    Object.entries(countResult.rows[0]).forEach(([table, count]) => {
      console.log(` ${table.padEnd(20)}: ${count}`);
    });
    console.log();

    // 4. Check for missing columns
    console.log("4️⃣ Checking Product Columns...");
    const productCols = await query(`
      SELECT column_name, data_type
      FROM information_schema.columns
      WHERE table_name = 'products'
      ORDER BY ordinal_position
    `);
    console.log(`📝 Products table has ${productCols.rows.length} columns`);

    // 5. Check indexes
    console.log("\n5️⃣ Checking Indexes...");
    const indexResult = await query(`
      SELECT
        tablename,
        COUNT(*) as index_count
      FROM pg_indexes
      WHERE schemaname = 'public'
      GROUP BY tablename
      ORDER BY tablename
    `);
    console.log("🔍 Index counts:");
    indexResult.rows.forEach((row) => {
      console.log(` ${row.tablename.padEnd(25)}: ${row.index_count} indexes`);
    });
    console.log();

    // 6. Check foreign keys
    console.log("6️⃣ Checking Foreign Keys...");
    const fkResult = await query(`
      SELECT
        tc.table_name,
        kcu.column_name,
        ccu.table_name AS foreign_table,
        rc.delete_rule
      FROM information_schema.table_constraints AS tc
      JOIN information_schema.key_column_usage AS kcu
        ON tc.constraint_name = kcu.constraint_name
      JOIN information_schema.constraint_column_usage AS ccu
        ON ccu.constraint_name = tc.constraint_name
      JOIN information_schema.referential_constraints AS rc
        ON tc.constraint_name = rc.constraint_name
      WHERE tc.constraint_type = 'FOREIGN KEY'
        AND tc.table_schema = 'public'
      ORDER BY tc.table_name
    `);
    console.log(`🔗 Foreign keys (${fkResult.rows.length}):`);
    fkResult.rows.forEach((row) => {
      console.log(
        ` ${row.table_name}.${row.column_name} → ${row.foreign_table} (${row.delete_rule})`
      );
    });
    console.log();

    console.log("✅ Database check complete!");
  } catch (error) {
    console.error("❌ Error:", error.message);
  } finally {
    await pool.end();
  }
}

checkDatabase();
backend/config/database.js
@@ -1,4 +1,5 @@
const { Pool } = require("pg");
const crypto = require("crypto");
const logger = require("./logger");
require("dotenv").config();

@@ -8,23 +9,86 @@ const pool = new Pool({
  database: process.env.DB_NAME || "skyartshop",
  user: process.env.DB_USER || "skyartapp",
  password: process.env.DB_PASSWORD,
  max: 20,
  idleTimeoutMillis: 30000,
  connectionTimeoutMillis: 2000,
  max: 30, // Increased to 30 for higher concurrency
  min: 10, // Keep 10 connections warm for instant response
  idleTimeoutMillis: 60000,
  connectionTimeoutMillis: 3000,
  application_name: "skyartshop-api",
  keepAlive: true, // TCP keepalive
  keepAliveInitialDelayMillis: 10000,
  statement_timeout: 30000, // 30s query timeout
});

pool.on("connect", () => logger.info("✓ PostgreSQL connected"));
pool.on("error", (err) => logger.error("PostgreSQL error:", err));

// Query cache for SELECT statements with crypto-based keys
const queryCache = new Map();
const queryCacheOrder = []; // LRU tracking
const QUERY_CACHE_TTL = 15000; // 15 seconds (increased)
const QUERY_CACHE_MAX_SIZE = 500; // 500 cached queries (increased)
const SLOW_QUERY_THRESHOLD = 50; // 50ms threshold (stricter)

// Generate fast cache key using crypto hash
const getCacheKey = (text, params) => {
  const hash = crypto.createHash("md5");
  hash.update(text);
  if (params) hash.update(JSON.stringify(params));
  return hash.digest("hex");
};

const query = async (text, params) => {
  const start = Date.now();
  const isSelect = text.trim().toUpperCase().startsWith("SELECT");

  // Check cache for SELECT queries
  if (isSelect) {
    const cacheKey = getCacheKey(text, params);
    const cached = queryCache.get(cacheKey);

    if (cached && Date.now() - cached.timestamp < QUERY_CACHE_TTL) {
      logger.debug("Query cache hit", { duration: Date.now() - start });
      return cached.data;
    }
  }

  try {
    const res = await pool.query(text, params);
    const duration = Date.now() - start;
    logger.debug("Executed query", { duration, rows: res.rowCount });

    // Cache SELECT queries with LRU eviction
    if (isSelect) {
      const cacheKey = getCacheKey(text, params);

      // LRU eviction
      if (queryCache.size >= QUERY_CACHE_MAX_SIZE) {
        const oldestKey = queryCacheOrder.shift();
        if (oldestKey) queryCache.delete(oldestKey);
      }

      queryCache.set(cacheKey, { data: res, timestamp: Date.now() });
      queryCacheOrder.push(cacheKey);
    }

    // Log slow queries
    if (duration > SLOW_QUERY_THRESHOLD) {
      logger.warn("Slow query", {
        duration,
        text: text.substring(0, 100),
        rows: res.rowCount,
        params: params?.length || 0,
      });
    }

    return res;
  } catch (error) {
    logger.error("Query error:", { text, error: error.message });
    const duration = Date.now() - start;
    logger.error("Query error", {
      text: text.substring(0, 100),
      error: error.message,
      duration,
      code: error.code,
    });
    throw error;
  }
};

@@ -46,7 +110,37 @@ const transaction = async (callback) => {
  }
};

// Health check
// Batch query execution for parallel operations
const batchQuery = async (queries) => {
  try {
    const results = await Promise.all(
      queries.map(({ text, params }) => query(text, params))
    );
    return results;
  } catch (error) {
    logger.error("Batch query error:", error);
    throw error;
  }
};

// Clear query cache (useful for cache invalidation)
const clearQueryCache = (pattern) => {
  if (pattern) {
    // Clear specific pattern
    for (const key of queryCache.keys()) {
      if (key.includes(pattern)) {
        queryCache.delete(key);
      }
    }
  } else {
    // Clear all
    queryCache.clear();
    queryCacheOrder.length = 0;
  }
  logger.info("Query cache cleared", { pattern: pattern || "all" });
};

// Health check with pool metrics
const healthCheck = async () => {
  try {
    const result = await query(
@@ -56,6 +150,15 @@
      healthy: true,
      database: result.rows[0].database,
      timestamp: result.rows[0].time,
      pool: {
        total: pool.totalCount,
        idle: pool.idleCount,
        waiting: pool.waitingCount,
      },
      cache: {
        size: queryCache.size,
        maxSize: QUERY_CACHE_MAX_SIZE,
      },
    };
  } catch (error) {
    logger.error("Database health check failed:", error);
@@ -66,4 +169,11 @@
  }
};

module.exports = { pool, query, transaction, healthCheck };
module.exports = {
  pool,
  query,
  transaction,
  batchQuery,
  clearQueryCache,
  healthCheck,
};
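A brief usage sketch for the two new helpers (a hypothetical call site, not code from this commit; the function and table names come from the diff above):

// Hypothetical call site for the new helpers exported above.
const { batchQuery, clearQueryCache } = require("./config/database");

async function loadHomepageData() {
  // Independent SELECTs run in parallel; each can be served from the query cache.
  const [products, portfolio] = await batchQuery([
    { text: "SELECT id, name, price FROM products WHERE isactive = true LIMIT 20" },
    { text: "SELECT id, title FROM portfolioprojects WHERE isactive = true" },
  ]);
  return { products: products.rows, portfolio: portfolio.rows };
}

// After a write, invalidate cached SELECTs. Note that cache keys are MD5
// hashes of the query text and params, so a raw-SQL pattern will not match
// them; clearing everything is the reliable option unless a hashed key is known.
clearQueryCache();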
355  backend/database-analysis-fixes.sql  Normal file
@@ -0,0 +1,355 @@
-- =====================================================
-- DATABASE ANALYSIS & FIXES FOR SKYARTSHOP
-- Date: January 3, 2026
-- Purpose: Comprehensive database schema validation and fixes
-- =====================================================

-- =====================================================
-- PART 1: VERIFY CORE TABLES EXIST
-- =====================================================

-- Ensure all required tables exist
DO $$
BEGIN
  -- Check if tables exist and create if missing
  IF NOT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'public' AND tablename = 'products') THEN
    RAISE EXCEPTION 'CRITICAL: products table is missing!';
  END IF;

  IF NOT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'public' AND tablename = 'product_images') THEN
    RAISE NOTICE 'product_images table is missing - will be created';
  END IF;

  IF NOT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'public' AND tablename = 'adminusers') THEN
    RAISE EXCEPTION 'CRITICAL: adminusers table is missing!';
  END IF;

  IF NOT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'public' AND tablename = 'uploads') THEN
    RAISE NOTICE 'uploads table is missing - will be created';
  END IF;

  IF NOT EXISTS (SELECT FROM pg_tables WHERE schemaname = 'public' AND tablename = 'media_folders') THEN
    RAISE NOTICE 'media_folders table is missing - will be created';
  END IF;
END $$;

-- =====================================================
-- PART 2: VERIFY AND ADD MISSING COLUMNS
-- =====================================================

-- Products table columns
ALTER TABLE products ADD COLUMN IF NOT EXISTS id TEXT PRIMARY KEY DEFAULT replace(gen_random_uuid()::text, '-', '');
ALTER TABLE products ADD COLUMN IF NOT EXISTS name VARCHAR(255) NOT NULL DEFAULT '';
ALTER TABLE products ADD COLUMN IF NOT EXISTS slug VARCHAR(255);
ALTER TABLE products ADD COLUMN IF NOT EXISTS shortdescription TEXT;
ALTER TABLE products ADD COLUMN IF NOT EXISTS description TEXT;
ALTER TABLE products ADD COLUMN IF NOT EXISTS price DECIMAL(10,2) NOT NULL DEFAULT 0.00;
ALTER TABLE products ADD COLUMN IF NOT EXISTS stockquantity INTEGER DEFAULT 0;
ALTER TABLE products ADD COLUMN IF NOT EXISTS category VARCHAR(100);
ALTER TABLE products ADD COLUMN IF NOT EXISTS sku VARCHAR(100);
ALTER TABLE products ADD COLUMN IF NOT EXISTS weight DECIMAL(10,2);
ALTER TABLE products ADD COLUMN IF NOT EXISTS dimensions VARCHAR(100);
ALTER TABLE products ADD COLUMN IF NOT EXISTS material VARCHAR(255);
ALTER TABLE products ADD COLUMN IF NOT EXISTS isactive BOOLEAN DEFAULT true;
ALTER TABLE products ADD COLUMN IF NOT EXISTS isfeatured BOOLEAN DEFAULT false;
ALTER TABLE products ADD COLUMN IF NOT EXISTS isbestseller BOOLEAN DEFAULT false;
ALTER TABLE products ADD COLUMN IF NOT EXISTS createdat TIMESTAMP DEFAULT NOW();
ALTER TABLE products ADD COLUMN IF NOT EXISTS updatedat TIMESTAMP DEFAULT NOW();
ALTER TABLE products ADD COLUMN IF NOT EXISTS metakeywords TEXT;

-- Portfolio projects columns
ALTER TABLE portfolioprojects ADD COLUMN IF NOT EXISTS imageurl VARCHAR(500);
ALTER TABLE portfolioprojects ADD COLUMN IF NOT EXISTS featuredimage VARCHAR(500);
ALTER TABLE portfolioprojects ADD COLUMN IF NOT EXISTS images JSONB;
ALTER TABLE portfolioprojects ADD COLUMN IF NOT EXISTS displayorder INTEGER DEFAULT 0;

-- Pages table columns
ALTER TABLE pages ADD COLUMN IF NOT EXISTS ispublished BOOLEAN DEFAULT true;
ALTER TABLE pages ADD COLUMN IF NOT EXISTS pagecontent TEXT;

-- Blog posts columns
ALTER TABLE blogposts ADD COLUMN IF NOT EXISTS excerpt TEXT;
ALTER TABLE blogposts ADD COLUMN IF NOT EXISTS imageurl VARCHAR(500);

-- =====================================================
-- PART 3: CREATE PRODUCT_IMAGES TABLE (IF MISSING)
-- =====================================================

CREATE TABLE IF NOT EXISTS product_images (
  id TEXT PRIMARY KEY DEFAULT replace(gen_random_uuid()::text, '-', ''),
  product_id TEXT NOT NULL,
  image_url VARCHAR(500) NOT NULL,
  color_variant VARCHAR(100),
  color_code VARCHAR(7),
  alt_text VARCHAR(255),
  display_order INTEGER DEFAULT 0,
  is_primary BOOLEAN DEFAULT FALSE,
  variant_price DECIMAL(10,2),
  variant_stock INTEGER DEFAULT 0,
  created_at TIMESTAMP DEFAULT NOW(),
  CONSTRAINT fk_product_images_product FOREIGN KEY (product_id)
    REFERENCES products(id) ON DELETE CASCADE
);

-- =====================================================
-- PART 4: CREATE UPLOADS & MEDIA_FOLDERS TABLES
-- =====================================================

CREATE TABLE IF NOT EXISTS media_folders (
  id SERIAL PRIMARY KEY,
  name VARCHAR(255) NOT NULL,
  parent_id INTEGER REFERENCES media_folders(id) ON DELETE CASCADE,
  path VARCHAR(1000) NOT NULL,
  created_by TEXT,
  created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  UNIQUE(parent_id, name)
);

CREATE TABLE IF NOT EXISTS uploads (
  id SERIAL PRIMARY KEY,
  filename VARCHAR(255) NOT NULL UNIQUE,
  original_name VARCHAR(255) NOT NULL,
  file_path VARCHAR(500) NOT NULL,
  file_size INTEGER NOT NULL,
  mime_type VARCHAR(100) NOT NULL,
  uploaded_by TEXT,
  folder_id INTEGER REFERENCES media_folders(id) ON DELETE SET NULL,
  created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  used_in_type VARCHAR(50),
  used_in_id TEXT
);

-- =====================================================
-- PART 5: CREATE SITE_SETTINGS TABLE
-- =====================================================

CREATE TABLE IF NOT EXISTS site_settings (
  id SERIAL PRIMARY KEY,
  key VARCHAR(100) UNIQUE NOT NULL,
  settings JSONB NOT NULL DEFAULT '{}'::jsonb,
  created_at TIMESTAMP DEFAULT NOW(),
  updated_at TIMESTAMP DEFAULT NOW()
);

-- Insert default settings if not exists
INSERT INTO site_settings (key, settings) VALUES
  ('menu', '{"items": []}'::jsonb),
  ('homepage', '{"hero": {}, "sections": []}'::jsonb)
ON CONFLICT (key) DO NOTHING;

-- =====================================================
-- PART 6: CREATE TEAM_MEMBERS TABLE
-- =====================================================

CREATE TABLE IF NOT EXISTS team_members (
  id SERIAL PRIMARY KEY,
  name VARCHAR(255) NOT NULL,
  position VARCHAR(255) NOT NULL,
  bio TEXT,
  image_url VARCHAR(500),
  display_order INTEGER DEFAULT 0,
  created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- =====================================================
-- PART 7: ADD ALL CRITICAL INDEXES
-- =====================================================

-- Products indexes
CREATE INDEX IF NOT EXISTS idx_products_isactive ON products(isactive) WHERE isactive = true;
CREATE INDEX IF NOT EXISTS idx_products_isfeatured ON products(isfeatured) WHERE isfeatured = true AND isactive = true;
CREATE INDEX IF NOT EXISTS idx_products_slug ON products(slug) WHERE isactive = true;
CREATE INDEX IF NOT EXISTS idx_products_category ON products(category) WHERE isactive = true;
CREATE INDEX IF NOT EXISTS idx_products_createdat ON products(createdat DESC) WHERE isactive = true;
CREATE INDEX IF NOT EXISTS idx_products_composite ON products(isactive, isfeatured, createdat DESC);

-- Product images indexes
CREATE INDEX IF NOT EXISTS idx_product_images_product_id ON product_images(product_id);
CREATE INDEX IF NOT EXISTS idx_product_images_is_primary ON product_images(product_id, is_primary) WHERE is_primary = true;
CREATE INDEX IF NOT EXISTS idx_product_images_display_order ON product_images(product_id, display_order, created_at);
CREATE INDEX IF NOT EXISTS idx_product_images_color ON product_images(color_variant);

-- Blog posts indexes
CREATE INDEX IF NOT EXISTS idx_blogposts_ispublished ON blogposts(ispublished) WHERE ispublished = true;
CREATE INDEX IF NOT EXISTS idx_blogposts_slug ON blogposts(slug) WHERE ispublished = true;
CREATE INDEX IF NOT EXISTS idx_blogposts_createdat ON blogposts(createdat DESC) WHERE ispublished = true;

-- Portfolio projects indexes
CREATE INDEX IF NOT EXISTS idx_portfolio_isactive ON portfolioprojects(isactive) WHERE isactive = true;
CREATE INDEX IF NOT EXISTS idx_portfolio_display ON portfolioprojects(displayorder ASC, createdat DESC) WHERE isactive = true;

-- Pages indexes
CREATE INDEX IF NOT EXISTS idx_pages_slug ON pages(slug) WHERE isactive = true;
CREATE INDEX IF NOT EXISTS idx_pages_isactive ON pages(isactive) WHERE isactive = true;

-- Homepage sections indexes
CREATE INDEX IF NOT EXISTS idx_homepagesections_display ON homepagesections(displayorder ASC);

-- Team members indexes
CREATE INDEX IF NOT EXISTS idx_team_members_display ON team_members(display_order ASC, created_at DESC);

-- Uploads indexes
CREATE INDEX IF NOT EXISTS idx_uploads_filename ON uploads(filename);
CREATE INDEX IF NOT EXISTS idx_uploads_created_at ON uploads(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_uploads_folder_id ON uploads(folder_id);
CREATE INDEX IF NOT EXISTS idx_uploads_usage ON uploads(used_in_type, used_in_id);

-- Media folders indexes
CREATE INDEX IF NOT EXISTS idx_media_folders_parent_id ON media_folders(parent_id);
CREATE INDEX IF NOT EXISTS idx_media_folders_path ON media_folders(path);

-- Session table optimization
CREATE INDEX IF NOT EXISTS idx_session_expire ON session(expire);

-- =====================================================
-- PART 8: ADD UNIQUE CONSTRAINTS
-- =====================================================

-- Ensure unique slugs
DO $$
BEGIN
  -- Products slug constraint
  IF NOT EXISTS (
    SELECT 1 FROM pg_constraint
    WHERE conname = 'unique_products_slug'
  ) THEN
    ALTER TABLE products ADD CONSTRAINT unique_products_slug
      UNIQUE(slug);
  END IF;

  -- Blog posts slug constraint
  IF NOT EXISTS (
    SELECT 1 FROM pg_constraint
    WHERE conname = 'unique_blogposts_slug'
  ) THEN
    ALTER TABLE blogposts ADD CONSTRAINT unique_blogposts_slug
      UNIQUE(slug);
  END IF;

  -- Pages slug constraint
  IF NOT EXISTS (
    SELECT 1 FROM pg_constraint
    WHERE conname = 'unique_pages_slug'
  ) THEN
    ALTER TABLE pages ADD CONSTRAINT unique_pages_slug
      UNIQUE(slug);
  END IF;
END $$;

-- =====================================================
-- PART 9: ADD CHECK CONSTRAINTS FOR DATA INTEGRITY
-- =====================================================

-- Products constraints
ALTER TABLE products DROP CONSTRAINT IF EXISTS check_products_price_positive;
ALTER TABLE products ADD CONSTRAINT check_products_price_positive
  CHECK (price >= 0);

ALTER TABLE products DROP CONSTRAINT IF EXISTS check_products_stock_nonnegative;
ALTER TABLE products ADD CONSTRAINT check_products_stock_nonnegative
  CHECK (stockquantity >= 0);

-- Product images constraints
ALTER TABLE product_images DROP CONSTRAINT IF EXISTS check_variant_price_positive;
ALTER TABLE product_images ADD CONSTRAINT check_variant_price_positive
  CHECK (variant_price IS NULL OR variant_price >= 0);

ALTER TABLE product_images DROP CONSTRAINT IF EXISTS check_variant_stock_nonnegative;
ALTER TABLE product_images ADD CONSTRAINT check_variant_stock_nonnegative
  CHECK (variant_stock >= 0);

-- =====================================================
-- PART 10: DATA MIGRATION & CLEANUP
-- =====================================================

-- Generate slugs for products missing them
UPDATE products
SET slug = LOWER(REGEXP_REPLACE(REGEXP_REPLACE(name, '[^a-zA-Z0-9\s-]', '', 'g'), '\s+', '-', 'g'))
WHERE (slug IS NULL OR slug = '') AND name IS NOT NULL;

-- Set ispublished for pages from isactive
UPDATE pages
SET ispublished = isactive
WHERE ispublished IS NULL;

-- Migrate portfolio featured image if needed
UPDATE portfolioprojects
SET imageurl = featuredimage
WHERE imageurl IS NULL AND featuredimage IS NOT NULL;

-- =====================================================
-- PART 11: ANALYZE TABLES FOR QUERY OPTIMIZATION
-- =====================================================

ANALYZE products;
ANALYZE product_images;
ANALYZE blogposts;
ANALYZE portfolioprojects;
ANALYZE pages;
ANALYZE homepagesections;
ANALYZE uploads;
ANALYZE media_folders;
ANALYZE team_members;
ANALYZE site_settings;

-- =====================================================
-- PART 12: VERIFICATION QUERIES
-- =====================================================

-- Show table row counts
SELECT 'products' as table_name, COUNT(*) as row_count FROM products
UNION ALL
SELECT 'product_images', COUNT(*) FROM product_images
UNION ALL
SELECT 'blogposts', COUNT(*) FROM blogposts
UNION ALL
SELECT 'portfolioprojects', COUNT(*) FROM portfolioprojects
UNION ALL
SELECT 'pages', COUNT(*) FROM pages
UNION ALL
SELECT 'uploads', COUNT(*) FROM uploads
UNION ALL
SELECT 'media_folders', COUNT(*) FROM media_folders
UNION ALL
SELECT 'team_members', COUNT(*) FROM team_members
UNION ALL
SELECT 'adminusers', COUNT(*) FROM adminusers
ORDER BY table_name;

-- Show index usage
-- (pg_stat_user_indexes exposes relname/indexrelname, not tablename/indexname,
-- so those columns are aliased here as in analyze-queries.js)
SELECT
  schemaname,
  relname as tablename,
  indexrelname as indexname,
  idx_scan as times_used,
  idx_tup_read as rows_read,
  idx_tup_fetch as rows_fetched
FROM pg_stat_user_indexes
WHERE schemaname = 'public'
ORDER BY tablename, indexname;

-- Show foreign key constraints
SELECT
  tc.table_name,
  kcu.column_name,
  ccu.table_name AS foreign_table_name,
  ccu.column_name AS foreign_column_name,
  rc.update_rule,
  rc.delete_rule
FROM information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
  ON tc.constraint_name = kcu.constraint_name
JOIN information_schema.constraint_column_usage AS ccu
  ON ccu.constraint_name = tc.constraint_name
JOIN information_schema.referential_constraints AS rc
  ON tc.constraint_name = rc.constraint_name
WHERE tc.constraint_type = 'FOREIGN KEY'
  AND tc.table_schema = 'public'
ORDER BY tc.table_name, kcu.column_name;

-- =====================================================
-- END OF DATABASE ANALYSIS & FIXES
-- =====================================================
113  backend/fix-contact-colors.js  Normal file
@@ -0,0 +1,113 @@
/**
 * Fix Contact Page Colors
 * Updates the contact page content in the database to use the pink color palette
 */

const { query } = require("./config/database");
const logger = require("./config/logger");

const UPDATED_CONTACT_CONTENT = `
<div style="text-align: center; margin-bottom: 48px;">
  <h2 style="font-size: 2rem; font-weight: 700; color: #202023; margin-bottom: 12px;">
    Our Contact Information
  </h2>
  <p style="font-size: 1rem; color: #202023">
    Reach out to us through any of these channels
  </p>
</div>

<div style="display: grid; grid-template-columns: repeat(3, 1fr); gap: 24px; margin-bottom: 48px;">
  <!-- Phone Card -->
  <div style="background: linear-gradient(135deg, #FFEBEB 0%, #FFD0D0 100%); padding: 32px; border-radius: 16px; text-align: center; color: #202023; box-shadow: 0 8px 24px rgba(252, 177, 216, 0.3);">
    <div style="font-size: 48px; margin-bottom: 16px;">
      <i class="bi bi-telephone-fill"></i>
    </div>
    <h3 style="font-size: 1.25rem; font-weight: 600; margin-bottom: 12px; color: #202023;">Phone</h3>
    <p style="font-size: 1rem; opacity: 0.9; margin: 0; color: #202023;">+1 (555) 123-4567</p>
  </div>

  <!-- Email Card -->
  <div style="background: linear-gradient(135deg, #FFD0D0 0%, #FCB1D8 100%); padding: 32px; border-radius: 16px; text-align: center; color: #202023; box-shadow: 0 8px 24px rgba(252, 177, 216, 0.3);">
    <div style="font-size: 48px; margin-bottom: 16px;">
      <i class="bi bi-envelope-fill"></i>
    </div>
    <h3 style="font-size: 1.25rem; font-weight: 600; margin-bottom: 12px; color: #202023;">Email</h3>
    <p style="font-size: 1rem; opacity: 0.9; margin: 0; color: #202023;">contact@skyartshop.com</p>
  </div>

  <!-- Location Card -->
  <div style="background: linear-gradient(135deg, #F6CCDE 0%, #FCB1D8 100%); padding: 32px; border-radius: 16px; text-align: center; color: #202023; box-shadow: 0 8px 24px rgba(252, 177, 216, 0.3);">
    <div style="font-size: 48px; margin-bottom: 16px;">
      <i class="bi bi-geo-alt-fill"></i>
    </div>
    <h3 style="font-size: 1.25rem; font-weight: 600; margin-bottom: 12px; color: #202023;">Location</h3>
    <p style="font-size: 1rem; opacity: 0.9; margin: 0; color: #202023;">123 Art Street, Creative City, CC 12345</p>
  </div>
</div>

<!-- Business Hours -->
<div style="background: linear-gradient(135deg, #FCB1D8 0%, #FFD0D0 50%, #F6CCDE 100%); padding: 40px; border-radius: 16px; text-align: center; color: #202023; box-shadow: 0 8px 24px rgba(252, 177, 216, 0.3);">
  <h3 style="font-size: 1.5rem; font-weight: 700; margin-bottom: 24px; color: #202023;">Business Hours</h3>
  <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); gap: 20px; max-width: 800px; margin: 0 auto;">
    <div>
      <p style="font-weight: 600; margin-bottom: 8px; color: #202023;">Monday - Friday</p>
      <p style="opacity: 0.85; margin: 0; color: #202023;">9:00 AM - 6:00 PM</p>
    </div>
    <div>
      <p style="font-weight: 600; margin-bottom: 8px; color: #202023;">Saturday</p>
      <p style="opacity: 0.85; margin: 0; color: #202023;">10:00 AM - 4:00 PM</p>
    </div>
    <div>
      <p style="font-weight: 600; margin-bottom: 8px; color: #202023;">Sunday</p>
      <p style="opacity: 0.85; margin: 0; color: #202023;">Closed</p>
    </div>
  </div>
</div>
`;

async function fixContactColors() {
  try {
    logger.info(
      "🎨 Updating contact page colors to match pink color palette..."
    );

    const result = await query(
      `UPDATE pages
       SET pagecontent = $1,
           updatedat = CURRENT_TIMESTAMP
       WHERE slug = 'contact'
       RETURNING id, slug`,
      [UPDATED_CONTACT_CONTENT]
    );

    if (result.rowCount > 0) {
      logger.info("✅ Contact page colors updated successfully!");
      logger.info(
        ` Updated page: ${result.rows[0].slug} (ID: ${result.rows[0].id})`
      );
      console.log(
        "\n✅ SUCCESS: Contact page now uses the pink color palette!"
      );
      console.log("\nUpdated gradients:");
      console.log(" • Phone card: #FFEBEB → #FFD0D0 (light pink)");
      console.log(" • Email card: #FFD0D0 → #FCB1D8 (medium pink)");
      console.log(" • Location card: #F6CCDE → #FCB1D8 (rosy pink)");
      console.log(
        " • Business Hours: #FCB1D8 → #FFD0D0 → #F6CCDE (multi-tone pink)"
      );
      console.log(" • All text: #202023 (dark charcoal)\n");
    } else {
      logger.warn("⚠️ No contact page found to update");
      console.log("\n⚠️ WARNING: Contact page not found in database");
    }

    process.exit(0);
  } catch (error) {
    logger.error("❌ Error updating contact page colors:", error);
    console.error("\n❌ ERROR:", error.message);
    process.exit(1);
  }
}

// Run the fix
fixContactColors();
77  backend/health-check.sh  Executable file
@@ -0,0 +1,77 @@
#!/bin/bash
# Database Health Check Script
# Quick verification of database status

echo "🏥 SkyArtShop Database Health Check"
echo "====================================="
echo ""

# Check PostgreSQL is running
echo "1️⃣ PostgreSQL Status:"
if sudo systemctl is-active --quiet postgresql; then
  echo "   ✅ PostgreSQL is running"
else
  echo "   ❌ PostgreSQL is not running"
  exit 1
fi
echo ""

# Check backend server
echo "2️⃣ Backend Server:"
if pm2 list | grep -q "skyartshop-backend.*online"; then
  echo "   ✅ Backend server is online"
else
  echo "   ⚠️ Backend server status unknown"
fi
echo ""

# Test database connection
echo "3️⃣ Database Connection:"
if ! node -e "const {pool}=require('./config/database');pool.query('SELECT 1').then(()=>{console.log('   ✅ Connection successful');pool.end();process.exit(0);}).catch(e=>{console.log('   ❌ Connection failed:',e.message);pool.end();process.exit(1);});" 2>/dev/null; then
  echo "   ❌ Cannot connect to database"
  exit 1
fi
echo ""

# Check row counts
echo "4️⃣ Data Status:"
node -e "const {query,pool}=require('./config/database');(async()=>{try{const r=await query('SELECT (SELECT COUNT(*) FROM products) as products, (SELECT COUNT(*) FROM portfolioprojects) as portfolio, (SELECT COUNT(*) FROM blogposts) as blog, (SELECT COUNT(*) FROM pages) as pages');const d=r.rows[0];console.log('   Products:',d.products);console.log('   Portfolio:',d.portfolio);console.log('   Blog:',d.blog);console.log('   Pages:',d.pages);}catch(e){console.log('   ❌',e.message);}finally{await pool.end();}})()" 2>/dev/null
echo ""

# Check indexes
echo "5️⃣ Database Indexes:"
node -e "const {query,pool}=require('./config/database');(async()=>{try{const r=await query(\"SELECT COUNT(*) as count FROM pg_indexes WHERE schemaname='public' AND tablename IN ('products','product_images','portfolioprojects','blogposts','pages')\");console.log('   Total indexes:',r.rows[0].count);}catch(e){console.log('   ❌',e.message);}finally{await pool.end();}})()" 2>/dev/null
echo ""

# Test API endpoints
echo "6️⃣ API Endpoints:"
if curl -s http://localhost:5000/api/products > /dev/null 2>&1; then
  echo "   ✅ /api/products"
else
  echo "   ❌ /api/products"
fi

if curl -s http://localhost:5000/api/portfolio/projects > /dev/null 2>&1; then
  echo "   ✅ /api/portfolio/projects"
else
  echo "   ❌ /api/portfolio/projects"
fi

if curl -s http://localhost:5000/api/categories > /dev/null 2>&1; then
  echo "   ✅ /api/categories"
else
  echo "   ❌ /api/categories"
fi
echo ""

# Cache performance
echo "7️⃣ Cache Performance:"
node -e "const {query,pool}=require('./config/database');(async()=>{try{const r=await query(\"SELECT CASE WHEN sum(heap_blks_hit)+sum(heap_blks_read)>0 THEN round(100.0*sum(heap_blks_hit)/(sum(heap_blks_hit)+sum(heap_blks_read)),2) ELSE 0 END as ratio FROM pg_statio_user_tables WHERE schemaname='public'\");const ratio=r.rows[0].ratio;const status=ratio>99?'✅':ratio>95?'⚠️':'❌';console.log('  ',status,'Cache hit ratio:',ratio+'%','(target: >99%)');}catch(e){console.log('   ❌',e.message);}finally{await pool.end();}})()" 2>/dev/null
echo ""

echo "✅ Health check complete!"
echo ""
echo "To see detailed analysis, run:"
echo "  node analyze-queries.js"
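Editor's note: the inline `node -e` one-liners above are hard to read in script form. Expanded, the connection and row-count checks amount to something like the following standalone script — a sketch, assuming `./config/database` exports `pool` and `query` as it does elsewhere in this commit:

// check-db.js — readable equivalent of the health-check one-liners (sketch)
const { query, pool } = require("./config/database");

(async () => {
  try {
    // 1. Basic connectivity
    await query("SELECT 1");
    console.log("✅ Connection successful");

    // 2. Row counts for the main content tables
    const r = await query(
      `SELECT (SELECT COUNT(*) FROM products)          AS products,
              (SELECT COUNT(*) FROM portfolioprojects) AS portfolio,
              (SELECT COUNT(*) FROM blogposts)         AS blog,
              (SELECT COUNT(*) FROM pages)             AS pages`
    );
    console.table(r.rows[0]);
  } catch (e) {
    console.error("❌", e.message);
    process.exitCode = 1;
  } finally {
    await pool.end();
  }
})();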
310
backend/middleware/apiOptimization.js
Normal file
@@ -0,0 +1,310 @@
/**
 * API Response Optimization Middleware
 * Implements response batching, field filtering, and pagination
 */
const logger = require("../config/logger");

/**
 * Enable response compression for API endpoints
 */
const enableCompression = (req, res, next) => {
  // Already handled by global compression middleware
  next();
};

/**
 * Add cache headers for GET requests
 * SAFEGUARD: Checks headers not already sent before setting
 */
const addCacheHeaders = (maxAge = 300) => {
  return (req, res, next) => {
    if (req.method === "GET" && !res.headersSent) {
      try {
        res.set({
          "Cache-Control": `public, max-age=${maxAge}`,
          Vary: "Accept-Encoding",
        });
      } catch (error) {
        logger.warn("Failed to set cache headers", { error: error.message });
      }
    }
    next();
  };
};

/**
 * Field filtering middleware
 * Allows clients to request only specific fields: ?fields=id,name,price
 * SAFEGUARD: Validates field names to prevent injection attacks
 */
const fieldFilter = (req, res, next) => {
  const originalJson = res.json.bind(res);

  res.json = function (data) {
    const fields = req.query.fields;

    if (!fields || !data || res.headersSent) {
      return originalJson(data);
    }

    try {
      // SAFEGUARD: Validate field names (alphanumeric, underscore, dot only)
      if (!/^[a-zA-Z0-9_.,\s]+$/.test(fields)) {
        logger.warn("Invalid field filter attempted", { fields });
        return originalJson(data);
      }

      const fieldList = fields
        .split(",")
        .map((f) => f.trim())
        .filter(Boolean);

      // SAFEGUARD: Limit number of fields
      if (fieldList.length > 50) {
        logger.warn("Too many fields requested", { count: fieldList.length });
        return originalJson(data);
      }

      const filterObject = (obj) => {
        if (!obj || typeof obj !== "object") return obj;

        const filtered = {};
        fieldList.forEach((field) => {
          if (field in obj) {
            filtered[field] = obj[field];
          }
        });
        return filtered;
      };

      if (Array.isArray(data)) {
        data = data.map(filterObject);
      } else if (data.success !== undefined && data.data) {
        // Handle wrapped responses
        if (Array.isArray(data.data)) {
          data.data = data.data.map(filterObject);
        } else {
          data.data = filterObject(data.data);
        }
      } else {
        data = filterObject(data);
      }

      return originalJson(data);
    } catch (error) {
      logger.error("Field filter error", { error: error.message });
      return originalJson(data);
    }
  };

  next();
};

/**
 * Pagination middleware
 * Adds pagination support: ?page=1&limit=20
 */
const paginate = (defaultLimit = 20, maxLimit = 100) => {
  return (req, res, next) => {
    const page = Math.max(1, parseInt(req.query.page) || 1);
    const limit = Math.min(
      maxLimit,
      Math.max(1, parseInt(req.query.limit) || defaultLimit)
    );
    const offset = (page - 1) * limit;

    req.pagination = {
      page,
      limit,
      offset,
      maxLimit,
    };

    // Helper to add pagination info to response
    res.paginate = (data, total) => {
      const totalPages = Math.ceil(total / limit);
      return res.json({
        success: true,
        data,
        pagination: {
          page,
          limit,
          total,
          totalPages,
          hasNext: page < totalPages,
          hasPrev: page > 1,
        },
      });
    };

    next();
  };
};

/**
 * Response time tracking
 * SAFEGUARD: Checks headers not sent before setting X-Response-Time header
 */
const trackResponseTime = (req, res, next) => {
  const start = Date.now();

  res.on("finish", () => {
    const duration = Date.now() - start;

    // Log slow requests
    if (duration > 1000) {
      logger.warn("Slow API request", {
        method: req.method,
        path: req.path,
        duration: `${duration}ms`,
        status: res.statusCode,
      });
    }

    // Add response time header only if headers haven't been sent
    if (!res.headersSent) {
      try {
        res.set("X-Response-Time", `${duration}ms`);
      } catch (error) {
        logger.debug("Could not set X-Response-Time header", {
          error: error.message,
        });
      }
    }
  });

  next();
};

/**
 * ETag generation for GET requests
 * SAFEGUARD: Checks headersSent before setting headers
 */
const generateETag = (req, res, next) => {
  if (req.method !== "GET") {
    return next();
  }

  const originalJson = res.json.bind(res);

  res.json = function (data) {
    try {
      // SAFEGUARD: Don't process if headers already sent
      if (res.headersSent) {
        return originalJson(data);
      }

      // Generate simple ETag from stringified data
      const dataStr = JSON.stringify(data);
      const etag = `W/"${Buffer.from(dataStr).length.toString(16)}"`;

      // Check if client has cached version
      if (req.headers["if-none-match"] === etag) {
        res.status(304).end();
        return;
      }

      res.set("ETag", etag);
      return originalJson(data);
    } catch (error) {
      logger.error("ETag generation error", { error: error.message });
      return originalJson(data);
    }
  };

  next();
};

/**
 * JSON response size optimization
 * Removes null values and compacts responses
 */
const optimizeJSON = (req, res, next) => {
  const originalJson = res.json.bind(res);

  res.json = function (data) {
    if (data && typeof data === "object") {
      data = removeNulls(data);
    }
    return originalJson(data);
  };

  next();
};

function removeNulls(obj) {
  if (Array.isArray(obj)) {
    return obj.map(removeNulls);
  }

  if (obj !== null && typeof obj === "object") {
    return Object.entries(obj).reduce((acc, [key, value]) => {
      if (value !== null && value !== undefined) {
        acc[key] = removeNulls(value);
      }
      return acc;
    }, {});
  }

  return obj;
}

/**
 * Batch request handler
 * Allows multiple API calls in a single request
 * POST /api/batch with body: { requests: [{ method, url, body }] }
 */
const batchHandler = async (req, res) => {
  const { requests } = req.body;

  if (!Array.isArray(requests) || requests.length === 0) {
    return res.status(400).json({
      success: false,
      error: "Invalid batch request format",
    });
  }

  if (requests.length > 10) {
    return res.status(400).json({
      success: false,
      error: "Maximum 10 requests per batch",
    });
  }

  const results = await Promise.allSettled(
    requests.map(async (request) => {
      try {
        // This would require implementation of internal request handling
        // For now, return a placeholder
        return {
          status: 200,
          data: { message: "Batch processing not fully implemented" },
        };
      } catch (error) {
        return {
          status: 500,
          error: error.message,
        };
      }
    })
  );

  res.json({
    success: true,
    results: results.map((result, index) => ({
      ...requests[index],
      ...result,
    })),
  });
};

module.exports = {
  enableCompression,
  addCacheHeaders,
  fieldFilter,
  paginate,
  trackResponseTime,
  generateETag,
  optimizeJSON,
  batchHandler,
};
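Editor's note: none of these middleware are wired up inside the file itself. A minimal sketch of how they might compose on a read-heavy route follows; the app, route name, and in-memory data are illustrative assumptions, not part of this commit:

// app-sketch.js — hypothetical wiring for the optimization middleware
const express = require("express");
const {
  addCacheHeaders,
  fieldFilter,
  paginate,
  trackResponseTime,
  generateETag,
} = require("./middleware/apiOptimization");

const app = express();
app.use(express.json());
app.use(trackResponseTime); // logs any request slower than 1s

// In-memory stand-in for a database table
const items = Array.from({ length: 45 }, (_, i) => ({
  id: i + 1,
  name: `Item ${i + 1}`,
}));

// GET /api/items?page=2&limit=10&fields=id,name
app.get(
  "/api/items",
  addCacheHeaders(300), // Cache-Control: public, max-age=300
  fieldFilter,          // honors ?fields=...
  generateETag,         // replies 304 when If-None-Match matches
  paginate(10, 50),     // ?page / ?limit with a hard cap of 50
  (req, res) => {
    const { limit, offset } = req.pagination;
    res.paginate(items.slice(offset, offset + limit), items.length);
  }
);

app.listen(5000);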
339
backend/middleware/apiOptimization.js.corrupt
Normal file
@@ -0,0 +1,339 @@
/**
 * API Response Optimization Middleware
 * Implements response batching, field filtering, and pagination
 */
const logger = require("../config/logger");

/**
 * Enable response compression for API endpoints
 */
const enableCompression = (req, res, next) => {
  // Already handled by global compression middleware
  next();
};

/**
 * Add cache headers for GET requests
 */
const addCacheHeaders = (maxAge = 300) => {
  return (req, res, next) => {
    if (req.method === "GET" && !res.headersSent) {
      try {
        res.set({
          "Cache-Control": `public, max-age=${maxAge}`,
          Vary: "Accept-Encoding",
        });
      } catch (error) {
        logger.warn("Failed to set cache headers", { error: error.message });
      }
    }
    next();
  };
};

/**
 * Field filtering middleware
 * Allows clients to request only specific fields: ?fields=id,name,price
 * SAFEGUARD: Validates field names to prevent injection attacks
 */
const fieldFilter = (req, res, next) => {
  const originalJson = res.json.bind(res);

  res.json = function (data) {
    const fields = req.query.fields;

    if (!fields || !data || res.headersSent) {
      return originalJson(data);
    }

    try {
      // SAFEGUARD: Validate field names (alphanumeric, underscore, dot only)
      if (!/^[a-zA-Z0-9_.,\s]+$/.test(fields)) {
        logger.warn("Invalid field filter attempted", { fields });
        return originalJson(data);
      }

      const fieldList = fields.split(",").map((f) => f.trim()).filter(Boolean);

      // SAFEGUARD: Limit number of fields
      if (fieldList.length > 50) {
        logger.warn("Too many fields requested", { count: fieldList.length });
        return originalJson(data);
      }

      const filterObject = (obj) => {
        if (!obj || typeof obj !== "object") return obj;

        const filtered = {};
        fieldList.forEach((field) => {
          if (field in obj) {
            filtered[field] = obj[field];
          }
        });
        return filtered;
      };

      if (Array.isArray(data)) {
        data = data.map(filterObject);
      } else if (data.success !== undefined && data.data) {
        // Handle wrapped responses
        if (Array.isArray(data.data)) {
          data.data = data.data.map(filterObject);
        } else {
          data.data = filterObject(data.data);
        }
      } else {
        data = filterObject(data);
      }

      return originalJson(data);
    } catch (error) {
      logger.error("Field filter error", { error: error.message });
      return originalJson(data);
    }
  };

  next();
};

/**
 * Pagination middleware
 * Adds pagination support: ?page=1&limit=20
 */
const paginate = (defaultLimit = 20, maxLimit = 100) => {
  return (req, res, next) => {
    const page = Math.max(1, parseInt(req.query.page) || 1);
    const limit = Math.min(
      maxLimit,
      Math.max(1, parseInt(req.query.limit) || defaultLimit)
    );
    const offset = (page - 1) * limit;

    req.pagination = {
      page,
      limit,
      offset,
      maxLimit,
    };

    // Helper to add pagination info to response
    res.paginate = (data, total) => {
      const totalPages = Math.ceil(total / limit);
      return res.json({
        success: true,
        data,
        pagination: {
          page,
          limit,
          total,
          totalPages,
          hasNext: page < totalPages,
          hasPrev: page > 1,
        },
      });
    };

    next();
  };
};

/**
 * Response time tracking
 */
const trackResponseTime = (req, res, next) => {
  const start = Date.now();

  res.on("finish", () => {
    const duration = Date.now() - start;

    // Log slow requests
    if (duration > 1000) {
      logger.warn("Slow API request", {
        method: req.method,
        path: req.path,
        duration: `${duration}ms`,
        status: res.statusCode,
      });
    }

    // Add response time header only if headers haven't been sent
    if (!res.headersSent) {
      res.set("X-Response-Time", `${duration}ms`);
    }
  });

  next();
};

/**
 * ETag generation for GET requests
 * SAFEGUARD: Checks headersSent before setting headers
 */
const generateETag = (req, res, next) => {
  if (req.method !== "GET") {
    return next();
  }

  const originalJson = res.json.bind(res);

  res.json = function (data) {
    try {
      // SAFEGUARD: Don't process if headers already sent
      if (res.headersSent) {
        return originalJson(data);
      }

      // Generate simple ETag from stringified data
      const dataStr = JSON.stringify(data);
      const etag = `W/"${Buffer.from(dataStr).length.toString(16)}"`;

      // Check if client has cached version
      if (req.headers["if-none-match"] === etag) {
        res.status(304).end();
        return;
      }

      res.set("ETag", etag);
      return originalJson(data);
    } catch (error) {
      logger.error("ETag generation error", { error: error.message });
      return originalJson(data);
    }
  };

  next();
};

/**
 * JSON response size optimization
 * Removes null values and compacts responses
 */
const optimizeJSON = (req, res, next) => {
  const originalJson = res.json.bind(res);

  res.json = function (data) {
    if (data && typeof data === "object") {
      data = removeNulls(data);
    }
    return originalJson(data);
  };

  next();
};

function removeNulls(obj) {
  if (Array.isArray(obj)) {
    return obj.map(removeNulls);
  }

  if (obj !== null && typeof obj === "object") {
    return Object.entries(obj).reduce((acc, [key, value]) => {
      if (value !== null && value !== undefined) {
        acc[key] = removeNulls(value);
      }
      return acc;
    }, {});
  }

  return obj;
}

/**
 * Batch request handler
 * Allows multiple API calls in a single request
 * POST /api/batch with body: { requests: [{ method, url, body }] }
 * SAFEGUARD: Enhanced validation and error handling
 */
const batchHandler = async (req, res) => {
  try {
    const { requests } = req.body;

    // SAFEGUARD: Validate requests array
    if (!Array.isArray(requests) || requests.length === 0) {
      return res.status(400).json({
        success: false,
        error: "Invalid batch request format",
      });
    }

    // SAFEGUARD: Limit batch size
    if (requests.length > 10) {
      return res.status(400).json({
        success: false,
        error: "Maximum 10 requests per batch",
      });
    }

    // SAFEGUARD: Validate each request structure
    const isValid = requests.every(
      (req) =>
        req &&
        typeof req === "object" &&
        req.method &&
        req.url &&
        ["GET", "POST", "PUT", "DELETE"].includes(req.method.toUpperCase())
    );

    if (!isValid) {
      return res.status(400).json({
        success: false,
        error: "Invalid request format in batch",
      });
    }

    const results = await Promise.allSettled(
      requests.map(async (request) => {
        try {
          // This would require implementation of internal request handling
          // For now, return a placeholder
          return {
            status: 200,
            data: { message: "Batch processing not fully implemented" },
          };
        } catch (error) {
          return {
            status: 500,
            error: error.message,
          };
        }
      })
    );

    // SAFEGUARD: Check if response already sent
    if (res.headersSent) {
      logger.warn("Response already sent in batch handler");
      return;
    }

    res.json({
      success: true,
      results: results.map((result, index) => ({
        ...requests[index],
        ...result,
      })),
    });
  } catch (error) {
    logger.error("Batch handler error", { error: error.message, stack: error.stack });
    if (!res.headersSent) {
      res.status(500).json({
        success: false,
        error: "Batch processing failed",
      });
    }

    // NOTE: the stray block below (and the missing closing brace for this
    // catch) is the corruption that earned the file its .corrupt suffix —
    // `results` and `requests` are not in scope here, so the file does not parse.
    res.json({
      success: true,
      results: results.map((result, index) => ({
        ...requests[index],
        ...result,
      })),
    });
};

module.exports = {
  enableCompression,
  addCacheHeaders,
  fieldFilter,
  paginate,
  trackResponseTime,
  generateETag,
  optimizeJSON,
  batchHandler,
};
152
backend/middleware/bruteForceProtection.js
Normal file
@@ -0,0 +1,152 @@
/**
 * Brute force protection middleware
 * Tracks failed login attempts and temporarily blocks IPs with too many failures
 */

const logger = require("../config/logger");

// Store failed attempts in memory (use Redis in production)
const failedAttempts = new Map();
const blockedIPs = new Map();

// Configuration
const MAX_FAILED_ATTEMPTS = 5;
const BLOCK_DURATION = 15 * 60 * 1000; // 15 minutes
const ATTEMPT_WINDOW = 15 * 60 * 1000; // 15 minutes
const CLEANUP_INTERVAL = 60 * 1000; // 1 minute

/**
 * Clean up old entries periodically
 */
const cleanup = () => {
  const now = Date.now();

  // Clean up failed attempts
  for (const [ip, data] of failedAttempts.entries()) {
    if (now - data.firstAttempt > ATTEMPT_WINDOW) {
      failedAttempts.delete(ip);
    }
  }

  // Clean up blocked IPs
  for (const [ip, blockTime] of blockedIPs.entries()) {
    if (now - blockTime > BLOCK_DURATION) {
      blockedIPs.delete(ip);
      logger.info("IP unblocked after cooldown", { ip });
    }
  }
};

// Start cleanup interval
setInterval(cleanup, CLEANUP_INTERVAL);

/**
 * Record a failed login attempt
 * @param {string} ip - IP address
 */
const recordFailedAttempt = (ip) => {
  const now = Date.now();

  if (!failedAttempts.has(ip)) {
    failedAttempts.set(ip, {
      count: 1,
      firstAttempt: now,
    });
  } else {
    const data = failedAttempts.get(ip);

    // Reset if outside window
    if (now - data.firstAttempt > ATTEMPT_WINDOW) {
      data.count = 1;
      data.firstAttempt = now;
    } else {
      data.count++;
    }

    // Block if too many attempts
    if (data.count >= MAX_FAILED_ATTEMPTS) {
      blockedIPs.set(ip, now);
      logger.warn("IP blocked due to failed login attempts", {
        ip,
        attempts: data.count,
      });
    }
  }
};

/**
 * Reset failed attempts for an IP (on successful login)
 * @param {string} ip - IP address
 */
const resetFailedAttempts = (ip) => {
  failedAttempts.delete(ip);
};

/**
 * Check if an IP is currently blocked
 * @param {string} ip - IP address
 * @returns {boolean}
 */
const isBlocked = (ip) => {
  if (!blockedIPs.has(ip)) {
    return false;
  }

  const blockTime = blockedIPs.get(ip);
  const now = Date.now();

  // Check if block has expired
  if (now - blockTime > BLOCK_DURATION) {
    blockedIPs.delete(ip);
    return false;
  }

  return true;
};

/**
 * Get remaining block time in seconds
 * @param {string} ip - IP address
 * @returns {number} Seconds remaining
 */
const getRemainingBlockTime = (ip) => {
  if (!blockedIPs.has(ip)) {
    return 0;
  }

  const blockTime = blockedIPs.get(ip);
  const elapsed = Date.now() - blockTime;
  const remaining = Math.max(0, BLOCK_DURATION - elapsed);

  return Math.ceil(remaining / 1000);
};

/**
 * Middleware to check if IP is blocked
 */
const checkBlocked = (req, res, next) => {
  const ip = req.ip || req.connection.remoteAddress;

  if (isBlocked(ip)) {
    const remainingSeconds = getRemainingBlockTime(ip);
    logger.warn("Blocked IP attempted access", { ip, path: req.path });

    return res.status(429).json({
      success: false,
      message: `Too many failed attempts. Please try again in ${Math.ceil(
        remainingSeconds / 60
      )} minutes.`,
      retryAfter: remainingSeconds,
    });
  }

  next();
};

module.exports = {
  recordFailedAttempt,
  resetFailedAttempts,
  isBlocked,
  checkBlocked,
  getRemainingBlockTime,
};
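Editor's note: the module exports only the primitives; callers must invoke them at the right points. A sketch of how a login route might use them — the route path, the authenticate() helper, and the response shapes are assumptions for illustration:

// login-route-sketch.js — hypothetical use of the brute-force primitives
const express = require("express");
const {
  checkBlocked,
  recordFailedAttempt,
  resetFailedAttempts,
} = require("./middleware/bruteForceProtection");

const router = express.Router();

// Placeholder credential check — a real app would verify against the DB
async function authenticate(username, password) {
  return username === "admin" && password === "correct horse";
}

router.post("/admin/login", checkBlocked, async (req, res) => {
  const ip = req.ip;
  const { username, password } = req.body || {};

  if (await authenticate(username, password)) {
    resetFailedAttempts(ip); // clear the counter on success
    return res.json({ success: true });
  }

  recordFailedAttempt(ip); // 5 failures in 15 min => 429 for the next 15 min
  return res
    .status(401)
    .json({ success: false, message: "Invalid credentials" });
});

module.exports = router;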
@@ -5,28 +5,63 @@
 const logger = require("../config/logger");
 
 class CacheManager {
-  constructor(defaultTTL = 300000) {
-    // 5 minutes default
+  constructor(defaultTTL = 300000, maxSize = 2000) {
+    // 5 minutes default, max 2000 entries (optimized for performance)
     this.cache = new Map();
     this.defaultTTL = defaultTTL;
+    this.maxSize = maxSize;
+    this.stats = { hits: 0, misses: 0, evictions: 0 };
+    // Use Map for O(1) LRU tracking instead of array indexOf/splice
+    this.lruHead = null; // Most recently used
+    this.lruTail = null; // Least recently used
+    this.lruNodes = new Map(); // key -> {prev, next, key}
   }
 
   set(key, value, ttl = this.defaultTTL) {
     const expiresAt = Date.now() + ttl;
 
+    // If key exists, remove from LRU list first
+    if (this.cache.has(key)) {
+      this._removeLRUNode(key);
+    } else if (this.cache.size >= this.maxSize) {
+      // Evict least recently used
+      if (this.lruTail) {
+        const evictKey = this.lruTail.key;
+        this.cache.delete(evictKey);
+        this._removeLRUNode(evictKey);
+        this.stats.evictions++;
+        logger.debug(`Cache LRU eviction: ${evictKey}`);
+      }
+    }
+
     this.cache.set(key, { value, expiresAt });
+    this._addLRUNode(key); // Add to head (most recent)
     logger.debug(`Cache set: ${key} (TTL: ${ttl}ms)`);
   }
 
   get(key) {
     const cached = this.cache.get(key);
-    if (!cached) return null;
-
-    if (Date.now() > cached.expiresAt) {
+    if (!cached) {
+      this.stats.misses++;
+      logger.debug(`Cache miss: ${key}`);
+      return null;
+    }
+
+    const now = Date.now();
+    if (now > cached.expiresAt) {
       this.cache.delete(key);
+      this._removeLRUNode(key);
+      this.stats.misses++;
       logger.debug(`Cache expired: ${key}`);
       return null;
     }
+
+    // Move to head (most recently used) - O(1)
+    this._removeLRUNode(key);
+    this._addLRUNode(key);
+
+    this.stats.hits++;
     logger.debug(`Cache hit: ${key}`);
     return cached.value;
   }
@@ -53,6 +88,9 @@ class CacheManager {
   clear() {
     const size = this.cache.size;
     this.cache.clear();
+    this.lruNodes.clear();
+    this.lruHead = null;
+    this.lruTail = null;
     logger.info(`Cache cleared (${size} keys)`);
   }
 
@@ -60,6 +98,63 @@ class CacheManager {
     return this.cache.size;
   }
 
+  // Get cache statistics
+  getStats() {
+    const hitRate =
+      this.stats.hits + this.stats.misses > 0
+        ? (
+            (this.stats.hits / (this.stats.hits + this.stats.misses)) *
+            100
+          ).toFixed(2)
+        : 0;
+    return {
+      ...this.stats,
+      hitRate: `${hitRate}%`,
+      size: this.cache.size,
+      maxSize: this.maxSize,
+    };
+  }
+
+  // Reset statistics
+  resetStats() {
+    this.stats = { hits: 0, misses: 0, evictions: 0 };
+  }
+
+  // O(1) LRU operations using doubly-linked list pattern
+  _addLRUNode(key) {
+    const node = { key, prev: null, next: this.lruHead };
+
+    if (this.lruHead) {
+      this.lruHead.prev = node;
+    }
+    this.lruHead = node;
+
+    if (!this.lruTail) {
+      this.lruTail = node;
+    }
+
+    this.lruNodes.set(key, node);
+  }
+
+  _removeLRUNode(key) {
+    const node = this.lruNodes.get(key);
+    if (!node) return;
+
+    if (node.prev) {
+      node.prev.next = node.next;
+    } else {
+      this.lruHead = node.next;
+    }
+
+    if (node.next) {
+      node.next.prev = node.prev;
+    } else {
+      this.lruTail = node.prev;
+    }
+
+    this.lruNodes.delete(key);
+  }
+
   // Clean up expired entries
   cleanup() {
     const now = Date.now();
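Editor's note: the payoff of the doubly-linked-list change is that both eviction and the recency bump in get() are O(1), instead of the O(n) indexOf/splice scans the old array-based tracking implied. A small sketch of exercising the class — it assumes the module exports an instance, e.g. `module.exports = new CacheManager(300000, 2000)`, which lies outside this hunk:

// cache-usage-sketch.js — hypothetical exercise of the LRU cache
const cache = require("./middleware/cache");

cache.set("products:list:page1", [{ id: 1, name: "Print" }], 60000);
cache.set("pages:contact", { slug: "contact" });

cache.get("products:list:page1"); // hit  -> entry bumped to the LRU head
cache.get("missing-key");         // miss -> counted in stats

console.log(cache.getStats());
// e.g. { hits: 1, misses: 1, evictions: 0, hitRate: '50.00%', size: 2, maxSize: 2000 }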
@@ -1,30 +1,46 @@
 /**
  * Response Compression Middleware
- * Compresses API responses to reduce payload size
+ * High-performance compression with Brotli support
  */
 const compression = require("compression");
+const zlib = require("zlib");
 
 const compressionMiddleware = compression({
-  // Only compress responses larger than 1kb
-  threshold: 1024,
-  // Compression level (0-9, higher = better compression but slower)
+  // Only compress responses larger than 512 bytes (lower threshold)
+  threshold: 512,
+  // Level 6 for gzip (balance between speed and ratio)
   level: 6,
   // Memory level
   memLevel: 8,
+  // Use Brotli when available (better compression than gzip)
+  brotli: {
+    enabled: true,
+    zlib: {
+      [zlib.constants.BROTLI_PARAM_QUALITY]: 4, // 0-11, 4 is fast with good compression
+      [zlib.constants.BROTLI_PARAM_MODE]: zlib.constants.BROTLI_MODE_TEXT,
+    },
+  },
   // Filter function - don't compress already compressed formats
   filter: (req, res) => {
     if (req.headers["x-no-compression"]) {
       return false;
     }
-    // Check content-type
 
     const contentType = res.getHeader("Content-Type");
     if (!contentType) return compression.filter(req, res);
 
     // Don't compress images, videos, or already compressed formats
-    if (
-      contentType.includes("image/") ||
-      contentType.includes("video/") ||
-      contentType.includes("application/zip") ||
-      contentType.includes("application/pdf")
-    ) {
+    const skipTypes = [
+      "image/",
+      "video/",
+      "application/zip",
+      "application/pdf",
+      "application/octet-stream",
+      "application/wasm",
+      "font/",
+    ];
+
+    if (skipTypes.some((type) => contentType.includes(type))) {
       return false;
     }
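Editor's note: one way to sanity-check the new threshold and encoding negotiation from a client — a sketch assuming Node 18+ (global fetch) and the backend listening on localhost:5000 as configured elsewhere in this commit; the endpoint is illustrative:

// compression-check-sketch.js — observe which encoding the server negotiated
(async () => {
  const res = await fetch("http://localhost:5000/api/products", {
    headers: { "Accept-Encoding": "br, gzip" },
  });
  // fetch decompresses transparently; the header shows what was negotiated.
  console.log("content-encoding:", res.headers.get("content-encoding"));
  console.log("decoded length:", (await res.text()).length);
})();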
@@ -62,6 +62,15 @@ const errorHandler = (err, req, res, next) => {
     error.statusCode = errorMapping.statusCode;
   }
 
+  // SAFEGUARD: Don't send response if headers already sent
+  if (res.headersSent) {
+    logger.warn("Headers already sent in error handler", {
+      path: req.path,
+      error: error.message,
+    });
+    return next(err);
+  }
+
   res.status(error.statusCode).json({
     success: false,
     message: error.message || "Server error",
@@ -89,6 +98,12 @@ const notFoundHandler = (req, res) => {
     });
   }
 
+  // SAFEGUARD: Check if response already sent
+  if (res.headersSent) {
+    logger.warn("Headers already sent in 404 handler", { path: req.path });
+    return;
+  }
+
   res.status(404).json({
     success: false,
     message: "Route not found",
129
backend/middleware/imageOptimization.js
Normal file
@@ -0,0 +1,129 @@
/**
 * Image Optimization Middleware
 * High-performance image serving with streaming and caching
 */
const path = require("path");
const fs = require("fs");
const fsPromises = require("fs").promises;
const logger = require("../config/logger");

// Cache for image metadata (not content)
const metadataCache = new Map();
const METADATA_CACHE_TTL = 600000; // 10 minutes
const METADATA_CACHE_MAX = 1000;

// Image mime types
const MIME_TYPES = {
  ".jpg": "image/jpeg",
  ".jpeg": "image/jpeg",
  ".png": "image/png",
  ".gif": "image/gif",
  ".webp": "image/webp",
  ".svg": "image/svg+xml",
  ".ico": "image/x-icon",
  ".avif": "image/avif",
};

/**
 * Get or cache image metadata
 */
async function getImageMetadata(filePath) {
  const cached = metadataCache.get(filePath);
  if (cached && Date.now() - cached.timestamp < METADATA_CACHE_TTL) {
    return cached.data;
  }

  try {
    const stats = await fsPromises.stat(filePath);
    const metadata = {
      exists: true,
      size: stats.size,
      mtime: stats.mtime.getTime(),
      etag: `"${stats.size}-${stats.mtime.getTime()}"`,
      lastModified: stats.mtime.toUTCString(),
    };

    // LRU eviction
    if (metadataCache.size >= METADATA_CACHE_MAX) {
      const firstKey = metadataCache.keys().next().value;
      metadataCache.delete(firstKey);
    }

    metadataCache.set(filePath, { data: metadata, timestamp: Date.now() });
    return metadata;
  } catch {
    const notFound = { exists: false };
    metadataCache.set(filePath, { data: notFound, timestamp: Date.now() });
    return notFound;
  }
}

/**
 * Serve optimized images with streaming and aggressive caching
 */
const imageOptimization = (uploadsDir) => {
  return async (req, res, next) => {
    // Only handle image requests
    const ext = path.extname(req.path).toLowerCase();
    if (!MIME_TYPES[ext]) {
      return next();
    }

    const imagePath = path.join(uploadsDir, req.path.replace("/uploads/", ""));

    // Get cached metadata
    const metadata = await getImageMetadata(imagePath);
    if (!metadata.exists) {
      return next();
    }

    try {
      // Check if client has cached version (304 Not Modified)
      const ifNoneMatch = req.get("if-none-match");
      const ifModifiedSince = req.get("if-modified-since");

      if (
        ifNoneMatch === metadata.etag ||
        ifModifiedSince === metadata.lastModified
      ) {
        return res.status(304).end();
      }

      // Set aggressive caching headers
      res.set({
        "Content-Type": MIME_TYPES[ext],
        "Content-Length": metadata.size,
        "Cache-Control": "public, max-age=31536000, immutable", // 1 year
        ETag: metadata.etag,
        "Last-Modified": metadata.lastModified,
        Vary: "Accept-Encoding",
        "X-Content-Type-Options": "nosniff",
      });

      // Use streaming for efficient memory usage
      const readStream = fs.createReadStream(imagePath, {
        highWaterMark: 64 * 1024, // 64KB chunks
      });

      readStream.on("error", (error) => {
        logger.error("Image stream error:", {
          path: imagePath,
          error: error.message,
        });
        if (!res.headersSent) {
          res.status(500).end();
        }
      });

      readStream.pipe(res);
    } catch (error) {
      logger.error("Image serve error:", {
        path: imagePath,
        error: error.message,
      });
      next();
    }
  };
};

module.exports = { imageOptimization };
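Editor's note: the factory takes the uploads directory and returns the middleware, so wiring it in is one line. A sketch — the entry-point shape and directory are assumptions consistent with the /uploads prefix the handler strips itself:

// server-sketch.js — hypothetical mount point for the image middleware
const path = require("path");
const express = require("express");
const { imageOptimization } = require("./middleware/imageOptimization");

const app = express();
const uploadsDir = path.join(__dirname, "uploads");

// Registered globally: the handler only matches image extensions and strips
// the /uploads/ prefix internally; everything else falls through via next().
app.use(imageOptimization(uploadsDir));
app.use("/uploads", express.static(uploadsDir)); // fallback for non-image files

app.listen(5000);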
71
backend/middleware/processHandlers.js
Normal file
@@ -0,0 +1,71 @@
/**
 * Global Process Error Handlers
 * Safeguards to prevent crashes from unhandled errors
 */
const logger = require("../config/logger");

/**
 * Handle uncaught exceptions
 */
process.on("uncaughtException", (error) => {
  logger.error("💥 Uncaught Exception", {
    error: error.message,
    stack: error.stack,
  });

  // Give time to log before exiting
  setTimeout(() => {
    process.exit(1);
  }, 1000);
});

/**
 * Handle unhandled promise rejections
 */
process.on("unhandledRejection", (reason, promise) => {
  logger.error("💥 Unhandled Promise Rejection", {
    reason: reason instanceof Error ? reason.message : reason,
    stack: reason instanceof Error ? reason.stack : undefined,
    promise,
  });

  // Don't exit - log and continue
  // In production, you might want to exit: process.exit(1);
});

/**
 * Handle process warnings
 */
process.on("warning", (warning) => {
  logger.warn("⚠️ Process Warning", {
    name: warning.name,
    message: warning.message,
    stack: warning.stack,
  });
});

/**
 * Handle SIGTERM gracefully
 */
process.on("SIGTERM", () => {
  logger.info("👋 SIGTERM received, shutting down gracefully");

  // Give server time to close connections
  setTimeout(() => {
    process.exit(0);
  }, 10000);
});

/**
 * Handle SIGINT gracefully (Ctrl+C)
 */
process.on("SIGINT", () => {
  logger.info("👋 SIGINT received, shutting down gracefully");
  process.exit(0);
});

logger.info("✅ Global process error handlers registered");

module.exports = {
  // Exports for testing if needed
};
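Editor's note: the module registers its handlers at require time and exports nothing of substance, so it only needs importing once, before anything that might throw. A sketch of the intended load order — the entry-point name is an assumption:

// server.js (top of file) — hypothetical entry point
require("./middleware/processHandlers"); // registers handlers as a side effect

const express = require("express");
const app = express();
// ... routes and middleware ...
app.listen(5000);

// Quick manual test: trigger an unhandled rejection and watch the log;
// the process logs the rejection and keeps running rather than crashing.
Promise.reject(new Error("demo rejection"));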
@@ -31,9 +31,7 @@ const validators = {
       .withMessage("Valid email is required")
       .normalizeEmail()
       .trim(),
-    body("password")
-      .isLength({ min: 8 })
-      .withMessage("Password must be at least 8 characters"),
+    body("password").notEmpty().withMessage("Password is required").trim(),
   ],
 
   // User validators
@@ -51,10 +49,10 @@ const validators = {
     )
       .trim(),
     body("password")
-      .isLength({ min: 8 })
-      .matches(/^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)/)
+      .isLength({ min: 12 })
+      .matches(/^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*?&#])/)
       .withMessage(
-        "Password must be at least 8 characters with uppercase, lowercase, and number"
+        "Password must be at least 12 characters with uppercase, lowercase, number, and special character"
       ),
     body("role_id").notEmpty().withMessage("Role is required").trim(),
   ],
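Editor's note: the tightened policy (12+ characters with upper, lower, digit, and a special from @$!%*?&#) is easy to misread in regex form. A quick check of the lookaheads against sample passwords — plain Node, no express-validator required:

// password-policy-sketch.js — exercising the new validation regex
const policy = /^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*?&#])/;

const ok = (pw) => pw.length >= 12 && policy.test(pw);

console.log(ok("Sk7!artshopX"));  // true  — all four classes, 12 chars
console.log(ok("Sk7!art"));       // false — too short
console.log(ok("skyartshop12!")); // false — no uppercase
console.log(ok("Skyartshop123")); // false — no special character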
380
backend/migrations/006_database_fixes.sql
Normal file
@@ -0,0 +1,380 @@
-- =====================================================
-- DATABASE FIXES FOR SKYARTSHOP
-- Date: January 4, 2026
-- Purpose: Add missing indexes, foreign keys, and constraints
-- =====================================================

-- =====================================================
-- PART 1: ADD MISSING FOREIGN KEYS
-- =====================================================

-- Add foreign key constraint for product_images -> products
-- This ensures referential integrity and enables CASCADE deletes
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1 FROM information_schema.table_constraints
    WHERE constraint_name = 'fk_product_images_product'
    AND table_name = 'product_images'
  ) THEN
    ALTER TABLE product_images
    ADD CONSTRAINT fk_product_images_product
    FOREIGN KEY (product_id) REFERENCES products(id)
    ON DELETE CASCADE;
    RAISE NOTICE 'Added foreign key: product_images -> products';
  ELSE
    RAISE NOTICE 'Foreign key product_images -> products already exists';
  END IF;
END $$;

-- Add foreign key constraint for uploads -> media_folders
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1 FROM information_schema.table_constraints
    WHERE constraint_name = 'fk_uploads_folder'
    AND table_name = 'uploads'
  ) THEN
    -- First ensure all uploads have valid folder_id or NULL
    UPDATE uploads
    SET folder_id = NULL
    WHERE folder_id NOT IN (SELECT id FROM media_folders);

    ALTER TABLE uploads
    ADD CONSTRAINT fk_uploads_folder
    FOREIGN KEY (folder_id) REFERENCES media_folders(id)
    ON DELETE SET NULL;
    RAISE NOTICE 'Added foreign key: uploads -> media_folders';
  ELSE
    RAISE NOTICE 'Foreign key uploads -> media_folders already exists';
  END IF;
END $$;

-- =====================================================
-- PART 2: ADD MISSING INDEXES FOR PERFORMANCE
-- =====================================================

-- Products table indexes
CREATE INDEX IF NOT EXISTS idx_products_isactive
  ON products(isactive) WHERE isactive = true;

CREATE INDEX IF NOT EXISTS idx_products_isfeatured
  ON products(isfeatured, createdat DESC)
  WHERE isfeatured = true AND isactive = true;

CREATE INDEX IF NOT EXISTS idx_products_isbestseller
  ON products(isbestseller, createdat DESC)
  WHERE isbestseller = true AND isactive = true;

CREATE INDEX IF NOT EXISTS idx_products_category
  ON products(category, createdat DESC)
  WHERE isactive = true AND category IS NOT NULL;

CREATE INDEX IF NOT EXISTS idx_products_createdat
  ON products(createdat DESC) WHERE isactive = true;

CREATE INDEX IF NOT EXISTS idx_products_price
  ON products(price) WHERE isactive = true;

-- Portfolio projects indexes
CREATE INDEX IF NOT EXISTS idx_portfolio_isactive
  ON portfolioprojects(isactive) WHERE isactive = true;

CREATE INDEX IF NOT EXISTS idx_portfolio_category
  ON portfolioprojects(category) WHERE isactive = true;

CREATE INDEX IF NOT EXISTS idx_portfolio_displayorder
  ON portfolioprojects(displayorder ASC, createdat DESC)
  WHERE isactive = true;

CREATE INDEX IF NOT EXISTS idx_portfolio_createdat
  ON portfolioprojects(createdat DESC) WHERE isactive = true;

-- Pages indexes
CREATE INDEX IF NOT EXISTS idx_pages_slug
  ON pages(slug) WHERE isactive = true;

CREATE INDEX IF NOT EXISTS idx_pages_isactive
  ON pages(isactive) WHERE isactive = true;

CREATE INDEX IF NOT EXISTS idx_pages_createdat
  ON pages(createdat DESC) WHERE isactive = true;

-- Product images indexes (already exist, but verify)
CREATE INDEX IF NOT EXISTS idx_product_images_product_id
  ON product_images(product_id);

CREATE INDEX IF NOT EXISTS idx_product_images_is_primary
  ON product_images(product_id, is_primary) WHERE is_primary = true;

CREATE INDEX IF NOT EXISTS idx_product_images_display_order
  ON product_images(product_id, display_order, created_at);

CREATE INDEX IF NOT EXISTS idx_product_images_color_variant
  ON product_images(color_variant) WHERE color_variant IS NOT NULL;

CREATE INDEX IF NOT EXISTS idx_product_images_color_code
  ON product_images(color_code) WHERE color_code IS NOT NULL;

-- Homepage sections indexes
CREATE INDEX IF NOT EXISTS idx_homepagesections_displayorder
  ON homepagesections(displayorder ASC);

-- Team members indexes
CREATE INDEX IF NOT EXISTS idx_team_members_displayorder
  ON team_members(display_order ASC, created_at DESC);

-- Uploads indexes (verify existing)
CREATE INDEX IF NOT EXISTS idx_uploads_filename
  ON uploads(filename);

CREATE INDEX IF NOT EXISTS idx_uploads_folder_id
  ON uploads(folder_id);

CREATE INDEX IF NOT EXISTS idx_uploads_created_at
  ON uploads(created_at DESC);

CREATE INDEX IF NOT EXISTS idx_uploads_usage
  ON uploads(used_in_type, used_in_id)
  WHERE used_in_type IS NOT NULL;

-- Media folders indexes
CREATE INDEX IF NOT EXISTS idx_media_folders_parent_id
  ON media_folders(parent_id);

CREATE INDEX IF NOT EXISTS idx_media_folders_path
  ON media_folders(path);

-- Session table optimization (for express-session)
CREATE INDEX IF NOT EXISTS idx_session_expire
  ON session(expire);

CREATE INDEX IF NOT EXISTS idx_session_sid
  ON session(sid);

-- =====================================================
-- PART 3: ADD UNIQUE CONSTRAINTS
-- =====================================================

-- Ensure unique slugs (blogposts already has this)
DO $$
BEGIN
  -- Products slug unique constraint
  IF NOT EXISTS (
    SELECT 1 FROM pg_constraint
    WHERE conname = 'unique_products_slug'
  ) THEN
    -- First, fix any duplicate slugs
    WITH duplicates AS (
      SELECT slug, COUNT(*) as cnt, array_agg(id) as ids
      FROM products
      WHERE slug IS NOT NULL
      GROUP BY slug
      HAVING COUNT(*) > 1
    )
    UPDATE products p
    SET slug = p.slug || '-' || substring(p.id, 1, 8)
    WHERE p.id IN (
      SELECT unnest(ids[2:]) FROM duplicates
    );

    ALTER TABLE products
    ADD CONSTRAINT unique_products_slug UNIQUE(slug);
    RAISE NOTICE 'Added unique constraint on products.slug';
  END IF;

  -- Pages slug unique constraint
  IF NOT EXISTS (
    SELECT 1 FROM pg_constraint
    WHERE conname = 'unique_pages_slug'
  ) THEN
    -- Fix any duplicate slugs
    WITH duplicates AS (
      SELECT slug, COUNT(*) as cnt, array_agg(id) as ids
      FROM pages
      WHERE slug IS NOT NULL
      GROUP BY slug
      HAVING COUNT(*) > 1
    )
    UPDATE pages p
    SET slug = p.slug || '-' || p.id::text
    WHERE p.id IN (
      SELECT unnest(ids[2:]) FROM duplicates
    );

    ALTER TABLE pages
    ADD CONSTRAINT unique_pages_slug UNIQUE(slug);
    RAISE NOTICE 'Added unique constraint on pages.slug';
  END IF;
END $$;

-- =====================================================
-- PART 4: ADD CHECK CONSTRAINTS FOR DATA INTEGRITY
-- =====================================================

-- Products price and stock constraints
ALTER TABLE products DROP CONSTRAINT IF EXISTS check_products_price_positive;
ALTER TABLE products
  ADD CONSTRAINT check_products_price_positive
  CHECK (price >= 0);

ALTER TABLE products DROP CONSTRAINT IF EXISTS check_products_stock_nonnegative;
ALTER TABLE products
  ADD CONSTRAINT check_products_stock_nonnegative
  CHECK (stockquantity >= 0);

-- Product images variant constraints
ALTER TABLE product_images DROP CONSTRAINT IF EXISTS check_variant_price_positive;
ALTER TABLE product_images
  ADD CONSTRAINT check_variant_price_positive
  CHECK (variant_price IS NULL OR variant_price >= 0);

ALTER TABLE product_images DROP CONSTRAINT IF EXISTS check_variant_stock_nonnegative;
ALTER TABLE product_images
  ADD CONSTRAINT check_variant_stock_nonnegative
  CHECK (variant_stock >= 0);

-- Ensure display_order is non-negative
ALTER TABLE product_images DROP CONSTRAINT IF EXISTS check_display_order_nonnegative;
ALTER TABLE product_images
  ADD CONSTRAINT check_display_order_nonnegative
  CHECK (display_order >= 0);

ALTER TABLE portfolioprojects DROP CONSTRAINT IF EXISTS check_displayorder_nonnegative;
ALTER TABLE portfolioprojects
  ADD CONSTRAINT check_displayorder_nonnegative
  CHECK (displayorder >= 0);

ALTER TABLE homepagesections DROP CONSTRAINT IF EXISTS check_displayorder_nonnegative;
ALTER TABLE homepagesections
  ADD CONSTRAINT check_displayorder_nonnegative
  CHECK (displayorder >= 0);

ALTER TABLE team_members DROP CONSTRAINT IF EXISTS check_display_order_nonnegative;
ALTER TABLE team_members
  ADD CONSTRAINT check_display_order_nonnegative
  CHECK (display_order >= 0);

-- =====================================================
-- PART 5: ADD MISSING COLUMNS (IF ANY)
-- =====================================================

-- Ensure all tables have proper timestamp columns
ALTER TABLE products
  ADD COLUMN IF NOT EXISTS createdat TIMESTAMP DEFAULT NOW(),
  ADD COLUMN IF NOT EXISTS updatedat TIMESTAMP DEFAULT NOW();

ALTER TABLE portfolioprojects
  ADD COLUMN IF NOT EXISTS createdat TIMESTAMP DEFAULT NOW(),
  ADD COLUMN IF NOT EXISTS updatedat TIMESTAMP DEFAULT NOW();

ALTER TABLE blogposts
  ADD COLUMN IF NOT EXISTS createdat TIMESTAMP DEFAULT NOW(),
  ADD COLUMN IF NOT EXISTS updatedat TIMESTAMP DEFAULT NOW();

ALTER TABLE pages
  ADD COLUMN IF NOT EXISTS createdat TIMESTAMP DEFAULT NOW(),
  ADD COLUMN IF NOT EXISTS updatedat TIMESTAMP DEFAULT NOW();

-- Ensure portfolio has imageurl column
ALTER TABLE portfolioprojects
  ADD COLUMN IF NOT EXISTS imageurl VARCHAR(500);

-- Ensure pages has pagecontent column
ALTER TABLE pages
  ADD COLUMN IF NOT EXISTS pagecontent TEXT;

-- Ensure pages has ispublished column
ALTER TABLE pages
  ADD COLUMN IF NOT EXISTS ispublished BOOLEAN DEFAULT true;

-- Ensure blogposts has ispublished column
ALTER TABLE blogposts
  ADD COLUMN IF NOT EXISTS ispublished BOOLEAN DEFAULT true;

-- =====================================================
-- PART 6: DATA INTEGRITY FIXES
-- =====================================================

-- Generate missing slugs for products
UPDATE products
SET slug = LOWER(REGEXP_REPLACE(REGEXP_REPLACE(name, '[^a-zA-Z0-9\s-]', '', 'g'), '\s+', '-', 'g'))
WHERE (slug IS NULL OR slug = '') AND name IS NOT NULL;

-- Set ispublished from isactive for pages if NULL
UPDATE pages
SET ispublished = isactive
WHERE ispublished IS NULL;

-- Set ispublished from isactive for blog if NULL
UPDATE blogposts
SET ispublished = isactive
WHERE ispublished IS NULL;

-- Migrate portfolio featured image to imageurl if needed
UPDATE portfolioprojects
SET imageurl = featuredimage
WHERE imageurl IS NULL AND featuredimage IS NOT NULL;

-- =====================================================
-- PART 7: ANALYZE TABLES FOR QUERY OPTIMIZATION
-- =====================================================

ANALYZE products;
ANALYZE product_images;
ANALYZE portfolioprojects;
ANALYZE blogposts;
ANALYZE pages;
ANALYZE homepagesections;
ANALYZE uploads;
ANALYZE media_folders;
ANALYZE team_members;
ANALYZE site_settings;

-- =====================================================
-- PART 8: VERIFICATION QUERIES
-- =====================================================

-- Show foreign keys
SELECT
  tc.table_name,
  kcu.column_name,
  ccu.table_name AS foreign_table,
  rc.delete_rule
FROM information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
  ON tc.constraint_name = kcu.constraint_name
JOIN information_schema.constraint_column_usage AS ccu
  ON ccu.constraint_name = tc.constraint_name
JOIN information_schema.referential_constraints AS rc
  ON tc.constraint_name = rc.constraint_name
WHERE tc.constraint_type = 'FOREIGN KEY'
  AND tc.table_schema = 'public'
ORDER BY tc.table_name;

-- Show unique constraints
SELECT
  tc.table_name,
  kcu.column_name,
  tc.constraint_name
FROM information_schema.table_constraints tc
JOIN information_schema.key_column_usage kcu
  ON tc.constraint_name = kcu.constraint_name
WHERE tc.constraint_type = 'UNIQUE'
  AND tc.table_schema = 'public'
  AND tc.table_name IN ('products', 'blogposts', 'pages')
ORDER BY tc.table_name;

-- Show index counts
SELECT
  tablename,
  COUNT(*) as index_count
FROM pg_indexes
WHERE schemaname = 'public'
  AND tablename IN ('products', 'product_images', 'portfolioprojects', 'blogposts', 'pages')
GROUP BY tablename
ORDER BY tablename;

-- =====================================================
-- END OF DATABASE FIXES
-- =====================================================
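Editor's note: the commit includes no runner for this migration. One way to apply it through the same pool the app already uses — the filename matches the diff, everything else is an assumption:

// run-migration-sketch.js — hypothetical way to apply 006_database_fixes.sql
const fs = require("fs");
const path = require("path");
const { pool } = require("./config/database");

(async () => {
  const sql = fs.readFileSync(
    path.join(__dirname, "migrations", "006_database_fixes.sql"),
    "utf8"
  );
  try {
    // node-postgres runs a multi-statement string in one round trip when no
    // parameters are bound; the DO $$ ... $$ blocks execute as written.
    await pool.query(sql);
    console.log("✅ 006_database_fixes.sql applied");
  } catch (e) {
    console.error("❌ Migration failed:", e.message);
    process.exitCode = 1;
  } finally {
    await pool.end();
  }
})();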
262
backend/prisma/schema-updated.prisma
Normal file
@@ -0,0 +1,262 @@
// Prisma Schema - Complete and Aligned with PostgreSQL
// Database schema definition and ORM configuration
// Last updated: January 3, 2026

generator client {
  provider = "prisma-client-js"
}

datasource db {
  provider = "postgresql"
  url      = "postgresql://skyartapp:SkyArt2025Pass@localhost:5432/skyartshop?schema=public"
}

// =====================================================
// ADMIN & AUTH MODELS
// =====================================================

model AdminUser {
  id        String   @id @default(uuid())
  username  String   @unique
  email     String   @unique
  password  String
  name      String?
  role      String   @default("admin")
  isActive  Boolean  @default(true) @map("isactive")
  createdAt DateTime @default(now()) @map("createdat")
  updatedAt DateTime @updatedAt @map("updatedat")

  @@map("adminusers")
}

model Role {
  id          Int      @id @default(autoincrement())
  name        String   @unique
  description String?
  permissions String[]
  createdAt   DateTime @default(now()) @map("createdat")

  @@map("roles")
}

// =====================================================
// PRODUCT MODELS
// =====================================================

model Product {
  id               String   @id @default(uuid())
  name             String
  slug             String?  @unique
  shortDescription String?  @map("shortdescription") @db.Text
  description      String?  @db.Text
  price            Decimal  @db.Decimal(10, 2)
  stockQuantity    Int      @default(0) @map("stockquantity")
  category         String?
  sku              String?
  weight           Decimal? @db.Decimal(10, 2)
  dimensions       String?
  material         String?
  isActive         Boolean  @default(true) @map("isactive")
  isFeatured       Boolean  @default(false) @map("isfeatured")
  isBestseller     Boolean  @default(false) @map("isbestseller")
  metaKeywords     String?  @map("metakeywords") @db.Text
  createdAt        DateTime @default(now()) @map("createdat")
  updatedAt        DateTime @updatedAt @map("updatedat")

  images ProductImage[]

  @@index([isActive])
  @@index([isFeatured, isActive])
  @@index([slug])
  @@index([category])
  @@index([createdAt(sort: Desc)])
  @@map("products")
}

model ProductImage {
  id           String   @id @default(uuid())
  productId    String   @map("product_id")
  imageUrl     String   @map("image_url")
  colorVariant String?  @map("color_variant")
  colorCode    String?  @map("color_code")
  altText      String?  @map("alt_text")
  displayOrder Int      @default(0) @map("display_order")
  isPrimary    Boolean  @default(false) @map("is_primary")
  variantPrice Decimal? @map("variant_price") @db.Decimal(10, 2)
  variantStock Int      @default(0) @map("variant_stock")
  createdAt    DateTime @default(now()) @map("created_at")

  product Product @relation(fields: [productId], references: [id], onDelete: Cascade)

  @@index([productId])
  @@index([productId, isPrimary])
  @@index([productId, displayOrder, createdAt])
  @@index([colorVariant])
  @@map("product_images")
}

// =====================================================
// PORTFOLIO MODELS
// =====================================================

model PortfolioProject {
  id            String   @id @default(uuid())
  title         String
  description   String?  @db.Text
  featuredImage String?  @map("featuredimage")
  imageUrl      String?  @map("imageurl")
  images        Json?    @db.JsonB
  category      String?
  categoryId    Int?     @map("categoryid")
  isActive      Boolean  @default(true) @map("isactive")
  displayOrder  Int      @default(0) @map("displayorder")
  createdAt     DateTime @default(now()) @map("createdat")
  updatedAt     DateTime @updatedAt @map("updatedat")

  @@index([isActive])
  @@index([displayOrder, createdAt(sort: Desc)])
  @@map("portfolioprojects")
}

// =====================================================
// BLOG MODELS
// =====================================================

model BlogPost {
  id          String   @id @default(uuid())
  title       String
  slug        String   @unique
  excerpt     String?  @db.Text
  content     String   @db.Text
  imageUrl    String?  @map("imageurl")
  isPublished Boolean  @default(true) @map("ispublished")
  createdAt   DateTime @default(now()) @map("createdat")
  updatedAt   DateTime @updatedAt @map("updatedat")

  @@index([isPublished])
  @@index([slug])
  @@index([createdAt(sort: Desc)])
  @@map("blogposts")
}

// =====================================================
// PAGE MODELS
// =====================================================

model Page {
  id              String   @id @default(uuid())
  title           String
  slug            String   @unique
  pageContent     String?  @map("pagecontent") @db.Text
  metaTitle       String?  @map("metatitle")
  metaDescription String?  @map("metadescription") @db.Text
  isActive        Boolean  @default(true) @map("isactive")
  isPublished     Boolean  @default(true) @map("ispublished")
  createdAt       DateTime @default(now()) @map("createdat")
  updatedAt       DateTime @updatedAt @map("updatedat")

  @@index([isActive])
  @@index([slug])
  @@map("pages")
}

model HomepageSection {
  id           Int      @id @default(autoincrement())
  sectionType  String   @map("sectiontype")
  title        String?
  content      Json?    @db.JsonB
  displayOrder Int      @default(0) @map("displayorder")
  isActive     Boolean  @default(true) @map("isactive")
  createdAt    DateTime @default(now()) @map("createdat")
  updatedAt    DateTime @updatedAt @map("updatedat")

  @@index([displayOrder])
  @@map("homepagesections")
}

// =====================================================
// MEDIA LIBRARY MODELS
// =====================================================

model Upload {
  id           Int      @id @default(autoincrement())
  filename     String   @unique
  originalName String   @map("original_name")
  filePath     String   @map("file_path")
  fileSize     Int      @map("file_size")
  mimeType     String   @map("mime_type")
  uploadedBy   String?  @map("uploaded_by")
  folderId     Int?     @map("folder_id")
  usedInType   String?  @map("used_in_type")
  usedInId     String?  @map("used_in_id")
  createdAt    DateTime @default(now()) @map("created_at")
  updatedAt    DateTime @updatedAt @map("updated_at")

  folder MediaFolder? @relation(fields: [folderId], references: [id], onDelete: SetNull)

  @@index([filename])
  @@index([createdAt(sort: Desc)])
  @@index([folderId])
  @@index([usedInType, usedInId])
  @@map("uploads")
}

model MediaFolder {
  id        Int      @id @default(autoincrement())
  name      String
  parentId  Int?     @map("parent_id")
  path      String
  createdBy String?  @map("created_by")
  createdAt DateTime @default(now()) @map("created_at")
  updatedAt DateTime @updatedAt @map("updated_at")

  parent   MediaFolder?  @relation("FolderHierarchy", fields: [parentId], references: [id], onDelete: Cascade)
  children MediaFolder[] @relation("FolderHierarchy")
  uploads  Upload[]

  @@unique([parentId, name])
  @@index([parentId])
  @@index([path])
  @@map("media_folders")
}

// =====================================================
// SITE SETTINGS MODELS
// =====================================================

model SiteSetting {
  id        Int      @id @default(autoincrement())
  key       String   @unique
  settings  Json     @default("{}") @db.JsonB
  createdAt DateTime @default(now()) @map("created_at")
  updatedAt DateTime @updatedAt @map("updated_at")

  @@map("site_settings")
}

model TeamMember {
  id           Int      @id @default(autoincrement())
  name         String
  position     String
  bio          String?  @db.Text
  imageUrl     String?  @map("image_url")
  displayOrder Int      @default(0) @map("display_order")
  createdAt    DateTime @default(now()) @map("created_at")
|
||||
updatedAt DateTime @updatedAt @map("updated_at")
|
||||
|
||||
@@index([displayOrder, createdAt(sort: Desc)])
|
||||
@@map("team_members")
|
||||
}
|
||||
|
||||
// =====================================================
|
||||
// SESSION MODEL (for express-session)
|
||||
// =====================================================
|
||||
|
||||
model Session {
|
||||
sid String @id
|
||||
sess Json @db.JsonB
|
||||
expire DateTime
|
||||
|
||||
@@index([expire])
|
||||
@@map("session")
|
||||
}
|
||||
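For reference, a typical read through this schema via the generated Prisma client could look like the sketch below. The client itself is not part of this commit (it assumes `npx prisma generate` has been run); the query mirrors the isActive/createdAt listing pattern the indexes above are designed for.

// Hypothetical usage sketch -- not included in this commit.
const { PrismaClient } = require("@prisma/client");
const prisma = new PrismaClient();

async function listActiveProducts() {
  // Served by @@index([isActive]) and @@index([createdAt(sort: Desc)])
  return prisma.product.findMany({
    where: { isActive: true },
    include: { images: { orderBy: { displayOrder: "asc" } } },
    orderBy: { createdAt: "desc" },
    take: 20,
  });
}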
280
backend/query-optimization-analysis.sql
Normal file
@@ -0,0 +1,280 @@
-- =====================================================
-- QUERY OPTIMIZATION ANALYSIS FOR SKYARTSHOP
-- Date: January 3, 2026
-- Purpose: Analyze and optimize slow queries
-- =====================================================

-- =====================================================
-- PART 1: QUERY PERFORMANCE ANALYSIS
-- =====================================================

-- Show slow queries (if pg_stat_statements is enabled)
-- SELECT
--   substring(query, 1, 100) AS short_query,
--   round(total_exec_time::numeric, 2) AS total_time,
--   calls,
--   round(mean_exec_time::numeric, 2) AS avg_time,
--   round((100 * total_exec_time / sum(total_exec_time) OVER ())::numeric, 2) AS percentage
-- FROM pg_stat_statements
-- WHERE query NOT LIKE '%pg_stat%'
-- ORDER BY total_exec_time DESC
-- LIMIT 20;

-- =====================================================
-- PART 2: TABLE SIZE AND BLOAT ANALYSIS
-- =====================================================

-- Show table sizes
SELECT
  schemaname,
  tablename,
  pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS total_size,
  pg_size_pretty(pg_relation_size(schemaname||'.'||tablename)) AS table_size,
  pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename) - pg_relation_size(schemaname||'.'||tablename)) AS indexes_size
FROM pg_tables
WHERE schemaname = 'public'
ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC;

-- =====================================================
-- PART 3: INDEX USAGE STATISTICS
-- =====================================================

-- Show unused indexes (candidates for removal)
SELECT
  schemaname,
  tablename,
  indexname,
  idx_scan AS times_used,
  pg_size_pretty(pg_relation_size(indexrelid)) AS index_size
FROM pg_stat_user_indexes
WHERE schemaname = 'public'
  AND idx_scan = 0
  AND indexrelname NOT LIKE '%_pkey'
ORDER BY pg_relation_size(indexrelid) DESC;

-- Show most used indexes
SELECT
  schemaname,
  tablename,
  indexname,
  idx_scan AS times_used,
  idx_tup_read AS rows_read,
  idx_tup_fetch AS rows_fetched,
  pg_size_pretty(pg_relation_size(indexrelid)) AS index_size
FROM pg_stat_user_indexes
WHERE schemaname = 'public'
ORDER BY idx_scan DESC
LIMIT 20;

-- =====================================================
-- PART 4: SEQUENTIAL SCAN ANALYSIS
-- =====================================================

-- Tables with high sequential scan rates (may need indexes)
SELECT
  schemaname,
  tablename,
  seq_scan,
  seq_tup_read,
  idx_scan,
  seq_tup_read / NULLIF(seq_scan, 0) AS avg_seq_rows,
  n_live_tup AS live_rows,
  CASE
    WHEN seq_scan > 0 THEN
      round((100.0 * seq_scan / NULLIF(seq_scan + idx_scan, 0))::numeric, 2)
    ELSE 0
  END AS seq_scan_percentage
FROM pg_stat_user_tables
WHERE schemaname = 'public'
  AND seq_scan > 0
ORDER BY seq_scan DESC;

-- =====================================================
-- PART 5: MISSING INDEX SUGGESTIONS
-- =====================================================

-- Queries that might benefit from indexes
-- Based on common query patterns in the application

-- Suggestion 1: Composite index for product listing with filters
COMMENT ON INDEX idx_products_composite IS 'Optimizes: SELECT * FROM products WHERE isactive = true AND isfeatured = true ORDER BY createdat DESC';

-- Suggestion 2: Index for product images by color
COMMENT ON INDEX idx_product_images_color IS 'Optimizes: SELECT * FROM product_images WHERE color_variant = ?';

-- Suggestion 3: Index for blog post slug lookup
COMMENT ON INDEX idx_blogposts_slug IS 'Optimizes: SELECT * FROM blogposts WHERE slug = ? AND ispublished = true';

-- =====================================================
-- PART 6: QUERY REWRITE SUGGESTIONS
-- =====================================================

-- ORIGINAL: Get products with images (inefficient)
-- SELECT p.*, pi.* FROM products p
-- LEFT JOIN product_images pi ON pi.product_id = p.id
-- WHERE p.isactive = true;

-- OPTIMIZED: Use JSON aggregation to reduce rows
-- SELECT p.*,
--   COALESCE(json_agg(pi.*) FILTER (WHERE pi.id IS NOT NULL), '[]') as images
-- FROM products p
-- LEFT JOIN product_images pi ON pi.product_id = p.id
-- WHERE p.isactive = true
-- GROUP BY p.id;

-- =====================================================
-- PART 7: MATERIALIZED VIEW FOR EXPENSIVE QUERIES
-- =====================================================

-- Create materialized view for product catalog (if needed for very high traffic)
-- DROP MATERIALIZED VIEW IF EXISTS mv_product_catalog;
-- CREATE MATERIALIZED VIEW mv_product_catalog AS
-- SELECT
--   p.id, p.name, p.slug, p.shortdescription, p.price,
--   p.category, p.stockquantity, p.isfeatured, p.isbestseller,
--   json_agg(
--     json_build_object(
--       'id', pi.id,
--       'image_url', pi.image_url,
--       'color_variant', pi.color_variant,
--       'is_primary', pi.is_primary
--     ) ORDER BY pi.display_order
--   ) FILTER (WHERE pi.id IS NOT NULL) as images
-- FROM products p
-- LEFT JOIN product_images pi ON pi.product_id = p.id
-- WHERE p.isactive = true
-- GROUP BY p.id;
--
-- CREATE INDEX ON mv_product_catalog(id);
-- CREATE INDEX ON mv_product_catalog(slug);
-- CREATE INDEX ON mv_product_catalog(category);
-- CREATE INDEX ON mv_product_catalog(isfeatured) WHERE isfeatured = true;

-- Refresh command (run after product updates):
-- REFRESH MATERIALIZED VIEW CONCURRENTLY mv_product_catalog;

-- =====================================================
-- PART 8: VACUUM AND ANALYZE
-- =====================================================

-- VACUUM ANALYZE to reclaim dead tuples and refresh planner statistics
-- (VACUUM FULL would be required to fully reclaim disk space)
VACUUM ANALYZE products;
VACUUM ANALYZE product_images;
VACUUM ANALYZE blogposts;
VACUUM ANALYZE portfolioprojects;
VACUUM ANALYZE pages;
VACUUM ANALYZE uploads;
VACUUM ANALYZE media_folders;

-- =====================================================
-- PART 9: CONNECTION POOL OPTIMIZATION
-- =====================================================

-- Show current database connections
SELECT
  datname,
  count(*) AS connections,
  max(backend_start) AS latest_connection
FROM pg_stat_activity
WHERE datname = 'skyartshop'
GROUP BY datname;

-- Show connection limits
SELECT
  name,
  setting,
  unit
FROM pg_settings
WHERE name IN ('max_connections', 'superuser_reserved_connections');

-- =====================================================
-- PART 10: CACHE HIT RATIO
-- =====================================================

-- Check cache hit ratio (should be > 99%)
SELECT
  sum(heap_blks_read) AS heap_read,
  sum(heap_blks_hit) AS heap_hit,
  CASE
    WHEN sum(heap_blks_hit) + sum(heap_blks_read) > 0 THEN
      round(100.0 * sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)), 2)
    ELSE 0
  END AS cache_hit_ratio
FROM pg_statio_user_tables;

-- =====================================================
-- PART 11: SPECIFIC QUERY OPTIMIZATIONS
-- =====================================================

-- Optimized query for product listing page
EXPLAIN ANALYZE
SELECT p.id, p.name, p.slug, p.price, p.stockquantity, p.category,
  COALESCE(
    json_agg(
      json_build_object(
        'id', pi.id,
        'image_url', pi.image_url,
        'is_primary', pi.is_primary
      ) ORDER BY pi.display_order
    ) FILTER (WHERE pi.id IS NOT NULL),
    '[]'::json
  ) AS images
FROM products p
LEFT JOIN product_images pi ON pi.product_id = p.id AND pi.is_primary = true
WHERE p.isactive = true
GROUP BY p.id
ORDER BY p.createdat DESC
LIMIT 50;

-- Optimized query for single product detail
EXPLAIN ANALYZE
SELECT p.*,
  json_agg(
    json_build_object(
      'id', pi.id,
      'image_url', pi.image_url,
      'color_variant', pi.color_variant,
      'color_code', pi.color_code,
      'alt_text', pi.alt_text,
      'display_order', pi.display_order,
      'is_primary', pi.is_primary,
      'variant_price', pi.variant_price,
      'variant_stock', pi.variant_stock
    ) ORDER BY pi.display_order
  ) FILTER (WHERE pi.id IS NOT NULL) AS images
FROM products p
LEFT JOIN product_images pi ON pi.product_id = p.id
WHERE p.slug = 'example-product' AND p.isactive = true
GROUP BY p.id;

-- =====================================================
-- PART 12: PARTITIONING RECOMMENDATIONS (for scale)
-- =====================================================

-- If you have millions of products or images, consider partitioning
-- Example: Partition products by category or date
-- (Note: PARTITION OF requires the parent table to have been created
--  with PARTITION BY LIST; an existing regular table cannot be
--  partitioned in place.)

-- CREATE TABLE products_paintings PARTITION OF products
--   FOR VALUES IN ('Paintings', 'Oil Paintings', 'Watercolor');
--
-- CREATE TABLE products_sculptures PARTITION OF products
--   FOR VALUES IN ('Sculptures', '3D Art');

-- =====================================================
-- RECOMMENDATIONS SUMMARY
-- =====================================================

-- 1. Ensure all indexes from Part 2 of database-analysis-fixes.sql are created
-- 2. Monitor slow queries using pg_stat_statements
-- 3. Set up regular VACUUM ANALYZE jobs (daily or weekly)
-- 4. Keep cache hit ratio above 99%
-- 5. Limit connection pool size to 20-50 connections
-- 6. Use prepared statements for frequently executed queries
-- 7. Implement application-level caching (Redis) for hot data
-- 8. Consider read replicas for scaling reads
-- 9. Use JSONB for flexible schema parts (settings, metadata)
-- 10. Monitor table bloat and run VACUUM FULL if needed

-- =====================================================
-- END OF QUERY OPTIMIZATION ANALYSIS
-- =====================================================
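Recommendation 6 (prepared statements) can be applied from the Node side with node-postgres named statements, which are prepared once per connection when the query object carries a name. A minimal sketch follows; the exact pool export from config/database is an assumption.

// Sketch only -- the pool export shape is assumed, not shown in this commit.
const { pool } = require("./config/database");

async function getProductBySlug(slug) {
  const result = await pool.query({
    name: "product-by-slug", // prepared and cached per connection
    text: "SELECT id, name, slug, price FROM products WHERE slug = $1 AND isactive = true",
    values: [slug],
  });
  return result.rows[0] || null;
}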
@@ -2,6 +2,7 @@ const express = require("express");
const { query } = require("../config/database");
const { requireAuth } = require("../middleware/auth");
const { cache } = require("../middleware/cache");
const { apiLimiter } = require("../config/rateLimiter");
const {
  invalidateProductCache,
  invalidateBlogCache,
@@ -19,6 +20,9 @@ const { getById, deleteById, countRecords } = require("../utils/queryHelpers");
const { HTTP_STATUS } = require("../config/constants");
const router = express.Router();

// Apply rate limiting to all admin routes
router.use(apiLimiter);

// Dashboard stats API
router.get(
  "/dashboard/stats",

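The config/rateLimiter module imported above is not shown in this diff. A plausible minimal shape, using the express-rate-limit package, might be the following sketch; the window and request limit are illustrative values, not confirmed by the commit.

// Hypothetical config/rateLimiter.js -- values are assumptions.
const rateLimit = require("express-rate-limit");

const apiLimiter = rateLimit({
  windowMs: 15 * 60 * 1000, // 15-minute window
  max: 100, // limit each IP to 100 requests per window
  standardHeaders: true, // send RateLimit-* response headers
  legacyHeaders: false, // disable deprecated X-RateLimit-* headers
});

module.exports = { apiLimiter };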
@@ -13,6 +13,11 @@ const {
  sendUnauthorized,
} = require("../utils/responseHelpers");
const { HTTP_STATUS } = require("../config/constants");
const {
  recordFailedAttempt,
  resetFailedAttempts,
  checkBlocked,
} = require("../middleware/bruteForceProtection");
const router = express.Router();

const getUserByEmail = async (email) => {
@@ -47,28 +52,36 @@ const createUserSession = (req, user) => {
// Login endpoint
router.post(
  "/login",
  checkBlocked,
  validators.login,
  handleValidationErrors,
  asyncHandler(async (req, res) => {
    const { email, password } = req.body;
    const ip = req.ip || req.connection.remoteAddress;
    const admin = await getUserByEmail(email);

    if (!admin) {
      logger.warn("Login attempt with invalid email", { email });
      logger.warn("Login attempt with invalid email", { email, ip });
      recordFailedAttempt(ip);
      return sendUnauthorized(res, "Invalid email or password");
    }

    if (!admin.isactive) {
      logger.warn("Login attempt with deactivated account", { email });
      logger.warn("Login attempt with deactivated account", { email, ip });
      recordFailedAttempt(ip);
      return sendUnauthorized(res, "Account is deactivated");
    }

    const validPassword = await bcrypt.compare(password, admin.passwordhash);
    if (!validPassword) {
      logger.warn("Login attempt with invalid password", { email });
      logger.warn("Login attempt with invalid password", { email, ip });
      recordFailedAttempt(ip);
      return sendUnauthorized(res, "Invalid email or password");
    }

    // Reset failed attempts on successful login
    resetFailedAttempts(ip);

    await updateLastLogin(admin.id);
    createUserSession(req, admin);

@@ -81,6 +94,7 @@ router.post(
    logger.info("User logged in successfully", {
      userId: admin.id,
      email: admin.email,
      ip,
    });
    sendSuccess(res, { user: req.session.user });
  });

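The middleware/bruteForceProtection module itself is not part of this diff. A minimal in-memory sketch consistent with the three imported helpers might look like the following; the window, threshold, and response shape are assumptions, and a production version would also need periodic cleanup of stale entries.

// Hypothetical sketch of middleware/bruteForceProtection.js -- not shown in this commit.
const WINDOW_MS = 15 * 60 * 1000; // assumed 15-minute tracking window
const MAX_ATTEMPTS = 5; // assumed block threshold

const attempts = new Map(); // ip -> { count, firstAttempt }

const recordFailedAttempt = (ip) => {
  const now = Date.now();
  const entry = attempts.get(ip);
  if (!entry || now - entry.firstAttempt > WINDOW_MS) {
    attempts.set(ip, { count: 1, firstAttempt: now });
  } else {
    entry.count += 1;
  }
};

const resetFailedAttempts = (ip) => attempts.delete(ip);

// Express middleware: reject requests from IPs over the threshold
const checkBlocked = (req, res, next) => {
  const ip = req.ip || req.connection.remoteAddress;
  const entry = attempts.get(ip);
  if (
    entry &&
    Date.now() - entry.firstAttempt <= WINDOW_MS &&
    entry.count >= MAX_ATTEMPTS
  ) {
    return res.status(429).json({
      success: false,
      message: "Too many failed login attempts. Try again later.",
    });
  }
  next();
};

module.exports = { recordFailedAttempt, resetFailedAttempts, checkBlocked };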
@@ -1,8 +1,16 @@
const express = require("express");
const { query } = require("../config/database");
const { query, batchQuery } = require("../config/database");
const logger = require("../config/logger");
const { asyncHandler } = require("../middleware/errorHandler");
const { cacheMiddleware, cache } = require("../middleware/cache");
const {
  addCacheHeaders,
  fieldFilter,
  paginate,
  trackResponseTime,
  generateETag,
  optimizeJSON,
} = require("../middleware/apiOptimization");
const {
  sendSuccess,
  sendError,
@@ -10,71 +18,73 @@ const {
} = require("../utils/responseHelpers");
const router = express.Router();

// Apply global optimizations to all routes
router.use(trackResponseTime);
router.use(fieldFilter);
router.use(optimizeJSON);

// Reusable query fragments
const PRODUCT_FIELDS = `
  p.id, p.name, p.slug, p.shortdescription, p.description, p.price,
  p.category, p.stockquantity, p.sku, p.weight, p.dimensions,
  p.material, p.isfeatured, p.isbestseller, p.createdat
`;

const PRODUCT_IMAGE_AGG = `
  COALESCE(
    json_agg(
      json_build_object(
        'id', pi.id,
        'image_url', pi.image_url,
        'color_variant', pi.color_variant,
        'color_code', pi.color_code,
        'alt_text', pi.alt_text,
        'is_primary', pi.is_primary,
        'variant_price', pi.variant_price,
        'variant_stock', pi.variant_stock
      ) ORDER BY pi.display_order, pi.created_at
    ) FILTER (WHERE pi.id IS NOT NULL),
    '[]'::json
  ) as images
`;

const handleDatabaseError = (res, error, context) => {
  logger.error(`${context} error:`, error);
  sendError(res);
};

// Get all products - Cached for 5 minutes
// Get all products - Cached for 5 minutes, optimized with index hints
router.get(
  "/products",
  cacheMiddleware(300000), // 5 minutes cache
  cacheMiddleware(300000),
  asyncHandler(async (req, res) => {
    const result = await query(
      `SELECT p.id, p.name, p.slug, p.shortdescription, p.description, p.price,
        p.category, p.stockquantity, p.sku, p.weight, p.dimensions,
        p.material, p.isfeatured, p.isbestseller, p.createdat,
        COALESCE(
          json_agg(
            json_build_object(
              'id', pi.id,
              'image_url', pi.image_url,
              'color_variant', pi.color_variant,
              'color_code', pi.color_code,
              'alt_text', pi.alt_text,
              'is_primary', pi.is_primary,
              'variant_price', pi.variant_price,
              'variant_stock', pi.variant_stock
            ) ORDER BY pi.display_order, pi.created_at
          ) FILTER (WHERE pi.id IS NOT NULL),
          '[]'::json
        ) as images
      `SELECT ${PRODUCT_FIELDS}, ${PRODUCT_IMAGE_AGG}
       FROM products p
       LEFT JOIN product_images pi ON pi.product_id = p.id
       WHERE p.isactive = true
       GROUP BY p.id
       ORDER BY p.createdat DESC`
       ORDER BY p.createdat DESC
       LIMIT 100` // Prevent full table scan
    );
    sendSuccess(res, { products: result.rows });
  })
);

// Get featured products - Cached for 10 minutes
// Get featured products - Cached for 10 minutes, optimized with index scan
router.get(
  "/products/featured",
  cacheMiddleware(600000, (req) => `featured:${req.query.limit || 4}`), // 10 minutes cache
  cacheMiddleware(600000, (req) => `featured:${req.query.limit || 4}`),
  asyncHandler(async (req, res) => {
    const limit = Math.min(parseInt(req.query.limit) || 4, 20); // Max 20 items
    const limit = Math.min(parseInt(req.query.limit) || 4, 20);
    const result = await query(
      `SELECT p.id, p.name, p.slug, p.shortdescription, p.price, p.category, p.stockquantity,
        COALESCE(
          json_agg(
            json_build_object(
              'image_url', pi.image_url,
              'color_variant', pi.color_variant,
              'color_code', pi.color_code,
              'alt_text', pi.alt_text,
              'variant_price', pi.variant_price,
              'variant_stock', pi.variant_stock
            ) ORDER BY pi.display_order, pi.created_at
          ) FILTER (WHERE pi.id IS NOT NULL),
          '[]'::json
        ) as images
      `SELECT p.id, p.name, p.slug, p.shortdescription, p.price,
        p.category, p.stockquantity, ${PRODUCT_IMAGE_AGG}
       FROM products p
       LEFT JOIN product_images pi ON pi.product_id = p.id
       WHERE p.isactive = true AND p.isfeatured = true
       GROUP BY p.id
       ORDER BY p.createdat DESC
       ORDER BY p.createdat DESC
       LIMIT $1`,
      [limit]
    );
@@ -82,23 +92,22 @@ router.get(
  })
);

// Get single product by ID or slug
// Get single product by ID or slug - Cached for 15 minutes
router.get(
  "/products/:identifier",
  cacheMiddleware(900000, (req) => `product:${req.params.identifier}`),
  asyncHandler(async (req, res) => {
    const { identifier } = req.params;

    // Check if identifier is a UUID
    const isUUID =
      /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i.test(
        identifier
      );
    // Optimized UUID check
    const isUUID = identifier.length === 36 && identifier.indexOf("-") === 8;

    // Try to find by ID first, then by slug if not UUID
    let result;
    if (isUUID) {
      result = await query(
        `SELECT p.*,
    // Single optimized query for both cases
    const whereClause = isUUID ? "p.id = $1" : "(p.id = $1 OR p.slug = $1)";

    const result = await query(
      `SELECT p.*,
        COALESCE(
          json_agg(
            json_build_object(
              'id', pi.id,
@@ -111,37 +120,16 @@ router.get(
              'variant_price', pi.variant_price,
              'variant_stock', pi.variant_stock
            ) ORDER BY pi.display_order, pi.created_at
          ) FILTER (WHERE pi.id IS NOT NULL) as images
        FROM products p
        LEFT JOIN product_images pi ON pi.product_id = p.id
        WHERE p.id = $1 AND p.isactive = true
        GROUP BY p.id`,
        [identifier]
      );
    } else {
      // Try both ID and slug for non-UUID identifiers
      result = await query(
        `SELECT p.*,
          json_agg(
            json_build_object(
              'id', pi.id,
              'image_url', pi.image_url,
              'color_variant', pi.color_variant,
              'color_code', pi.color_code,
              'alt_text', pi.alt_text,
              'display_order', pi.display_order,
              'is_primary', pi.is_primary,
              'variant_price', pi.variant_price,
              'variant_stock', pi.variant_stock
            ) ORDER BY pi.display_order, pi.created_at
          ) FILTER (WHERE pi.id IS NOT NULL) as images
        FROM products p
        LEFT JOIN product_images pi ON pi.product_id = p.id
        WHERE (p.id = $1 OR p.slug = $1) AND p.isactive = true
        GROUP BY p.id`,
        [identifier]
      );
    }
          ) FILTER (WHERE pi.id IS NOT NULL),
          '[]'::json
        ) as images
      FROM products p
      LEFT JOIN product_images pi ON pi.product_id = p.id
      WHERE ${whereClause} AND p.isactive = true
      GROUP BY p.id
      LIMIT 1`,
      [identifier]
    );

    if (result.rows.length === 0) {
      return sendNotFound(res, "Product");
@@ -231,24 +219,31 @@
  })
);

// Get custom pages
// Get custom pages - Cached for 10 minutes
router.get(
  "/pages",
  cacheMiddleware(600000),
  asyncHandler(async (req, res) => {
    const result = await query(
      `SELECT id, title, slug, pagecontent as content, metatitle, metadescription, isactive, createdat
       FROM pages WHERE isactive = true ORDER BY createdat DESC`
      `SELECT id, title, slug, pagecontent as content, metatitle,
        metadescription, isactive, createdat
       FROM pages
       WHERE isactive = true
       ORDER BY createdat DESC`
    );
    sendSuccess(res, { pages: result.rows });
  })
);

// Get single page by slug
// Get single page by slug - Cached for 15 minutes
router.get(
  "/pages/:slug",
  cacheMiddleware(900000, (req) => `page:${req.params.slug}`),
  asyncHandler(async (req, res) => {
    const result = await query(
      "SELECT id, title, slug, pagecontent as content, metatitle, metadescription FROM pages WHERE slug = $1 AND isactive = true",
      `SELECT id, title, slug, pagecontent as content, metatitle, metadescription
       FROM pages
       WHERE slug = $1 AND isactive = true`,
      [req.params.slug]
    );

@@ -260,9 +255,10 @@ router.get(
  })
);

// Get menu items for frontend navigation
// Get menu items for frontend navigation - Cached for 30 minutes
router.get(
  "/menu",
  cacheMiddleware(1800000),
  asyncHandler(async (req, res) => {
    const result = await query(
      "SELECT settings FROM site_settings WHERE key = 'menu'"

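middleware/cache is likewise not included in this diff; the call sites above imply a signature of cacheMiddleware(ttlMs, keyFn?). A minimal in-memory sketch consistent with that usage follows -- the response interception via res.json is one possible design, not the commit's actual implementation.

// Hypothetical sketch of middleware/cache.js -- not shown in this commit.
const store = new Map(); // key -> { body, expires }

const cacheMiddleware = (ttlMs, keyFn) => (req, res, next) => {
  const key = keyFn ? keyFn(req) : req.originalUrl;
  const hit = store.get(key);
  if (hit && hit.expires > Date.now()) {
    return res.json(hit.body); // serve the cached payload
  }
  // Wrap res.json so the outgoing body gets cached on the way out
  const originalJson = res.json.bind(res);
  res.json = (body) => {
    store.set(key, { body, expires: Date.now() + ttlMs });
    return originalJson(body);
  };
  next();
};

module.exports = { cacheMiddleware, cache: store };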
@@ -9,6 +9,55 @@ const logger = require("../config/logger");
const { uploadLimiter } = require("../config/rateLimiter");
require("dotenv").config();

// Magic bytes for image file validation
const MAGIC_BYTES = {
  jpeg: [0xff, 0xd8, 0xff],
  png: [0x89, 0x50, 0x4e, 0x47],
  gif: [0x47, 0x49, 0x46],
  webp: [0x52, 0x49, 0x46, 0x46],
};

// Validate file content by checking magic bytes
const validateFileContent = async (filePath, mimetype) => {
  try {
    const buffer = Buffer.alloc(8);
    const fd = await fs.open(filePath, "r");
    await fd.read(buffer, 0, 8, 0);
    await fd.close();

    // Check JPEG
    if (mimetype === "image/jpeg" || mimetype === "image/jpg") {
      return buffer[0] === 0xff && buffer[1] === 0xd8 && buffer[2] === 0xff;
    }
    // Check PNG
    if (mimetype === "image/png") {
      return (
        buffer[0] === 0x89 &&
        buffer[1] === 0x50 &&
        buffer[2] === 0x4e &&
        buffer[3] === 0x47
      );
    }
    // Check GIF
    if (mimetype === "image/gif") {
      return buffer[0] === 0x47 && buffer[1] === 0x49 && buffer[2] === 0x46;
    }
    // Check WebP
    if (mimetype === "image/webp") {
      return (
        buffer[0] === 0x52 &&
        buffer[1] === 0x49 &&
        buffer[2] === 0x46 &&
        buffer[3] === 0x46
      );
    }
    return false;
  } catch (error) {
    logger.error("Magic byte validation error:", error);
    return false;
  }
};

// Allowed file types
const ALLOWED_MIME_TYPES = (
  process.env.ALLOWED_FILE_TYPES || "image/jpeg,image/png,image/gif,image/webp"
@@ -97,6 +146,28 @@ router.post(
    const folderId = req.body.folder_id ? parseInt(req.body.folder_id) : null;
    const files = [];

    // Validate file content with magic bytes
    for (const file of req.files) {
      const isValid = await validateFileContent(file.path, file.mimetype);
      if (!isValid) {
        logger.warn("File upload rejected - magic byte mismatch", {
          filename: file.filename,
          mimetype: file.mimetype,
          userId: uploadedBy,
        });
        // Clean up invalid file
        await fs
          .unlink(file.path)
          .catch((err) =>
            logger.error("Failed to clean up invalid file:", err)
          );
        return res.status(400).json({
          success: false,
          message: `File ${file.originalname} failed security validation`,
        });
      }
    }

    // Insert each file into database
    for (const file of req.files) {
      try {

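One caveat on the WebP branch above: 0x52 0x49 0x46 0x46 ("RIFF") is a container signature shared with other formats such as WAV and AVI, so a stricter test would also confirm the "WEBP" tag at bytes 8-11, which requires reading 12 bytes instead of 8. A possible tightening, as a sketch rather than a change to the commit:

// Sketch of a stricter WebP check -- not part of this commit.
// RIFF layout: bytes 0-3 "RIFF", bytes 4-7 file size, bytes 8-11 "WEBP".
const isWebP = (buffer) =>
  buffer.length >= 12 &&
  buffer.toString("ascii", 0, 4) === "RIFF" &&
  buffer.toString("ascii", 8, 12) === "WEBP";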
@@ -2,6 +2,7 @@ const express = require("express");
const bcrypt = require("bcrypt");
const { query } = require("../config/database");
const { requireAuth, requireRole } = require("../middleware/auth");
const { apiLimiter } = require("../config/rateLimiter");
const logger = require("../config/logger");
const {
  validators,
@@ -10,6 +11,9 @@ const {
const { asyncHandler } = require("../middleware/errorHandler");
const router = express.Router();

// Apply rate limiting
router.use(apiLimiter);

// Require admin role for all routes
router.use(requireAuth);
router.use(requireRole("role-admin"));
@@ -211,12 +215,28 @@ router.put("/:id", async (req, res) => {

    // Handle password update if provided
    if (password !== undefined && password !== "") {
      if (password.length < 8) {
      // Validate password strength
      if (password.length < 12) {
        return res.status(400).json({
          success: false,
          message: "Password must be at least 8 characters long",
          message: "Password must be at least 12 characters long",
        });
      }

      // Check password complexity
      const hasUpperCase = /[A-Z]/.test(password);
      const hasLowerCase = /[a-z]/.test(password);
      const hasNumber = /\d/.test(password);
      const hasSpecialChar = /[@$!%*?&#]/.test(password);

      if (!hasUpperCase || !hasLowerCase || !hasNumber || !hasSpecialChar) {
        return res.status(400).json({
          success: false,
          message:
            "Password must contain uppercase, lowercase, number, and special character",
        });
      }

      const hashedPassword = await bcrypt.hash(password, 10);
      updates.push(`passwordhash = $${paramCount++}`);
      values.push(hashedPassword);

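For what it's worth, the four individual tests above could equally be written as a single lookahead regex enforcing the same policy; both forms are equivalent, so this is purely a matter of style.

// Equivalent single-regex form of the complexity check above.
// Each lookahead asserts one character class without consuming input;
// .{12,} enforces the minimum length.
const PASSWORD_POLICY =
  /^(?=.*[A-Z])(?=.*[a-z])(?=.*\d)(?=.*[@$!%*?&#]).{12,}$/;

const isStrongPassword = (password) => PASSWORD_POLICY.test(password);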
@@ -6,6 +6,7 @@ const fs = require("fs");
const helmet = require("helmet");
const cors = require("cors");
const compressionMiddleware = require("./middleware/compression");
const { imageOptimization } = require("./middleware/imageOptimization");
const { pool, healthCheck } = require("./config/database");
const logger = require("./config/logger");
const { apiLimiter, authLimiter } = require("./config/rateLimiter");
@@ -18,6 +19,9 @@ const {
} = require("./config/constants");
require("dotenv").config();

// SAFEGUARD: Register global process error handlers FIRST
require("./middleware/processHandlers");

const app = express();
const PORT = process.env.PORT || 5000;
const baseDir = getBaseDir();
@@ -59,6 +63,8 @@ app.use(
        "https://fonts.gstatic.com",
      ],
      connectSrc: ["'self'", "https://cdn.jsdelivr.net"],
      objectSrc: ["'none'"],
      upgradeInsecureRequests: !isDevelopment() ? [] : null,
    },
  },
  hsts: {
@@ -66,6 +72,10 @@ app.use(
    includeSubDomains: true,
    preload: true,
  },
  frameguard: { action: "deny" },
  xssFilter: true,
  noSniff: true,
  referrerPolicy: { policy: "strict-origin-when-cross-origin" },
})
);

@@ -128,26 +138,47 @@ app.get("/index", (req, res) => {
app.use(
  express.static(path.join(baseDir, "public"), {
    index: false,
    maxAge: "1d", // Cache static files for 1 day
    maxAge: "30d", // Cache static files for 30 days
    etag: true,
    lastModified: true,
    setHeaders: (res, filepath) => {
      // Aggressive caching for versioned files
      if (
        filepath.includes("?v=") ||
        filepath.match(/\.(\w+)\.[a-f0-9]{8,}\./)
      ) {
        res.setHeader("Cache-Control", "public, max-age=31536000, immutable");
      }
    },
  })
);
app.use(
  "/assets",
  express.static(path.join(baseDir, "assets"), {
    maxAge: "7d", // Cache assets for 7 days
    maxAge: "365d", // Cache assets for 1 year
    etag: true,
    lastModified: true,
    immutable: true,
    setHeaders: (res, filepath) => {
      // Add immutable for all asset files
      res.setHeader("Cache-Control", "public, max-age=31536000, immutable");

      // Add resource hints for fonts
      if (filepath.endsWith(".woff2") || filepath.endsWith(".woff")) {
        res.setHeader("Access-Control-Allow-Origin", "*");
      }
    },
  })
);
// Optimized image serving with aggressive caching
app.use("/uploads", imageOptimization(path.join(baseDir, "uploads")));
app.use(
  "/uploads",
  express.static(path.join(baseDir, "uploads"), {
    maxAge: "1d", // Cache uploads for 1 day
    maxAge: "365d", // Cache uploads for 1 year
    etag: true,
    lastModified: true,
    immutable: true,
  })
);

@@ -166,10 +197,11 @@ app.use(
    secure: !isDevelopment(),
    httpOnly: true,
    maxAge: SESSION_CONFIG.COOKIE_MAX_AGE,
    sameSite: "lax",
    sameSite: isDevelopment() ? "lax" : "strict",
  },
  proxy: !isDevelopment(),
  name: SESSION_CONFIG.SESSION_NAME,
  rolling: true, // Reset session expiration on each request
})
);

@@ -1,21 +1,48 @@
const { query } = require("../config/database");

// Whitelist of allowed table names to prevent SQL injection
const ALLOWED_TABLES = [
  "products",
  "product_images",
  "portfolioprojects",
  "blogposts",
  "pages",
  "adminusers",
  "roles",
  "uploads",
  "media_folders",
  "team_members",
  "site_settings",
  "session",
];

// Validate table name against whitelist
const validateTableName = (table) => {
  if (!ALLOWED_TABLES.includes(table)) {
    throw new Error(`Invalid table name: ${table}`);
  }
  return table;
};

const buildSelectQuery = (
  table,
  conditions = [],
  orderBy = "createdat DESC"
) => {
  validateTableName(table);
  const whereClause =
    conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
  return `SELECT * FROM ${table} ${whereClause} ORDER BY ${orderBy}`;
};

const getById = async (table, id) => {
  validateTableName(table);
  const result = await query(`SELECT * FROM ${table} WHERE id = $1`, [id]);
  return result.rows[0] || null;
};

const getAllActive = async (table, orderBy = "createdat DESC") => {
  validateTableName(table);
  const result = await query(
    `SELECT * FROM ${table} WHERE isactive = true ORDER BY ${orderBy}`
  );
@@ -23,6 +50,7 @@ const getAllActive = async (table, orderBy = "createdat DESC") => {
};

const deleteById = async (table, id) => {
  validateTableName(table);
  const result = await query(
    `DELETE FROM ${table} WHERE id = $1 RETURNING id`,
    [id]
@@ -31,6 +59,7 @@ const deleteById = async (table, id) => {
};

const countRecords = async (table, condition = "") => {
  validateTableName(table);
  const whereClause = condition ? `WHERE ${condition}` : "";
  const result = await query(`SELECT COUNT(*) FROM ${table} ${whereClause}`);
  return parseInt(result.rows[0].count);
@@ -42,4 +71,5 @@ module.exports = {
  getAllActive,
  deleteById,
  countRecords,
  validateTableName,
};

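Callers of these helpers are unchanged; the whitelist only changes failure behavior for bad table names. For illustration (the identifiers passed in are made up):

const { getById, countRecords } = require("./utils/queryHelpers");

async function demo() {
  // Whitelisted table names pass through unchanged
  const product = await getById("products", "some-uuid");
  const total = await countRecords("products", "isactive = true");
  console.log(product, total);

  // A non-whitelisted name now throws before any SQL is interpolated
  await getById("products; DROP TABLE products;--", "x"); // -> Error: Invalid table name
}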
111
backend/utils/sanitization.js
Normal file
@@ -0,0 +1,111 @@
/**
 * Sanitization utilities for user input
 * Prevents XSS attacks by escaping HTML special characters
 */

/**
 * Escape HTML special characters to prevent XSS
 * @param {string} str - String to escape
 * @returns {string} Escaped string
 */
const escapeHtml = (str) => {
  if (typeof str !== "string") {
    return str;
  }

  const htmlEscapeMap = {
    "&": "&amp;",
    "<": "&lt;",
    ">": "&gt;",
    '"': "&quot;",
    "'": "&#x27;",
    "/": "&#x2F;",
  };

  return str.replace(/[&<>"'/]/g, (char) => htmlEscapeMap[char]);
};

/**
 * Sanitize object by escaping all string values
 * @param {Object} obj - Object to sanitize
 * @returns {Object} Sanitized object
 */
const sanitizeObject = (obj) => {
  if (typeof obj !== "object" || obj === null) {
    return obj;
  }

  const sanitized = {};
  for (const [key, value] of Object.entries(obj)) {
    if (typeof value === "string") {
      sanitized[key] = escapeHtml(value);
    } else if (typeof value === "object" && value !== null) {
      sanitized[key] = sanitizeObject(value);
    } else {
      sanitized[key] = value;
    }
  }

  return sanitized;
};

/**
 * Strip all HTML tags from a string
 * @param {string} str - String to strip
 * @returns {string} String without HTML tags
 */
const stripHtml = (str) => {
  if (typeof str !== "string") {
    return str;
  }

  return str.replace(/<[^>]*>/g, "");
};

/**
 * Validate and sanitize URL
 * @param {string} url - URL to validate
 * @returns {string|null} Sanitized URL or null if invalid
 */
const sanitizeUrl = (url) => {
  if (typeof url !== "string") {
    return null;
  }

  try {
    const parsed = new URL(url);
    // Only allow http and https protocols
    if (!["http:", "https:"].includes(parsed.protocol)) {
      return null;
    }
    return parsed.toString();
  } catch {
    return null;
  }
};

/**
 * Sanitize filename for safe storage
 * @param {string} filename - Filename to sanitize
 * @returns {string} Sanitized filename
 */
const sanitizeFilename = (filename) => {
  if (typeof filename !== "string") {
    return "file";
  }

  // Remove path separators and null bytes
  return filename
    .replace(/[\/\\]/g, "")
    .replace(/\0/g, "")
    .replace(/[^a-zA-Z0-9._-]/g, "-")
    .substring(0, 255);
};

module.exports = {
  escapeHtml,
  sanitizeObject,
  stripHtml,
  sanitizeUrl,
  sanitizeFilename,
};
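A quick illustration of how these helpers behave (inputs are made up; the escapeHtml outputs assume the entity map shown above):

const {
  escapeHtml,
  sanitizeUrl,
  sanitizeFilename,
} = require("./utils/sanitization");

// Script tags are neutralized rather than removed:
escapeHtml('<script>alert("xss")</script>');
// -> "&lt;script&gt;alert(&quot;xss&quot;)&lt;&#x2F;script&gt;"

// Non-http(s) protocols are rejected outright:
sanitizeUrl("javascript:alert(1)"); // -> null
sanitizeUrl("https://example.com/a"); // -> "https://example.com/a"

// Path traversal collapses to a harmless name:
sanitizeFilename("../../etc/passwd"); // -> "....etcpasswd"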
218
backend/validate-database.sh
Executable file
@@ -0,0 +1,218 @@
#!/bin/bash
# =====================================================
# Database Schema Validation Script
# Purpose: Apply all database fixes and verify alignment
# Date: January 3, 2026
# =====================================================

set -e  # Exit on error

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DB_NAME="skyartshop"
DB_USER="skyartapp"
DB_HOST="localhost"
export PGPASSWORD="SkyArt2025Pass"

echo "=================================="
echo "SkyArtShop Database Fix & Validation"
echo "=================================="
echo ""

# Check if PostgreSQL is running
echo "1. Checking PostgreSQL connection..."
if ! psql -U $DB_USER -d $DB_NAME -h $DB_HOST -c "SELECT 1;" > /dev/null 2>&1; then
  echo "❌ Cannot connect to PostgreSQL"
  echo "   Make sure PostgreSQL is running and credentials are correct"
  exit 1
fi
echo "✅ PostgreSQL connection successful"
echo ""

# Apply database analysis and fixes
echo "2. Applying database schema fixes..."
if psql -U $DB_USER -d $DB_NAME -h $DB_HOST -f "$SCRIPT_DIR/database-analysis-fixes.sql" > /dev/null 2>&1; then
  echo "✅ Database schema fixes applied"
else
  echo "⚠️  Some fixes may have failed (this is normal if they were already applied)"
fi
echo ""

# Verify tables exist
echo "3. Verifying core tables..."
TABLES=(
  "products"
  "product_images"
  "adminusers"
  "uploads"
  "media_folders"
  "blogposts"
  "portfolioprojects"
  "pages"
  "homepagesections"
  "team_members"
  "site_settings"
)

MISSING_TABLES=()
for table in "${TABLES[@]}"; do
  if psql -U $DB_USER -d $DB_NAME -h $DB_HOST -tAc "SELECT to_regclass('public.$table');" | grep -q "$table"; then
    echo "  ✅ $table"
  else
    echo "  ❌ $table (MISSING)"
    MISSING_TABLES+=("$table")
  fi
done
echo ""

if [ ${#MISSING_TABLES[@]} -gt 0 ]; then
  echo "⚠️  Missing tables: ${MISSING_TABLES[*]}"
  echo "   Please create these tables manually"
else
  echo "✅ All core tables exist"
fi
echo ""

# Verify indexes
echo "4. Checking critical indexes..."
INDEXES=(
  "idx_products_isactive"
  "idx_products_slug"
  "idx_product_images_product_id"
  "idx_blogposts_slug"
  "idx_pages_slug"
  "idx_uploads_folder_id"
)

MISSING_INDEXES=()
for index in "${INDEXES[@]}"; do
  if psql -U $DB_USER -d $DB_NAME -h $DB_HOST -tAc "SELECT to_regclass('public.$index');" | grep -q "$index"; then
    echo "  ✅ $index"
  else
    echo "  ⚠️  $index (missing or pending)"
    MISSING_INDEXES+=("$index")
  fi
done
echo ""

if [ ${#MISSING_INDEXES[@]} -gt 0 ]; then
  echo "⚠️  Some indexes are missing: ${MISSING_INDEXES[*]}"
else
  echo "✅ All critical indexes exist"
fi
echo ""

# Verify foreign keys
echo "5. Checking foreign key constraints..."
FK_COUNT=$(psql -U $DB_USER -d $DB_NAME -h $DB_HOST -tAc "
  SELECT COUNT(*)
  FROM information_schema.table_constraints
  WHERE constraint_type = 'FOREIGN KEY'
    AND table_schema = 'public';
")
echo "   Found $FK_COUNT foreign key constraints"
echo ""

# Show table row counts
echo "6. Table row counts:"
psql -U $DB_USER -d $DB_NAME -h $DB_HOST -c "
  SELECT 'products' as table_name, COUNT(*) as rows FROM products
  UNION ALL
  SELECT 'product_images', COUNT(*) FROM product_images
  UNION ALL
  SELECT 'blogposts', COUNT(*) FROM blogposts
  UNION ALL
  SELECT 'portfolioprojects', COUNT(*) FROM portfolioprojects
  UNION ALL
  SELECT 'pages', COUNT(*) FROM pages
  UNION ALL
  SELECT 'uploads', COUNT(*) FROM uploads
  UNION ALL
  SELECT 'media_folders', COUNT(*) FROM media_folders
  UNION ALL
  SELECT 'adminusers', COUNT(*) FROM adminusers
  ORDER BY table_name;
" 2>/dev/null || echo "   Unable to query row counts"
echo ""

# Check for missing columns
echo "7. Validating critical columns..."
COLUMN_CHECKS=(
  "products:slug"
  "products:shortdescription"
  "products:isfeatured"
  "product_images:color_variant"
  "product_images:variant_price"
  "uploads:folder_id"
  "pages:ispublished"
)

MISSING_COLUMNS=()
for check in "${COLUMN_CHECKS[@]}"; do
  table="${check%:*}"
  column="${check#*:}"

  if psql -U $DB_USER -d $DB_NAME -h $DB_HOST -tAc "
    SELECT COUNT(*)
    FROM information_schema.columns
    WHERE table_name = '$table'
      AND column_name = '$column';
  " | grep -q "1"; then
    echo "  ✅ $table.$column"
  else
    echo "  ❌ $table.$column (MISSING)"
    MISSING_COLUMNS+=("$table.$column")
  fi
done
echo ""

if [ ${#MISSING_COLUMNS[@]} -gt 0 ]; then
  echo "❌ Missing columns: ${MISSING_COLUMNS[*]}"
else
  echo "✅ All critical columns exist"
fi
echo ""

# Run ANALYZE for query optimization
echo "8. Running ANALYZE to update statistics..."
psql -U $DB_USER -d $DB_NAME -h $DB_HOST -c "ANALYZE;" > /dev/null 2>&1
echo "✅ Database statistics updated"
echo ""

# Summary
echo "=================================="
echo "VALIDATION SUMMARY"
echo "=================================="

TOTAL_ISSUES=0
if [ ${#MISSING_TABLES[@]} -gt 0 ]; then
  echo "❌ Missing tables: ${#MISSING_TABLES[@]}"
  TOTAL_ISSUES=$((TOTAL_ISSUES + ${#MISSING_TABLES[@]}))
fi

if [ ${#MISSING_INDEXES[@]} -gt 0 ]; then
  echo "⚠️  Missing indexes: ${#MISSING_INDEXES[@]}"
fi

if [ ${#MISSING_COLUMNS[@]} -gt 0 ]; then
  echo "❌ Missing columns: ${#MISSING_COLUMNS[@]}"
  TOTAL_ISSUES=$((TOTAL_ISSUES + ${#MISSING_COLUMNS[@]}))
fi

echo ""
if [ $TOTAL_ISSUES -eq 0 ]; then
  echo "✅ Database schema is healthy!"
  echo ""
  echo "Next steps:"
  echo "1. Review query optimization: query-optimization-analysis.sql"
  echo "2. Update Prisma schema: backend/prisma/schema-updated.prisma"
  echo "3. Restart backend server to apply changes"
else
  echo "⚠️  Found $TOTAL_ISSUES critical issues"
  echo ""
  echo "Please:"
  echo "1. Review the output above"
  echo "2. Run database-analysis-fixes.sql manually if needed"
  echo "3. Create any missing tables/columns"
fi
echo ""
echo "=================================="
@@ -229,7 +229,7 @@
          </div>
          <% } %>

          <form method="POST" action="/admin/login">
          <form method="POST" action="/admin/login" id="loginForm">
            <div class="mb-3">
              <label for="email" class="form-label">Username:</label>
              <input
@@ -256,8 +256,20 @@
            </div>

            <div class="btn-group-custom">
              <button type="submit" class="btn-custom btn-login">Login</button>
              <button type="reset" class="btn-custom btn-reset">Reset</button>
              <button
                type="submit"
                class="btn-custom btn-login"
                id="loginButton"
              >
                Login
              </button>
              <button
                type="button"
                class="btn-custom btn-reset"
                onclick="document.getElementById('loginForm').reset();"
              >
                Reset
              </button>
            </div>
          </form>