'use client'
import { useState, useEffect } from 'react'
import { motion } from 'framer-motion'
import { ChevronDownIcon } from '@ciphera-net/ui'
import { PerformanceStats as Stats, PerformanceByPageStat, getPerformanceByPage } from '@/lib/api/stats'
import { Select } from '@ciphera-net/ui'
import { TableSkeleton } from '@/components/skeletons'
/**
 * Props for the PerformanceStats panel.
 *
 * `stats` is required; everything else is optional and only needed to enable
 * re-fetching the per-page table when the sort key changes (see
 * `handleSortChange` in the component: all four of siteId/startDate/endDate/
 * getPerformanceByPage must be present for a refetch to fire).
 */
interface Props {
// Aggregated Web Vitals for the site (lcp/cls/inp are read below).
stats: Stats
// Per-page metric rows supplied by the dashboard; null/undefined → empty table.
performanceByPage?: PerformanceByPageStat[] | null
// Identifiers for the refetch call; unused unless all are provided.
siteId?: string
// Date range bounds passed through to getPerformanceByPage.
// NOTE(review): format (ISO date? datetime?) not visible here — confirm against the API.
startDate?: string
endDate?: string
// Injectable fetcher (same signature as the module-level import), which makes
// the component testable and lets callers omit refetch support entirely.
getPerformanceByPage?: typeof getPerformanceByPage
}
// Renders a single Web Vitals metric card (label, numeric value, unit) tinted
// by its score band.
// NOTE(review): the JSX body of this component appears to have been stripped
// from this view — `return ( )` is empty and `label`/`value`/`unit`/`colors`
// are consequently unreferenced. Restore the markup from the original file;
// do not delete the "unused" bindings. TODO confirm.
function MetricCard({ label, value, unit, score }: { label: string, value: number, unit: string, score: 'good' | 'needs-improvement' | 'poor' }) {
// Tailwind class bundles (light + dark variants) per score band; presumably
// applied to the card container in the missing JSX — verify once restored.
const colors = {
good: 'text-green-600 bg-green-50 dark:bg-green-900/20 dark:text-green-400 border-green-200 dark:border-green-800',
'needs-improvement': 'text-yellow-600 bg-yellow-50 dark:bg-yellow-900/20 dark:text-yellow-400 border-yellow-200 dark:border-yellow-800',
poor: 'text-red-600 bg-red-50 dark:bg-red-900/20 dark:text-red-400 border-red-200 dark:border-red-800',
}
return (
)
}
/**
 * Dashboard panel showing aggregated Core Web Vitals (LCP / CLS / INP) plus a
 * collapsible "worst pages" table. When `siteId`, `startDate`, `endDate` and
 * `getPerformanceByPage` are all provided, changing the table sort re-fetches
 * rows from the API; otherwise the component renders whatever
 * `performanceByPage` the dashboard passed in.
 */
export default function PerformanceStats({ stats, performanceByPage, siteId, startDate, endDate, getPerformanceByPage }: Props) {
  // * Scoring Logic (based on Google Web Vitals)
  type Score = 'good' | 'needs-improvement' | 'poor'

  // Rate one metric against the published Web Vitals thresholds
  // (LCP/INP in milliseconds, CLS unitless).
  const getScore = (metric: 'lcp' | 'cls' | 'inp', value: number): Score => {
    if (metric === 'lcp') return value <= 2500 ? 'good' : value <= 4000 ? 'needs-improvement' : 'poor'
    if (metric === 'cls') return value <= 0.1 ? 'good' : value <= 0.25 ? 'needs-improvement' : 'poor'
    if (metric === 'inp') return value <= 200 ? 'good' : value <= 500 ? 'needs-improvement' : 'poor'
    return 'good'
  }

  // * Overall performance: worst of LCP, CLS, INP (matches Google’s “field” rating)
  const getOverallScore = (s: { lcp: number; cls: number; inp: number }): Score => {
    const lcp = getScore('lcp', s.lcp)
    const cls = getScore('cls', s.cls)
    const inp = getScore('inp', s.inp)
    if (lcp === 'poor' || cls === 'poor' || inp === 'poor') return 'poor'
    if (lcp === 'needs-improvement' || cls === 'needs-improvement' || inp === 'needs-improvement') return 'needs-improvement'
    return 'good'
  }

  const overallScore = getOverallScore(stats)
  const overallLabel = { good: 'Good', 'needs-improvement': 'Needs improvement', poor: 'Poor' }[overallScore]
  const overallBadgeClass = {
    good: 'text-green-700 dark:text-green-400 bg-green-100 dark:bg-green-900/30 border-green-200 dark:border-green-800',
    'needs-improvement': 'text-yellow-700 dark:text-yellow-400 bg-yellow-100 dark:bg-yellow-900/30 border-yellow-200 dark:border-yellow-800',
    poor: 'text-red-700 dark:text-red-400 bg-red-100 dark:bg-red-900/30 border-red-200 dark:border-red-800',
  }[overallScore]

  const [mainExpanded, setMainExpanded] = useState(false)
  const [sortBy, setSortBy] = useState<'lcp' | 'cls' | 'inp'>('lcp')
  // FIX: was `useState(null)`, which under strict mode infers state type `null`
  // and rejects `setOverrideRows(rows)` in handleSortChange below. Annotate the
  // row type explicitly.
  const [overrideRows, setOverrideRows] = useState<PerformanceByPageStat[] | null>(null)
  const [loadingTable, setLoadingTable] = useState(false)
  const [worstPagesOpen, setWorstPagesOpen] = useState(false)

  // * When props.performanceByPage changes (e.g. date range), clear override so we use dashboard data
  useEffect(() => {
    setOverrideRows(null)
  }, [performanceByPage])

  // Override (sort-refetched) rows win over dashboard-provided rows; empty
  // array when neither is available.
  const rows = overrideRows ?? performanceByPage ?? []
  const canRefetch = Boolean(getPerformanceByPage && siteId && startDate && endDate)

  // Update the sort key and, when every refetch input is available, pull a
  // freshly sorted top-20 page list from the API.
  const handleSortChange = (value: string) => {
    const v = value as 'lcp' | 'cls' | 'inp'
    setSortBy(v)
    if (!getPerformanceByPage || !siteId || !startDate || !endDate) return
    setLoadingTable(true)
    getPerformanceByPage(siteId, startDate, endDate, { sort: v, limit: 20 })
      .then(setOverrideRows)
      // FIX: the chain previously had no rejection handler, so a failed fetch
      // surfaced as an unhandled promise rejection. Fall back to the
      // dashboard-provided rows instead.
      .catch(() => setOverrideRows(null))
      .finally(() => setLoadingTable(false))
  }

  // Text color for a table cell, keyed by score band.
  const cellScoreClass = (score: 'good' | 'needs-improvement' | 'poor') => {
    // FIX: bare `Record` is not a valid type (Record requires its key/value
    // type arguments); spell out Record<Score, string>.
    const m: Record<Score, string> = {
      good: 'text-green-600 dark:text-green-400',
      'needs-improvement': 'text-yellow-600 dark:text-yellow-400',
      poor: 'text-red-600 dark:text-red-400',
    }
    return m[score] ?? ''
  }

  // CLS is rendered with 3 decimals; LCP/INP as whole milliseconds. Null
  // (missing metric) renders as an em dash.
  const formatMetric = (metric: 'lcp' | 'cls' | 'inp', val: number | null) => {
    if (val == null) return '—'
    if (metric === 'cls') return val.toFixed(3)
    return `${Math.round(val)} ms`
  }

  // Muted styling for missing values; otherwise color by score band.
  const getCellClass = (metric: 'lcp' | 'cls' | 'inp', val: number | null) => {
    if (val == null) return 'text-neutral-400 dark:text-neutral-500'
    return cellScoreClass(getScore(metric, val))
  }

  const summaryText = `LCP ${Math.round(stats.lcp)} ms · CLS ${Number(stats.cls.toFixed(3))} · INP ${Math.round(stats.inp)} ms`

  // NOTE(review): the JSX below appears to have been stripped/garbled in this
  // view — only text fragments and expression remnants survive. It is kept
  // verbatim; restore the markup from the original file. TODO confirm.
  return (
{/* * One-line summary: Performance score + metric summary. Click to expand. */}
{/* * Expanded: full LCP/CLS/INP cards, footnote, and Worst pages (collapsible) */}
* Averages calculated from real user sessions. Lower is better.
{/* * Worst pages by metric – collapsed by default */}
{worstPagesOpen && canRefetch && (
)}
{loadingTable ? (
) : rows.length === 0 ? (
No per-page metrics yet. Data appears as visitors are tracked with performance insights enabled.
) : (
| Path |
Samples |
LCP |
CLS |
INP |
{rows.map((r) => (
|
{r.path || '/'}
|
{r.samples} |
{formatMetric('lcp', r.lcp)}
|
{formatMetric('cls', r.cls)}
|
{formatMetric('inp', r.inp)}
|
))}
)}
  )
}