'use client'
import { useState, useEffect } from 'react'
import { PerformanceStats as Stats, PerformanceByPageStat, getPerformanceByPage } from '@/lib/api/stats'
import { Select } from '@ciphera-net/ui'
import { TableSkeleton } from '@/components/skeletons'
// Props for the PerformanceStats component.
interface Props {
// Aggregate p75 Core Web Vitals for the selected period; null while not loaded.
stats: Stats | null
// Server-provided per-page breakdown rows; absent/null when not yet fetched.
performanceByPage?: PerformanceByPageStat[] | null
// Site + date range used to refetch the per-page table when the sort changes.
siteId?: string
startDate?: string
endDate?: string
// Optional fetcher for re-querying per-page rows on sort change.
// NOTE(review): typed as `typeof getPerformanceByPage` — presumably injectable
// so tests/stories can stub the API call; confirm against callers.
getPerformanceByPage?: typeof getPerformanceByPage
}
// Core Web Vitals rating bucket, mirroring Google's good / needs-improvement / poor classification.
type Score = 'good' | 'needs-improvement' | 'poor'
// Classify a p75 metric value into a Core Web Vitals rating bucket.
// Thresholds follow Google's published guidance: LCP/INP in ms, CLS unitless.
const getScore = (metric: 'lcp' | 'cls' | 'inp', value: number): Score => {
  // Upper bounds for 'good' and 'needs-improvement'; above both is 'poor'.
  const thresholds: Record<'lcp' | 'cls' | 'inp', readonly [number, number]> = {
    lcp: [2500, 4000],
    cls: [0.1, 0.25],
    inp: [200, 500],
  }
  const [goodMax, okMax] = thresholds[metric]
  if (value <= goodMax) return 'good'
  return value <= okMax ? 'needs-improvement' : 'poor'
}
// Tailwind classes for the large metric cards, keyed by score bucket.
// `satisfies` validates the keys against the Score union (catching a typo or
// missing bucket at compile time) while keeping the literal value types.
const scoreColors = {
  good: 'text-green-600 bg-green-50 dark:bg-green-900/20 dark:text-green-400 border-green-200 dark:border-green-800',
  'needs-improvement': 'text-yellow-600 bg-yellow-50 dark:bg-yellow-900/20 dark:text-yellow-400 border-yellow-200 dark:border-yellow-800',
  poor: 'text-red-600 bg-red-50 dark:bg-red-900/20 dark:text-red-400 border-red-200 dark:border-red-800',
} satisfies Record<'good' | 'needs-improvement' | 'poor', string>
// Slightly stronger variant used for the small overall-score badge.
const badgeColors = {
  good: 'text-green-700 dark:text-green-400 bg-green-100 dark:bg-green-900/30 border-green-200 dark:border-green-800',
  'needs-improvement': 'text-yellow-700 dark:text-yellow-400 bg-yellow-100 dark:bg-yellow-900/30 border-yellow-200 dark:border-yellow-800',
  poor: 'text-red-700 dark:text-red-400 bg-red-100 dark:bg-red-900/30 border-red-200 dark:border-red-800',
} satisfies Record<'good' | 'needs-improvement' | 'poor', string>
// Card displaying one Core Web Vitals metric: label, formatted p75 value, and
// an optional unit, colored by the metric's score bucket.
// NOTE(review): the JSX inside `return (...)` appears to have been stripped by
// the extraction that produced this file — the element tags around
// {label}/{value}/{unit} are missing. Expressions kept verbatim; restore the
// markup from version control before editing this render.
function MetricCard({ label, value, unit, score }: { label: string, value: string, unit: string, score: Score | null }) {
// A null score means the metric has no data for the period.
const noData = score === null
// Neutral palette for "no data", otherwise the per-score Tailwind classes.
const colorClass = noData
? 'text-neutral-500 bg-neutral-50 dark:bg-neutral-800/50 dark:text-neutral-400 border-neutral-200 dark:border-neutral-700'
: scoreColors[score]
return (
{label}
{value}
{unit && {unit}}
)
}
// Format a p75 metric for the summary cards: CLS keeps three decimals, the
// time-based metrics (LCP/INP) round to a whole number (unit rendered separately).
// Returns 'No data' when the value is missing.
function formatMetricValue(metric: 'lcp' | 'cls' | 'inp', val: number | null): string {
  if (val == null) return 'No data'
  return metric === 'cls' ? val.toFixed(3) : String(Math.round(val))
}
// Format a per-page table cell: em dash (U+2014) for missing values, three
// decimals for CLS, and rounded milliseconds with an explicit ' ms' suffix
// for the time-based metrics.
function formatMetricCell(metric: 'lcp' | 'cls' | 'inp', val: number | null): string {
  if (val == null) return '\u2014'
  return metric === 'cls' ? val.toFixed(3) : `${Math.round(val)} ms`
}
// Core Web Vitals dashboard section: an overall score badge, three p75 metric
// cards, and a per-page breakdown table that can be re-sorted (refetching from
// the server when the required props are present).
// NOTE(review): the JSX in the return appears to have been stripped by the
// extraction that produced this file — element tags are missing throughout the
// render below. Expressions are kept verbatim; restore markup from VCS.
export default function PerformanceStats({ stats, performanceByPage, siteId, startDate, endDate, getPerformanceByPage }: Props) {
// Metric the per-page table is currently sorted by.
const [sortBy, setSortBy] = useState<'lcp' | 'cls' | 'inp'>('lcp')
// Rows refetched after a sort change; null means "use the server-provided performanceByPage prop".
// NOTE(review): `useState(null)` infers the state type as plain `null`, so
// `setOverrideRows(rows)` in handleSortChange should not typecheck — the
// generic was likely `useState<PerformanceByPageStat[] | null>(null)` and got
// stripped along with the JSX. Confirm against tsc output.
const [overrideRows, setOverrideRows] = useState(null)
const [loadingTable, setLoadingTable] = useState(false)
// Discard stale refetched rows whenever fresh server data arrives via props.
useEffect(() => {
setOverrideRows(null)
}, [performanceByPage])
// Prefer locally refetched rows over the prop; fall back to an empty table.
const rows = overrideRows ?? performanceByPage ?? []
// Server-side re-sort is only possible when the fetcher and its arguments were all provided.
const canRefetch = Boolean(getPerformanceByPage && siteId && startDate && endDate)
// Update sort state and, when possible, refetch the table sorted server-side.
// NOTE(review): the promise chain has no .catch — a failed refetch rejects
// unhandled (loading is still cleared by .finally, but stale rows remain and
// the error is silent). Consider surfacing it.
const handleSortChange = (value: string) => {
const v = value as 'lcp' | 'cls' | 'inp'
setSortBy(v)
if (!getPerformanceByPage || !siteId || !startDate || !endDate) return
setLoadingTable(true)
getPerformanceByPage(siteId, startDate, endDate, { sort: v, limit: 20 })
.then(setOverrideRows)
.finally(() => setLoadingTable(false))
}
const hasData = stats && stats.samples > 0
const lcp = stats?.lcp ?? null
const cls = stats?.cls ?? null
const inp = stats?.inp ?? null
// Per-metric score buckets; null when the metric has no recorded value.
const lcpScore = lcp != null ? getScore('lcp', lcp) : null
const clsScore = cls != null ? getScore('cls', cls) : null
const inpScore = inp != null ? getScore('inp', inp) : null
// Overall score: worst of available metrics
let overallScore: Score | null = null
if (hasData) {
const scores = [lcpScore, clsScore, inpScore].filter((s): s is Score => s !== null)
if (scores.length > 0) {
if (scores.includes('poor')) overallScore = 'poor'
else if (scores.includes('needs-improvement')) overallScore = 'needs-improvement'
else overallScore = 'good'
}
}
const overallLabel = overallScore
? { good: 'Good', 'needs-improvement': 'Needs improvement', poor: 'Poor' }[overallScore]
: 'No data'
const overallBadgeClass = overallScore
? badgeColors[overallScore]
: 'text-neutral-500 dark:text-neutral-400 bg-neutral-100 dark:bg-neutral-800 border-neutral-200 dark:border-neutral-700'
// Text-only color class for a table cell, keyed by score bucket.
// NOTE(review): `Record` here is missing its type arguments — likely
// `Record<Score, string>` stripped by the same extraction; as written this is
// a TS error. The `?? ''` fallback would be redundant once the type is restored.
const getCellScoreClass = (score: Score) => {
const m: Record = {
good: 'text-green-600 dark:text-green-400',
'needs-improvement': 'text-yellow-600 dark:text-yellow-400',
poor: 'text-red-600 dark:text-red-400',
}
return m[score] ?? ''
}
// Cell class for a nullable metric value: muted when missing, else score-colored.
const getCellClass = (metric: 'lcp' | 'cls' | 'inp', val: number | null) => {
if (val == null) return 'text-neutral-400 dark:text-neutral-500'
return getCellScoreClass(getScore(metric, val))
}
return (
{/* Overall badge + summary */}
{overallLabel}
{hasData && (
Based on {stats.samples.toLocaleString()} session{stats.samples !== 1 ? 's' : ''} (p75 values)
)}
{/* Metric cards */}
{!hasData && (
No performance data collected yet. Core Web Vitals data will appear here once visitors browse your site with performance insights enabled.
)}
{hasData && (
* 75th percentile (p75) calculated from real user sessions. Lower is better.
)}
{/* Worst pages by metric */}
Slowest pages by metric
{canRefetch && (
)}
{loadingTable ? (
) : rows.length === 0 ? (
No per-page metrics yet. Data appears as visitors are tracked with performance insights enabled.
) : (
| Path |
Samples |
LCP |
CLS |
INP |
{rows.map((r) => (
|
{r.path || '/'}
|
{r.samples} |
{formatMetricCell('lcp', r.lcp)}
|
{formatMetricCell('cls', r.cls)}
|
{formatMetricCell('inp', r.inp)}
|
))}
)}
)
}