Performance metrics moved from dashboard into a new Performance tab. Fixed null handling so "No data" shows instead of misleading zeros. Script no longer sends INP=0 when no interaction occurred.
244 lines · 9.9 KiB · TypeScript
'use client'
|
|
|
|
import { useState, useEffect } from 'react'
|
|
import { PerformanceStats as Stats, PerformanceByPageStat, getPerformanceByPage } from '@/lib/api/stats'
|
|
import { Select } from '@ciphera-net/ui'
|
|
import { TableSkeleton } from '@/components/skeletons'
|
|
|
|
// Props for the Performance tab component.
interface Props {
  // Aggregate p75 Core Web Vitals for the selected period; null when nothing was collected.
  stats: Stats | null
  // Server-provided per-page rows for the "slowest pages" table.
  performanceByPage?: PerformanceByPageStat[] | null
  // siteId + startDate + endDate + getPerformanceByPage must ALL be present
  // to enable server-side re-sorting of the table (see canRefetch below).
  siteId?: string
  startDate?: string
  endDate?: string
  // Fetcher with the same signature as the API client's getPerformanceByPage;
  // presumably injected by the caller so the page controls data access — confirm.
  getPerformanceByPage?: typeof getPerformanceByPage
}
|
|
|
|
// Rating bucket for a Core Web Vitals metric (thresholds applied in getScore).
type Score = 'good' | 'needs-improvement' | 'poor'
|
|
|
|
const getScore = (metric: 'lcp' | 'cls' | 'inp', value: number): Score => {
|
|
if (metric === 'lcp') return value <= 2500 ? 'good' : value <= 4000 ? 'needs-improvement' : 'poor'
|
|
if (metric === 'cls') return value <= 0.1 ? 'good' : value <= 0.25 ? 'needs-improvement' : 'poor'
|
|
if (metric === 'inp') return value <= 200 ? 'good' : value <= 500 ? 'needs-improvement' : 'poor'
|
|
return 'good'
|
|
}
|
|
|
|
// Tailwind classes for the metric cards, keyed by rating bucket.
// Each entry carries text/background/border with light + dark variants.
const scoreColors = {
  good: 'text-green-600 bg-green-50 dark:bg-green-900/20 dark:text-green-400 border-green-200 dark:border-green-800',
  'needs-improvement': 'text-yellow-600 bg-yellow-50 dark:bg-yellow-900/20 dark:text-yellow-400 border-yellow-200 dark:border-yellow-800',
  poor: 'text-red-600 bg-red-50 dark:bg-red-900/20 dark:text-red-400 border-red-200 dark:border-red-800',
}
|
|
|
|
// Tailwind classes for the overall summary badge, keyed by rating bucket.
// Higher-contrast than scoreColors (100-level backgrounds vs the cards' 50-level).
const badgeColors = {
  good: 'text-green-700 dark:text-green-400 bg-green-100 dark:bg-green-900/30 border-green-200 dark:border-green-800',
  'needs-improvement': 'text-yellow-700 dark:text-yellow-400 bg-yellow-100 dark:bg-yellow-900/30 border-yellow-200 dark:border-yellow-800',
  poor: 'text-red-700 dark:text-red-400 bg-red-100 dark:bg-red-900/30 border-red-200 dark:border-red-800',
}
|
|
|
|
/**
 * One Core Web Vitals card: label on top, formatted value (with optional unit)
 * below. `score === null` means the metric was never collected — the card is
 * rendered in neutral grey instead of a rating colour.
 */
function MetricCard({ label, value, unit, score }: { label: string, value: string, unit: string, score: Score | null }) {
  // Resolve the colour scheme up front: neutral for "no data", otherwise by rating.
  let colorClass: string
  if (score === null) {
    colorClass = 'text-neutral-500 bg-neutral-50 dark:bg-neutral-800/50 dark:text-neutral-400 border-neutral-200 dark:border-neutral-700'
  } else {
    colorClass = scoreColors[score]
  }

  return (
    <div className={`p-4 rounded-lg border ${colorClass}`}>
      <div className="text-sm font-medium opacity-80 mb-1">{label}</div>
      <div className="text-2xl font-bold">
        {value}
        {unit && <span className="text-sm font-normal ml-1 opacity-70">{unit}</span>}
      </div>
    </div>
  )
}
|
|
|
|
function formatMetricValue(metric: 'lcp' | 'cls' | 'inp', val: number | null): string {
|
|
if (val == null) return 'No data'
|
|
if (metric === 'cls') return val.toFixed(3)
|
|
return `${Math.round(val)}`
|
|
}
|
|
|
|
function formatMetricCell(metric: 'lcp' | 'cls' | 'inp', val: number | null): string {
|
|
if (val == null) return '\u2014'
|
|
if (metric === 'cls') return val.toFixed(3)
|
|
return `${Math.round(val)} ms`
|
|
}
|
|
|
|
export default function PerformanceStats({ stats, performanceByPage, siteId, startDate, endDate, getPerformanceByPage }: Props) {
|
|
const [sortBy, setSortBy] = useState<'lcp' | 'cls' | 'inp'>('lcp')
|
|
const [overrideRows, setOverrideRows] = useState<PerformanceByPageStat[] | null>(null)
|
|
const [loadingTable, setLoadingTable] = useState(false)
|
|
|
|
useEffect(() => {
|
|
setOverrideRows(null)
|
|
}, [performanceByPage])
|
|
|
|
const rows = overrideRows ?? performanceByPage ?? []
|
|
const canRefetch = Boolean(getPerformanceByPage && siteId && startDate && endDate)
|
|
|
|
const handleSortChange = (value: string) => {
|
|
const v = value as 'lcp' | 'cls' | 'inp'
|
|
setSortBy(v)
|
|
if (!getPerformanceByPage || !siteId || !startDate || !endDate) return
|
|
setLoadingTable(true)
|
|
getPerformanceByPage(siteId, startDate, endDate, { sort: v, limit: 20 })
|
|
.then(setOverrideRows)
|
|
.finally(() => setLoadingTable(false))
|
|
}
|
|
|
|
const hasData = stats && stats.samples > 0
|
|
const lcp = stats?.lcp ?? null
|
|
const cls = stats?.cls ?? null
|
|
const inp = stats?.inp ?? null
|
|
|
|
const lcpScore = lcp != null ? getScore('lcp', lcp) : null
|
|
const clsScore = cls != null ? getScore('cls', cls) : null
|
|
const inpScore = inp != null ? getScore('inp', inp) : null
|
|
|
|
// Overall score: worst of available metrics
|
|
let overallScore: Score | null = null
|
|
if (hasData) {
|
|
const scores = [lcpScore, clsScore, inpScore].filter((s): s is Score => s !== null)
|
|
if (scores.length > 0) {
|
|
if (scores.includes('poor')) overallScore = 'poor'
|
|
else if (scores.includes('needs-improvement')) overallScore = 'needs-improvement'
|
|
else overallScore = 'good'
|
|
}
|
|
}
|
|
|
|
const overallLabel = overallScore
|
|
? { good: 'Good', 'needs-improvement': 'Needs improvement', poor: 'Poor' }[overallScore]
|
|
: 'No data'
|
|
|
|
const overallBadgeClass = overallScore
|
|
? badgeColors[overallScore]
|
|
: 'text-neutral-500 dark:text-neutral-400 bg-neutral-100 dark:bg-neutral-800 border-neutral-200 dark:border-neutral-700'
|
|
|
|
const getCellScoreClass = (score: Score) => {
|
|
const m: Record<string, string> = {
|
|
good: 'text-green-600 dark:text-green-400',
|
|
'needs-improvement': 'text-yellow-600 dark:text-yellow-400',
|
|
poor: 'text-red-600 dark:text-red-400',
|
|
}
|
|
return m[score] ?? ''
|
|
}
|
|
|
|
const getCellClass = (metric: 'lcp' | 'cls' | 'inp', val: number | null) => {
|
|
if (val == null) return 'text-neutral-400 dark:text-neutral-500'
|
|
return getCellScoreClass(getScore(metric, val))
|
|
}
|
|
|
|
return (
|
|
<div className="space-y-6">
|
|
{/* Overall badge + summary */}
|
|
<div className="flex items-center gap-3">
|
|
<span className={`rounded-md border px-2.5 py-1 text-sm font-medium ${overallBadgeClass}`}>
|
|
{overallLabel}
|
|
</span>
|
|
{hasData && (
|
|
<span className="text-sm text-neutral-500">
|
|
Based on {stats.samples.toLocaleString()} session{stats.samples !== 1 ? 's' : ''} (p75 values)
|
|
</span>
|
|
)}
|
|
</div>
|
|
|
|
{/* Metric cards */}
|
|
<div className="grid grid-cols-1 md:grid-cols-3 gap-4">
|
|
<MetricCard
|
|
label="Largest Contentful Paint (LCP)"
|
|
value={formatMetricValue('lcp', lcp)}
|
|
unit={lcp != null ? 'ms' : ''}
|
|
score={lcpScore}
|
|
/>
|
|
<MetricCard
|
|
label="Cumulative Layout Shift (CLS)"
|
|
value={formatMetricValue('cls', cls)}
|
|
unit=""
|
|
score={clsScore}
|
|
/>
|
|
<MetricCard
|
|
label="Interaction to Next Paint (INP)"
|
|
value={formatMetricValue('inp', inp)}
|
|
unit={inp != null ? 'ms' : ''}
|
|
score={inpScore}
|
|
/>
|
|
</div>
|
|
|
|
{!hasData && (
|
|
<div className="text-sm text-neutral-500 bg-neutral-50 dark:bg-neutral-800/50 rounded-lg p-4 border border-neutral-200 dark:border-neutral-700">
|
|
No performance data collected yet. Core Web Vitals data will appear here once visitors browse your site with performance insights enabled.
|
|
</div>
|
|
)}
|
|
|
|
{hasData && (
|
|
<div className="text-xs text-neutral-500">
|
|
* 75th percentile (p75) calculated from real user sessions. Lower is better.
|
|
</div>
|
|
)}
|
|
|
|
{/* Worst pages by metric */}
|
|
<div className="bg-white dark:bg-neutral-900 border border-neutral-200 dark:border-neutral-800 rounded-2xl p-6">
|
|
<div className="flex items-center justify-between gap-4 mb-4">
|
|
<h3 className="text-sm font-medium text-neutral-700 dark:text-neutral-300">
|
|
Slowest pages by metric
|
|
</h3>
|
|
{canRefetch && (
|
|
<Select
|
|
value={sortBy}
|
|
onChange={handleSortChange}
|
|
options={[
|
|
{ value: 'lcp', label: 'Sort by LCP (worst)' },
|
|
{ value: 'cls', label: 'Sort by CLS (worst)' },
|
|
{ value: 'inp', label: 'Sort by INP (worst)' },
|
|
]}
|
|
variant="input"
|
|
align="right"
|
|
className="min-w-[180px]"
|
|
/>
|
|
)}
|
|
</div>
|
|
{loadingTable ? (
|
|
<TableSkeleton rows={5} cols={5} />
|
|
) : rows.length === 0 ? (
|
|
<div className="py-6 text-center text-neutral-500 text-sm">
|
|
No per-page metrics yet. Data appears as visitors are tracked with performance insights enabled.
|
|
</div>
|
|
) : (
|
|
<div className="overflow-x-auto -mx-1">
|
|
<table className="w-full text-sm">
|
|
<thead>
|
|
<tr className="border-b border-neutral-200 dark:border-neutral-700">
|
|
<th className="text-left py-2 px-2 font-medium text-neutral-600 dark:text-neutral-400">Path</th>
|
|
<th className="text-right py-2 px-2 font-medium text-neutral-600 dark:text-neutral-400">Samples</th>
|
|
<th className="text-right py-2 px-2 font-medium text-neutral-600 dark:text-neutral-400">LCP</th>
|
|
<th className="text-right py-2 px-2 font-medium text-neutral-600 dark:text-neutral-400">CLS</th>
|
|
<th className="text-right py-2 px-2 font-medium text-neutral-600 dark:text-neutral-400">INP</th>
|
|
</tr>
|
|
</thead>
|
|
<tbody>
|
|
{rows.map((r) => (
|
|
<tr key={r.path} className="border-b border-neutral-100 dark:border-neutral-800/80">
|
|
<td className="py-2 px-2 text-neutral-900 dark:text-white font-mono truncate max-w-[200px]" title={r.path}>
|
|
{r.path || '/'}
|
|
</td>
|
|
<td className="py-2 px-2 text-right text-neutral-600 dark:text-neutral-400">{r.samples}</td>
|
|
<td className={`py-2 px-2 text-right font-mono ${getCellClass('lcp', r.lcp)}`}>
|
|
{formatMetricCell('lcp', r.lcp)}
|
|
</td>
|
|
<td className={`py-2 px-2 text-right font-mono ${getCellClass('cls', r.cls)}`}>
|
|
{formatMetricCell('cls', r.cls)}
|
|
</td>
|
|
<td className={`py-2 px-2 text-right font-mono ${getCellClass('inp', r.inp)}`}>
|
|
{formatMetricCell('inp', r.inp)}
|
|
</td>
|
|
</tr>
|
|
))}
|
|
</tbody>
|
|
</table>
|
|
</div>
|
|
)}
|
|
</div>
|
|
</div>
|
|
)
|
|
}
|