Optimize PredictionsChart with batch data loading
All checks were successful: Build Frontend / build (push) succeeded in 1m5s.

Refactors PredictionsChart to fetch all prediction data in a single batch request, enabling instant client-side switching between horizons and simulation factors. Updates state management and effects to read from the pre-cached batch data, reducing API calls and improving responsiveness. Also removes the TensorFlow.js mention from the prediction tooltip text.
This commit is contained in: g
2026-01-12 04:33:31 +00:00
parent 198b9a82ff
commit f7af5b933d
2 changed files with 63 additions and 39 deletions
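For orientation before the diff: the contract implied by the new batchData state is a two-level map, keyed by prediction horizon (in days) and then by simulation factor (stored as a decimal string). A minimal TypeScript sketch of that shape and a lookup; PredictionsOverview is stubbed here and the sample keys are hypothetical:

// Shape implied by the batchData state in the diff:
// horizon (days) -> simulation factor (decimal string) -> predictions.
type PredictionsOverview = Record<string, unknown>; // stub for the real type
type BatchPredictions = {
  [horizon: string]: {
    [simulationFactor: string]: PredictionsOverview;
  };
};

// Hypothetical lookup: 7-day horizon at +10% simulation; key "0" is the baseline.
declare const batch: BatchPredictions;
const simulated = batch["7"]?.["0.1"];
const baseline = batch["7"]?.["0"];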

First changed file:

@@ -49,8 +49,6 @@ import type { DateRange as ProfitDateRange } from "@/lib/services/profit-analyti
import { MotionWrapper } from "@/components/ui/motion-wrapper";
import { motion } from "framer-motion";
// Lazy load chart components - already handled individually below
const RevenueChart = dynamic(() => import("./RevenueChart"), {
  loading: () => <ChartSkeleton />,
});

Second changed file:

@@ -82,6 +82,12 @@ export default function PredictionsChart({
   const [baselinePredictions, setBaselinePredictions] = useState<PredictionsOverview | null>(
     null,
   );
+  // Batch data holds all pre-cached predictions for instant switching
+  const [batchData, setBatchData] = useState<{
+    [horizon: string]: {
+      [simulationFactor: string]: PredictionsOverview;
+    };
+  } | null>(null);
   const [stockPredictions, setStockPredictions] =
     useState<StockPredictionsResponse | null>(null);
   const [loading, setLoading] = useState(true);
@@ -92,16 +98,33 @@ export default function PredictionsChart({
   const [committedSimulationFactor, setCommittedSimulationFactor] = useState(0);
   const { toast } = useToast();
 
-  // Fetch baseline predictions (simulation factor = 0)
-  const fetchBaseline = async () => {
+  // Fetch all predictions in batch (for instant client-side switching)
+  const fetchBatchData = async () => {
     try {
       setLoading(true);
-      const [overview, stock] = await Promise.all([
-        getPredictionsOverviewWithStore(daysAhead, timeRange, 0),
+      const { getBatchPredictionsWithStore } = await import("@/lib/services/analytics-service");
+      const [batchResponse, stock] = await Promise.all([
+        getBatchPredictionsWithStore(timeRange),
         getStockPredictionsWithStore(timeRange),
       ]);
+      if (batchResponse.success && batchResponse.predictions) {
+        setBatchData(batchResponse.predictions);
+        // Set initial predictions from batch
+        const horizonData = batchResponse.predictions[daysAhead.toString()];
+        if (horizonData) {
+          const baseline = horizonData["0"];
+          if (baseline) {
+            setBaselinePredictions(baseline);
+            setPredictions(baseline);
+          }
+        }
+      } else {
+        // Fallback to single request if batch not available
+        const overview = await getPredictionsOverviewWithStore(daysAhead, timeRange, 0);
+        setBaselinePredictions(overview);
+        setPredictions(overview);
+      }
       setStockPredictions(stock);
     } catch (error) {
       console.error("Error fetching predictions:", error);
@@ -115,43 +138,46 @@ export default function PredictionsChart({
     }
   };
 
-  // Fetch simulated predictions (without full reload)
-  const fetchSimulation = async (factor: number) => {
-    if (factor === 0) {
-      // Use cached baseline
-      setPredictions(baselinePredictions);
-      return;
-    }
-    try {
-      setIsSimulating(true);
-      const overview = await getPredictionsOverviewWithStore(daysAhead, timeRange, factor / 100);
-      setPredictions(overview);
-    } catch (error) {
-      console.error("Error fetching simulation:", error);
-      toast({
-        title: "Error",
-        description: "Failed to load simulation",
-        variant: "destructive",
-      });
-    } finally {
-      setIsSimulating(false);
-    }
-  };
+  // Switch predictions from batch data (no API call!)
+  const switchPredictions = useCallback((horizon: number, simFactor: number) => {
+    if (!batchData) return;
+    const horizonData = batchData[horizon.toString()];
+    if (!horizonData) return;
+    // Simulation factor is stored as decimal (e.g., 0.1 for 10%)
+    const simKey = (simFactor / 100).toString();
+    const newPrediction = horizonData[simKey];
+    if (newPrediction) {
+      setPredictions(newPrediction);
+      if (simFactor === 0) {
+        setBaselinePredictions(newPrediction);
+      }
+    }
+  }, [batchData]);
 
-  // Fetch baseline on initial load or when daysAhead/timeRange changes
+  // Fetch batch data on initial load or when timeRange changes
   useEffect(() => {
-    fetchBaseline();
+    fetchBatchData();
     setCommittedSimulationFactor(0);
     setSimulationFactor(0);
-  }, [daysAhead, timeRange]);
+  }, [timeRange]);
 
-  // Fetch simulation when committed slider value changes
+  // Switch predictions when daysAhead changes (instant, from batch)
   useEffect(() => {
-    if (baselinePredictions) {
-      fetchSimulation(committedSimulationFactor);
+    if (batchData) {
+      switchPredictions(daysAhead, committedSimulationFactor);
     }
-  }, [committedSimulationFactor]);
+  }, [daysAhead, batchData, switchPredictions]);
+
+  // Switch predictions when simulation factor changes (instant, from batch)
+  useEffect(() => {
+    if (batchData) {
+      switchPredictions(daysAhead, committedSimulationFactor);
+    }
+  }, [committedSimulationFactor, batchData, switchPredictions]);
 
   const getConfidenceColor = (confidence: string) => {
     switch (confidence) {
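Worth noting from the hunk above: the slider and committed state work in whole percentage points, while the batch map is keyed by decimal strings, so every lookup converts first. A quick illustration of the mapping (sample values hypothetical):

// (simFactor / 100).toString() turns slider percentages into map keys.
const keyFor = (sliderPercent: number): string => (sliderPercent / 100).toString();
keyFor(0);  // "0"  (baseline)
keyFor(10); // "0.1"
keyFor(25); // "0.25"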
@@ -305,7 +331,7 @@ export default function PredictionsChart({
             <Button
               variant="outline"
               size="icon"
-              onClick={fetchBaseline}
+              onClick={fetchBatchData}
               disabled={loading || isSimulating}
             >
               <RefreshCw
@@ -403,7 +429,7 @@ export default function PredictionsChart({
                       </span>
                     </TooltipTrigger>
                     <TooltipContent>
-                      <p>Predictions generated using a Deep Learning Ensemble Model (TensorFlow.js)</p>
+                      <p>Predictions generated using a Deep Learning Ensemble Model</p>
                     </TooltipContent>
                   </Tooltip>
                 )}
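getBatchPredictionsWithStore is dynamically imported from @/lib/services/analytics-service, which is not among the files shown in this commit. For orientation, a minimal sketch of what such a function could look like, assuming a single batch endpoint and the { success, predictions } envelope the component checks; the URL and fetch logic are assumptions, not taken from this commit:

// Hypothetical sketch; the endpoint path is an assumption.
type PredictionsOverview = Record<string, unknown>; // stub for the real type

interface BatchPredictionsResponse {
  success: boolean;
  predictions?: {
    [horizon: string]: { [simulationFactor: string]: PredictionsOverview };
  };
}

export async function getBatchPredictionsWithStore(
  timeRange: string,
): Promise<BatchPredictionsResponse> {
  // One request fetches every horizon/simulation-factor combination so the
  // component can switch client-side without further API calls.
  const res = await fetch(
    `/api/analytics/predictions/batch?timeRange=${encodeURIComponent(timeRange)}`,
  );
  if (!res.ok) {
    // Returning success: false lets the caller use its single-request fallback.
    return { success: false };
  }
  return (await res.json()) as BatchPredictionsResponse;
}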