From 81133bad23b671d1f51e88260051f2b72f493df6 Mon Sep 17 00:00:00 2001
From: Juan Carlos M P
Date: Wed, 22 Apr 2026 12:34:49 -0500
Subject: [PATCH] feat(windows): add native ROCm support for AMD GPUs
Implements native ROCm architecture for Windows.
- Adds backend build pipeline for voicebox-server-rocm.exe
- Detects AMD GPUs dynamically and routes PyTorch allocations
- Adds automatic download and update logic for ROCm dependencies
- Refactors UI in GpuPage.tsx and GpuAcceleration.tsx to add AMD flows
- Fixes 'Switch to CPU' lock on Windows via Tauri backend_override state
- Resolves PyInstaller/rocm_sdk UnboundLocalError silent crashes
- Resolves Numba/NumPy 2.x incompatibilities during Qwen3-TTS load
- Resolves HF_HUB_OFFLINE Catch-22 for CustomVoice processor caching
---
.gitignore | Bin 738 -> 948 bytes
app/package.json | 8 +-
.../ServerSettings/GpuAcceleration.test.tsx | 337 +++++++++++++
.../ServerSettings/GpuAcceleration.tsx | 406 +++++++++++----
app/src/components/ServerTab/GpuPage.tsx | 412 ++++++++++++----
app/src/i18n/locales/en/translation.json | 46 +-
app/src/lib/api/client.ts | 18 +
app/src/lib/api/models/ModelStatus.ts | 2 +-
app/src/lib/api/types.ts | 20 +
app/src/platform/types.ts | 1 +
app/src/test/setup.ts | 1 +
app/vite.config.ts | 7 +-
backend/app.py | 25 +-
backend/backends/base.py | 5 +
backend/backends/hume_backend.py | 10 +-
backend/backends/qwen_custom_voice_backend.py | 13 +-
backend/build_binary.py | 280 +++++++++--
backend/pyi_rth_rocm_sdk.py | 85 ++++
backend/requirements-rocm.txt | 4 +
backend/routes/__init__.py | 2 +
backend/routes/health.py | 19 +-
backend/routes/rocm.py | 79 +++
backend/server.py | 23 +-
backend/services/rocm.py | 465 ++++++++++++++++++
backend/tests/test_amd_gpu_detect.py | 87 ++++
backend/tests/test_rocm_backends.py | 68 +++
backend/tests/test_rocm_build.py | 129 +++++
backend/tests/test_rocm_download.py | 203 ++++++++
backend/tests/test_rocm_requirements.py | 130 +++++
backend/utils/platform_detect.py | 50 +-
tauri/src-tauri/src/main.rs | 176 +++++--
tauri/src/platform/lifecycle.ts | 9 +
32 files changed, 2823 insertions(+), 297 deletions(-)
create mode 100644 app/src/components/ServerSettings/GpuAcceleration.test.tsx
create mode 100644 app/src/test/setup.ts
create mode 100644 backend/pyi_rth_rocm_sdk.py
create mode 100644 backend/requirements-rocm.txt
create mode 100644 backend/routes/rocm.py
create mode 100644 backend/services/rocm.py
create mode 100644 backend/tests/test_amd_gpu_detect.py
create mode 100644 backend/tests/test_rocm_backends.py
create mode 100644 backend/tests/test_rocm_build.py
create mode 100644 backend/tests/test_rocm_download.py
create mode 100644 backend/tests/test_rocm_requirements.py
diff --git a/.gitignore b/.gitignore
index bcc1927cfbb9acb3a74f2690d850495089f27f89..853c5060975fbeadd1fbd442d6dfb9c9a648813d 100644
GIT binary patch
literal 948
zcma)4O^e$w5Y^e>|KO4yT0>;$KS)U^fu&u@(jH1NvNUnDktHF?Nqp&V?~}cOw%bE7
zo{{Fg`PQo1k|(RkN=>mWtW(quPK}0QQx@H5xpDH`l||e7NeX$QwgpggKYf=@{lM|9
zpSUz4!oB9vl?8vC(#hGfxRAYoyvW_>uZv@FgHL6#sy>d|sLGAWj|t97#{@=~tuvGQ
zey#1%-7jU4MCd7#YA(FbN)3Hhbfc_>sAnUg;F@o|-w&b(lC$l%JCt^bsG1OgYeiy?
z6t8oncy)04xsbmcz}OzzLvjKBPp5I{B3B5TLv2M8)w?lLSodTph(zi=8h{xQ-^`l#
zI-Q9SI(t00ejbl;C>J6RH`{miqJy&oSxkguP>ak%7iOV+x@V}48e1s~blx~DO?b_p
zW1e#oW6(_ua=n~7ZAHL7oBB0|f}2f@lp>cR{2RYGEva)iuBAo7zr*JcUWBBD;oe|t
zOQDa`-o_-1A%w+C@FW-Do3_ebW0hTwLgroD@uj;b8oUc4Oh_|$OeMhpRdZZlX7piK
zC5q{H|4zs=o^6xuu?ZAM1C7V?hyAE;X#@zukpkX0wR3CyDlq8(*>%1r%
z43U78Ks_ydP7!8|vxYV7A3v=bB~6*(u6tj7a9ygLD-$?R-0l&qsc;;(KAzIAJ+LVw
zieRHm&Jx`;Hq{L!PUIVcgT(us50e=Yc(7(6oHIwtTuK*nDrQk4I3u
zV}ImS5T4I<8c4aJF+0Pids7L^cEpIqFuJ$k1l90rUDW9Lf|#57A&sr~kVcSCJQHjm
zXJ1mBFfu4HVGYel6B_;D_g}k{7n?y^(@ADvvRB_o++$OV*HrBG=Wg)P6q%0R{6k5F
z`kn}xlr`l=dQRTrkZ#J`NG~d~DeO&9Imk3xg-^>N5}OdPG}AKLhJNUCsQzQob?#080_};Q#;t
diff --git a/app/package.json b/app/package.json
index f149fb1a..0455e8ea 100644
--- a/app/package.json
+++ b/app/package.json
@@ -8,6 +8,7 @@
"build": "vite build",
"typecheck": "tsc -p tsconfig.json --noEmit",
"preview": "vite preview",
+ "test": "vitest",
"lint": "biome lint src",
"lint:fix": "biome lint --write src",
"format": "biome format --write src",
@@ -60,11 +61,16 @@
},
"devDependencies": {
"@tailwindcss/vite": "^4.1.18",
+ "@testing-library/dom": "^10.4.0",
+ "@testing-library/jest-dom": "^6.5.0",
+ "@testing-library/react": "^16.0.0",
"@types/react": "^18.3.0",
"@types/react-dom": "^18.3.0",
"@vitejs/plugin-react": "^4.3.0",
+ "jsdom": "^25.0.0",
"tailwindcss": "^4.1.0",
"typescript": "^5.6.0",
- "vite": "^5.4.0"
+ "vite": "^5.4.0",
+ "vitest": "^2.1.0"
}
}
diff --git a/app/src/components/ServerSettings/GpuAcceleration.test.tsx b/app/src/components/ServerSettings/GpuAcceleration.test.tsx
new file mode 100644
index 00000000..25fb3412
--- /dev/null
+++ b/app/src/components/ServerSettings/GpuAcceleration.test.tsx
@@ -0,0 +1,337 @@
+import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
+import { fireEvent, render, screen, waitFor } from '@testing-library/react';
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import { GpuAcceleration } from './GpuAcceleration';
+
+// Mock dependencies
+vi.mock('@/lib/api/client', () => ({
+ apiClient: {
+ getHealth: vi.fn(),
+ getCudaStatus: vi.fn(),
+ getRocmStatus: vi.fn(),
+ downloadCudaBackend: vi.fn(),
+ downloadRocmBackend: vi.fn(),
+ deleteCudaBackend: vi.fn(),
+ deleteRocmBackend: vi.fn(),
+ },
+}));
+
+vi.mock('@/lib/hooks/useServer', () => ({
+ useServerHealth: vi.fn(),
+}));
+
+vi.mock('@/platform/PlatformContext', () => ({
+ usePlatform: vi.fn(),
+}));
+
+vi.mock('@/stores/serverStore', () => ({
+ useServerStore: vi.fn((selector) => selector({ serverUrl: 'http://localhost:8000' })),
+}));
+
+import { apiClient } from '@/lib/api/client';
+import { useServerHealth } from '@/lib/hooks/useServer';
+import { usePlatform } from '@/platform/PlatformContext';
+
+const mockedApiClient = vi.mocked(apiClient);
+const mockedUseServerHealth = vi.mocked(useServerHealth);
+const mockedUsePlatform = vi.mocked(usePlatform);
+
+describe('GpuAcceleration', () => {
+ let queryClient: QueryClient;
+
+ beforeEach(() => {
+ queryClient = new QueryClient({
+ defaultOptions: {
+ queries: {
+ retry: false,
+ },
+ },
+ });
+
+ // Reset all mocks
+ vi.clearAllMocks();
+
+ // Default platform mock (Tauri app)
+ mockedUsePlatform.mockReturnValue({
+ metadata: { isTauri: true },
+ lifecycle: {
+ restartServer: vi.fn().mockResolvedValue(undefined),
+ setBackendOverride: vi.fn().mockResolvedValue(undefined),
+ },
+ } as any);
+ });
+
+ afterEach(() => {
+ vi.unstubAllGlobals();
+ vi.restoreAllMocks();
+ });
+
+ function renderComponent() {
+ return render(
+      <QueryClientProvider client={queryClient}>
+        <GpuAcceleration />
+      </QueryClientProvider>,
+ );
+ }
+
+ it('renders CPU status when no GPU is available', async () => {
+ mockedUseServerHealth.mockReturnValue({
+ data: {
+ status: 'healthy',
+ gpu_available: false,
+ backend_variant: 'cpu',
+ },
+ isLoading: false,
+ } as any);
+
+ mockedApiClient.getCudaStatus.mockResolvedValue({
+ available: false,
+ active: false,
+ downloading: false,
+ });
+
+ mockedApiClient.getRocmStatus.mockResolvedValue({
+ available: false,
+ active: false,
+ downloading: false,
+ });
+
+ renderComponent();
+
+ await waitFor(() => {
+ expect(screen.getByText('CPU')).toBeInTheDocument();
+ });
+ });
+
+ it('shows "Download AMD ROCm Backend" button when running CPU on AMD hardware', async () => {
+ mockedUseServerHealth.mockReturnValue({
+ data: {
+ status: 'healthy',
+ gpu_available: false,
+ backend_variant: 'cpu',
+ },
+ isLoading: false,
+ } as any);
+
+ mockedApiClient.getCudaStatus.mockResolvedValue({
+ available: false,
+ active: false,
+ downloading: false,
+ });
+
+ mockedApiClient.getRocmStatus.mockResolvedValue({
+ available: false,
+ active: false,
+ downloading: false,
+ });
+
+ renderComponent();
+
+ await waitFor(() => {
+ expect(screen.getByText('Download AMD ROCm Backend')).toBeInTheDocument();
+ });
+ });
+
+ it('shows ROCm download progress via SSE events', async () => {
+ mockedUseServerHealth.mockReturnValue({
+ data: {
+ status: 'healthy',
+ gpu_available: false,
+ backend_variant: 'cpu',
+ },
+ isLoading: false,
+ } as any);
+
+ mockedApiClient.getCudaStatus.mockResolvedValue({
+ available: false,
+ active: false,
+ downloading: false,
+ });
+
+ mockedApiClient.getRocmStatus.mockResolvedValue({
+ available: false,
+ active: false,
+ downloading: true,
+ download_progress: {
+ model_name: 'rocm-backend',
+ current: 0,
+ total: 1000,
+ progress: 0,
+ filename: 'Downloading ROCm libraries...',
+ status: 'downloading',
+ timestamp: new Date().toISOString(),
+ },
+ });
+
+ // Mock EventSource — use vi.stubGlobal so vi.restoreAllMocks() in afterEach
+ // tears it down automatically and doesn't bleed into other tests.
+ const mockEventSource = {
+ onmessage: null as ((event: MessageEvent) => void) | null,
+ onerror: null as (() => void) | null,
+ close: vi.fn(),
+ };
+
+ vi.stubGlobal('EventSource', vi.fn(() => mockEventSource));
+
+ renderComponent();
+
+ await waitFor(() => {
+ expect(screen.getByText('Downloading ROCm libraries...')).toBeInTheDocument();
+ });
+
+ // Simulate SSE progress update
+ if (mockEventSource.onmessage) {
+ mockEventSource.onmessage(
+ new MessageEvent('message', {
+ data: JSON.stringify({
+ model_name: 'rocm-backend',
+ current: 500,
+ total: 1000,
+ progress: 50,
+ filename: 'Downloading ROCm libraries...',
+ status: 'downloading',
+ timestamp: new Date().toISOString(),
+ }),
+ }),
+ );
+ }
+
+ await waitFor(() => {
+ expect(screen.getByText('50.0%')).toBeInTheDocument();
+ });
+
+ // Simulate completion
+ if (mockEventSource.onmessage) {
+ mockEventSource.onmessage(
+ new MessageEvent('message', {
+ data: JSON.stringify({
+ model_name: 'rocm-backend',
+ current: 1000,
+ total: 1000,
+ progress: 100,
+ filename: 'Extracting ROCm libraries...',
+ status: 'complete',
+ timestamp: new Date().toISOString(),
+ }),
+ }),
+ );
+ }
+ });
+
+ it('shows "Switch to CPU Backend" when running ROCm', async () => {
+ mockedUseServerHealth.mockReturnValue({
+ data: {
+ status: 'healthy',
+ gpu_available: true,
+ gpu_type: 'ROCm (AMD Radeon RX 7900 XTX)',
+ backend_variant: 'rocm',
+ vram_used_mb: 2048,
+ },
+ isLoading: false,
+ } as any);
+
+ renderComponent();
+
+ await waitFor(() => {
+ expect(screen.getByText('AMD Radeon RX 7900 XTX')).toBeInTheDocument();
+ expect(screen.getByText('Switch to CPU Backend')).toBeInTheDocument();
+ });
+ });
+
+ it('shows "Switch to ROCm Backend" when ROCm is downloaded but not active', async () => {
+ mockedUseServerHealth.mockReturnValue({
+ data: {
+ status: 'healthy',
+ gpu_available: false,
+ backend_variant: 'cpu',
+ },
+ isLoading: false,
+ } as any);
+
+ mockedApiClient.getCudaStatus.mockResolvedValue({
+ available: false,
+ active: false,
+ downloading: false,
+ });
+
+ mockedApiClient.getRocmStatus.mockResolvedValue({
+ available: true,
+ active: false,
+ downloading: false,
+ });
+
+ renderComponent();
+
+ await waitFor(() => {
+ expect(screen.getByText('Switch to ROCm Backend')).toBeInTheDocument();
+ });
+ });
+
+ it('calls downloadRocmBackend when AMD download button is clicked', async () => {
+ mockedUseServerHealth.mockReturnValue({
+ data: {
+ status: 'healthy',
+ gpu_available: false,
+ backend_variant: 'cpu',
+ },
+ isLoading: false,
+ } as any);
+
+ mockedApiClient.getCudaStatus.mockResolvedValue({
+ available: false,
+ active: false,
+ downloading: false,
+ });
+
+ mockedApiClient.getRocmStatus.mockResolvedValue({
+ available: false,
+ active: false,
+ downloading: false,
+ });
+
+ mockedApiClient.downloadRocmBackend.mockResolvedValue({
+ message: 'ROCm backend download started',
+ progress_key: 'rocm-backend',
+ });
+
+ renderComponent();
+
+ const downloadButton = await screen.findByText('Download AMD ROCm Backend');
+ fireEvent.click(downloadButton);
+
+ await waitFor(() => {
+ expect(mockedApiClient.downloadRocmBackend).toHaveBeenCalledTimes(1);
+ });
+ });
+
+ it('calls setBackendOverride("cpu") when switching from ROCm to CPU', async () => {
+ const setBackendOverrideMock = vi.fn().mockResolvedValue(undefined);
+ mockedUsePlatform.mockReturnValue({
+ metadata: { isTauri: true },
+ lifecycle: {
+ restartServer: vi.fn().mockResolvedValue(undefined),
+ setBackendOverride: setBackendOverrideMock,
+ },
+ } as any);
+
+ mockedUseServerHealth.mockReturnValue({
+ data: {
+ status: 'healthy',
+ gpu_available: true,
+ gpu_type: 'ROCm (AMD Radeon RX 7900 XTX)',
+ backend_variant: 'rocm',
+ vram_used_mb: 2048,
+ },
+ isLoading: false,
+ } as any);
+
+ renderComponent();
+
+ const switchButton = await screen.findByText('Switch to CPU Backend');
+ fireEvent.click(switchButton);
+
+ await waitFor(() => {
+ expect(setBackendOverrideMock).toHaveBeenCalledWith('cpu');
+ });
+ });
+});
diff --git a/app/src/components/ServerSettings/GpuAcceleration.tsx b/app/src/components/ServerSettings/GpuAcceleration.tsx
index 7b2c9749..46e0d4bd 100644
--- a/app/src/components/ServerSettings/GpuAcceleration.tsx
+++ b/app/src/components/ServerSettings/GpuAcceleration.tsx
@@ -5,7 +5,7 @@ import { Button } from '@/components/ui/button';
import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card';
import { Progress } from '@/components/ui/progress';
import { apiClient } from '@/lib/api/client';
-import type { CudaDownloadProgress } from '@/lib/api/types';
+import type { CudaDownloadProgress, RocmDownloadProgress } from '@/lib/api/types';
import { useServerHealth } from '@/lib/hooks/useServer';
import { usePlatform } from '@/platform/PlatformContext';
import { useServerStore } from '@/stores/serverStore';
@@ -21,6 +21,9 @@ export function GpuAcceleration() {
const [restartPhase, setRestartPhase] = useState('idle');
const [error, setError] = useState<string | null>(null);
const [downloadProgress, setDownloadProgress] = useState<CudaDownloadProgress | null>(null);
+ const [rocmDownloadProgress, setRocmDownloadProgress] = useState<RocmDownloadProgress | null>(
+ null,
+ );
const healthPollRef = useRef<ReturnType<typeof setInterval> | null>(null);
// Query CUDA backend status
@@ -36,10 +39,26 @@ export function GpuAcceleration() {
enabled: !!health, // Only fetch when backend is reachable
});
+ // Query ROCm backend status
+ const {
+ data: rocmStatus,
+ isLoading: _rocmStatusLoading,
+ refetch: refetchRocmStatus,
+ } = useQuery({
+ queryKey: ['rocm-status', serverUrl],
+ queryFn: () => apiClient.getRocmStatus(),
+ refetchInterval: (query) => (query.state.status === 'pending' ? false : 10000),
+ retry: 1,
+ enabled: !!health, // Only fetch when backend is reachable
+ });
+
// Derived state
const isCurrentlyCuda = health?.backend_variant === 'cuda';
+ const isCurrentlyRocm = health?.backend_variant === 'rocm';
const cudaAvailable = cudaStatus?.available ?? false;
const cudaDownloading = cudaStatus?.downloading ?? false;
+ const rocmAvailable = rocmStatus?.available ?? false;
+ const rocmDownloading = rocmStatus?.downloading ?? false;
// Clean up health poll on unmount
useEffect(() => {
@@ -51,7 +70,7 @@ export function GpuAcceleration() {
};
}, []);
- // SSE progress tracking during download
+ // SSE progress tracking during CUDA download
useEffect(() => {
if (!cudaDownloading || !serverUrl) {
return;
@@ -88,6 +107,43 @@ export function GpuAcceleration() {
};
}, [cudaDownloading, serverUrl, refetchCudaStatus]);
+ // SSE progress tracking during ROCm download
+ useEffect(() => {
+ if (!rocmDownloading || !serverUrl) {
+ return;
+ }
+
+ const eventSource = new EventSource(`${serverUrl}/backend/rocm-progress`);
+
+ eventSource.onmessage = (event) => {
+ try {
+ const data = JSON.parse(event.data) as RocmDownloadProgress;
+ setRocmDownloadProgress(data);
+
+ if (data.status === 'complete') {
+ eventSource.close();
+ setRocmDownloadProgress(null);
+ refetchRocmStatus();
+ } else if (data.status === 'error') {
+ eventSource.close();
+ setError(data.error || 'Download failed');
+ setRocmDownloadProgress(null);
+ refetchRocmStatus();
+ }
+ } catch (e) {
+ console.error('Error parsing ROCm progress event:', e);
+ }
+ };
+
+ eventSource.onerror = () => {
+ eventSource.close();
+ };
+
+ return () => {
+ eventSource.close();
+ };
+ }, [rocmDownloading, serverUrl, refetchRocmStatus]);
+
// Start aggressive health polling during restart
const startHealthPolling = useCallback(() => {
if (healthPollRef.current) return;
@@ -113,7 +169,7 @@ export function GpuAcceleration() {
}, 1000);
}, [queryClient]);
- const handleDownload = async () => {
+ const handleDownloadCuda = async () => {
setError(null);
try {
await apiClient.downloadCudaBackend();
@@ -128,6 +184,21 @@ export function GpuAcceleration() {
}
};
+ const handleDownloadRocm = async () => {
+ setError(null);
+ try {
+ await apiClient.downloadRocmBackend();
+ refetchRocmStatus();
+ } catch (e: unknown) {
+ const msg = e instanceof Error ? e.message : 'Failed to start download';
+ if (msg.includes('already downloaded')) {
+ refetchRocmStatus();
+ } else {
+ setError(msg);
+ }
+ }
+ };
+
const handleRestart = async () => {
setError(null);
setRestartPhase('stopping');
@@ -154,18 +225,17 @@ export function GpuAcceleration() {
}
};
- const handleSwitchToCpu = async () => {
- // To switch to CPU: delete the CUDA binary, then restart.
- // start_server always prefers CUDA if present, so we must remove it first.
+ const handleSwitchToCpuFromCuda = async () => {
setError(null);
setRestartPhase('stopping');
try {
- await apiClient.deleteCudaBackend();
+ // Tell Rust launcher to skip GPU binary detection on next start.
+ // We cannot delete an active .exe on Windows, so we override instead.
+ await platform.lifecycle.setBackendOverride('cpu');
setRestartPhase('waiting');
startHealthPolling();
await platform.lifecycle.restartServer();
- // Invoke resolved — server is likely ready
if (healthPollRef.current) {
clearInterval(healthPollRef.current);
healthPollRef.current = null;
@@ -184,7 +254,36 @@ export function GpuAcceleration() {
}
};
- const handleDelete = async () => {
+ const handleSwitchToCpuFromRocm = async () => {
+ setError(null);
+ setRestartPhase('stopping');
+
+ try {
+ // Tell Rust launcher to skip GPU binary detection on next start.
+ // We cannot delete an active .exe on Windows, so we override instead.
+ await platform.lifecycle.setBackendOverride('cpu');
+ setRestartPhase('waiting');
+ startHealthPolling();
+ await platform.lifecycle.restartServer();
+ if (healthPollRef.current) {
+ clearInterval(healthPollRef.current);
+ healthPollRef.current = null;
+ }
+ setRestartPhase('ready');
+ queryClient.invalidateQueries();
+ setTimeout(() => setRestartPhase('idle'), 2000);
+ } catch (e: unknown) {
+ setRestartPhase('idle');
+ if (healthPollRef.current) {
+ clearInterval(healthPollRef.current);
+ healthPollRef.current = null;
+ }
+ setError(e instanceof Error ? e.message : 'Failed to switch to CPU');
+ refetchRocmStatus();
+ }
+ };
+
+ const handleDeleteCuda = async () => {
setError(null);
try {
await apiClient.deleteCudaBackend();
@@ -194,6 +293,16 @@ export function GpuAcceleration() {
}
};
+ const handleDeleteRocm = async () => {
+ setError(null);
+ try {
+ await apiClient.deleteRocmBackend();
+ refetchRocmStatus();
+ } catch (e: unknown) {
+ setError(e instanceof Error ? e.message : 'Failed to delete ROCm backend');
+ }
+ };
+
const formatBytes = (bytes: number): string => {
if (bytes === 0) return '0 B';
const k = 1024;
@@ -205,7 +314,7 @@ export function GpuAcceleration() {
// Don't render until health data is available
if (!health) return null;
- // If the system already has native GPU (MPS, etc.), only show info - no CUDA needed
+ // If the system already has native GPU (MPS, ROCm active, etc.), only show info - no download needed
const hasNativeGpu =
health.gpu_available &&
!isCurrentlyCuda &&
@@ -241,8 +350,6 @@ export function GpuAcceleration() {
)}
- {/* Native GPU detected - no CUDA download needed */}
-
{/* Currently running CUDA - show switch back to CPU */}
{isCurrentlyCuda && platform.metadata.isTauri && (
<>
@@ -261,7 +368,50 @@ export function GpuAcceleration() {
Running with CUDA GPU acceleration. Switch back to CPU if needed (you can
re-download later).
-