diff --git a/compress_html.py b/compress_html.py
deleted file mode 100644
index b808140..0000000
--- a/compress_html.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import gzip
-import os
-import glob
-
-SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
-HTML_DIR = os.path.join(SCRIPT_DIR, "src", "www")
-
-OUTPUT_HEADER_NAME = "www.h"
-OUTPUT_HEADER = os.path.join(HTML_DIR, OUTPUT_HEADER_NAME)
-
-SUPPORTED_EXTENSIONS = ["*.html", "*.js", "*.css","*.svg","*.png"]
-
-MIME_TYPES = {
- ".html": "text/html",
- ".htm": "text/html",
- ".js": "application/javascript",
- ".css": "text/css",
- ".json": "application/json",
- ".png": "image/png",
- ".jpg": "image/jpeg",
- ".jpeg": "image/jpeg",
- ".svg": "image/svg+xml",
- ".ico": "image/x-icon",
-}
-
-def generate_header_start(f):
- f.write("#ifndef WWW_H\n#define WWW_H\n\n")
-    f.write("#include <stdint.h>\n\n")
-
-def generate_header_end(f):
- f.write("\n#endif // WWW_H\n")
-
-def guess_mime_type(filename):
- _, ext = os.path.splitext(filename.lower())
- return MIME_TYPES.get(ext, "application/octet-stream")
-
-def compress_and_append_file(input_file, f):
- compressed_file = input_file + ".gz"
-
- with open(input_file, "rb") as infile:
- with gzip.open(compressed_file, "wb", compresslevel=9) as outfile:
- outfile.write(infile.read())
-
- with open(compressed_file, "rb") as cf:
- data = cf.read()
-
- array_name = os.path.basename(input_file).replace(".", "_")
-
- f.write(f"const uint8_t {array_name}_gz[] PROGMEM = {{\n")
- for i in range(0, len(data), 16):
- line = ', '.join(f'0x{b:02x}' for b in data[i:i+16])
- f.write(f" {line},\n")
- f.write("};\n\n")
- f.write(f"const unsigned int {array_name}_gz_len = {len(data)};\n")
- f.write(f"const char * {array_name}_gz_mime = \"{guess_mime_type(input_file)}\";\n\n")
-
- os.remove(compressed_file)
- print(f"Appended: {array_name}_gz to {OUTPUT_HEADER_NAME} with MIME {guess_mime_type(input_file)}")
-
-if __name__ == "__main__":
- if not os.path.isdir(HTML_DIR):
- print(f"Error: {HTML_DIR} is not a valid directory")
- exit(1)
-
- files_to_process = []
- for pattern in SUPPORTED_EXTENSIONS:
- files_to_process.extend(glob.glob(os.path.join(HTML_DIR, pattern)))
-
- if not files_to_process:
- print(f"No matching files found in {HTML_DIR}")
- exit(0)
-
- with open(OUTPUT_HEADER, "w") as f:
- generate_header_start(f)
-
- for file in files_to_process:
- compress_and_append_file(file, f)
-
- generate_header_end(f)
-
- print(f"\n✅ All files combined into: {OUTPUT_HEADER}")
diff --git a/pre_build.py b/pre_build.py
index 7c32612..f4ca409 100755
--- a/pre_build.py
+++ b/pre_build.py
@@ -1,14 +1,83 @@
-import subprocess
+# ---------------------------------------------------------------------------- #
+# Compress files in src/www directory into a single header file. #
+# ---------------------------------------------------------------------------- #
+
+import gzip
import os
+import glob
+
+WWW_DIR = os.path.join("src", "www")
+OUTPUT_HEADER_NAME = "www.h"
+OUTPUT_HEADER_FILE = os.path.join(WWW_DIR, OUTPUT_HEADER_NAME)
+
+SUPPORTED_EXTENSIONS = ["*.html", "*.js", "*.css", "*.svg", "*.png"]
+
+MIME_TYPES = {
+ ".css": "text/css",
+ ".htm": "text/html",
+ ".html": "text/html",
+ ".ico": "image/x-icon",
+ ".jpeg": "image/jpeg",
+ ".jpg": "image/jpeg",
+ ".js": "text/javascript",
+ ".json": "application/json",
+ ".png": "image/png",
+ ".svg": "image/svg+xml",
+}
+
+def guess_mime_type(filename):
+ _, ext = os.path.splitext(filename.lower())
+ return MIME_TYPES.get(ext, "application/octet-stream")
+
+def compress_and_generate_entry(input_file):
+ # Compress in-memory, no temp file
+ with open(input_file, "rb") as infile:
+ data = infile.read()
+ compressed_data = gzip.compress(data, compresslevel=9)
+
+ # ------------ Generate a C array name based on the file name ------------ #
+ # array_name = os.path.basename(input_file).replace(".", "_")
+ # ---------- Generate a C array name based on the relative path ---------- #
+ array_name = os.path.relpath(input_file, WWW_DIR).replace(os.sep, "_").replace(".", "_")
+
+ entry = [f"const uint8_t {array_name}_gz[] PROGMEM = {{\n"]
+ for i in range(0, len(compressed_data), 16):
+ line = ', '.join(f'0x{b:02x}' for b in compressed_data[i:i+16])
+ entry.append(f" {line},\n")
+
+ entry.append("};\n\n")
+ entry.append(f"const unsigned int {array_name}_gz_len = {len(compressed_data)};\n")
+ entry.append(f"const char * {array_name}_gz_mime = \"{guess_mime_type(input_file)}\";\n\n")
+ file = os.path.relpath(input_file, WWW_DIR)
+ print(f"Added: {file} as {array_name}_gz with MIME {guess_mime_type(input_file)}")
+ return ''.join(entry)
+
+def compress_files():
+
+ if not os.path.isdir(WWW_DIR):
+ print(f"❌ Error: {WWW_DIR} is not a valid directory")
+ exit(1)
+
+ files_to_process = set()
+ for pattern in SUPPORTED_EXTENSIONS:
+ files_to_process.update(glob.iglob(os.path.join(WWW_DIR, "**", pattern), recursive=True))
+
+ files_to_process = list(files_to_process)
+ if not files_to_process:
+ print(f"☑️ No matching files found in {WWW_DIR}")
+ exit(0)
+
+ entries = []
+ for fpath in files_to_process:
+ entries.append(compress_and_generate_entry(fpath))
+
+ with open(OUTPUT_HEADER_FILE, "w") as f:
+        f.write("#ifndef WWW_H\n#define WWW_H\n\n#include <stdint.h>\n\n")
+ for entry in entries:
+ f.write(entry)
-def compress_html_files():
- # Change the working directory to the location of the compression script
- os.chdir("src/www")
+ f.write("\n#endif // WWW_H\n")
- # List of HTML files to compress
- html_files = ["setuppage.html, updatepage.html"]
- for html_file in html_files:
- print("Compressing file:", html_file)
- subprocess.run(["python", "../../compress_html.py", html_file])
+ print(f"\n✅ All files combined into: {OUTPUT_HEADER_FILE}")
-compress_html_files()
\ No newline at end of file
+compress_files()
\ No newline at end of file