diff --git a/.github/workflows/installation-test.yml b/.github/workflows/installation-test.yml
index 2f9986e..94c8451 100644
--- a/.github/workflows/installation-test.yml
+++ b/.github/workflows/installation-test.yml
@@ -31,12 +31,19 @@ jobs:
cache-dependency-path: backend-agent/requirements.txt
- run: pip install -r backend-agent/requirements.txt
- - name: Start server
+ - name: Start server and check health
run: |
cd backend-agent
- DISABLE_AGENT=1 python main.py &
- sleep 10
-
- - name: Check server health
- run: |
- curl -s -o /dev/null -w "%{http_code}" http://localhost:8080/health
+ DISABLE_AGENT=1 DB_PATH=${RUNNER_TEMP}/data.db python main.py > server.log 2>&1 &
+ for i in {1..20}; do
+ sleep 1
+ status=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:8080/health || true)
+ if [ "$status" -eq 200 ]; then
+ echo "Health check succeeded"
+ cat server.log
+ exit 0
+ fi
+ done
+ echo "Health check failed after waiting"
+ cat server.log
+ exit 1
diff --git a/.gitignore b/.gitignore
index 0887fc1..f9a5ba7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -89,6 +89,8 @@ venv/
ENV/
env.bak/
venv.bak/
+venv310
+cache
# Spyder project settings
.spyderproject
diff --git a/backend-agent/.env.example b/backend-agent/.env.example
index 9bc0af0..9175bd5 100644
--- a/backend-agent/.env.example
+++ b/backend-agent/.env.example
@@ -12,3 +12,6 @@ API_KEY=super-secret-change-me
DEBUG=True
RESULT_SUMMARIZE_MODEL=gpt-4
+
+# Database path
+DB_PATH=path_to/database.db
diff --git a/backend-agent/app/db/models.py b/backend-agent/app/db/models.py
new file mode 100644
index 0000000..6866936
--- /dev/null
+++ b/backend-agent/app/db/models.py
@@ -0,0 +1,57 @@
+from flask_sqlalchemy import SQLAlchemy
+
+db = SQLAlchemy()
+
+
+# Represents a target model that can be attacked by various attacks.
+class TargetModel(db.Model):
+ __tablename__ = 'target_models'
+ id = db.Column(db.Integer, primary_key=True)
+ name = db.Column(db.String, unique=True, nullable=False)
+ description = db.Column(db.String)
+
+
+# Represents an attack that can be performed on a target model.
+class Attack(db.Model):
+ __tablename__ = 'attacks'
+ id = db.Column(db.Integer, primary_key=True)
+ name = db.Column(db.String, nullable=False, unique=True)
+ weight = db.Column(db.Integer, nullable=False, default=1, server_default="1") # noqa: E501
+
+
+# Represents a sub-attack that is part of a larger attack.
+class SubAttack(db.Model):
+ __tablename__ = 'sub_attacks'
+ id = db.Column(db.Integer, primary_key=True)
+ name = db.Column(db.String, nullable=False)
+ description = db.Column(db.String)
+ attack_id = db.Column(db.Integer, db.ForeignKey('attacks.id'), nullable=False) # noqa: E501
+
+
+# Represents the results of each single attack on a target model.
+class AttackResult(db.Model):
+ __tablename__ = 'attack_results'
+ id = db.Column(db.Integer, primary_key=True)
+ attack_model_id = db.Column(db.Integer, db.ForeignKey('target_models.id'), nullable=False) # noqa: E501
+ attack_id = db.Column(db.Integer, db.ForeignKey('attacks.id'), nullable=False) # noqa: E501
+ success = db.Column(db.Boolean, nullable=False)
+ vulnerability_type = db.Column(db.String, nullable=True)
+ details = db.Column(db.JSON, nullable=True) # JSON field
+
+
+# Represents the global attack success rate of an attack on a target model,
+# including the total number of attacks and successful attacks.
+class ModelAttackScore(db.Model):
+ __tablename__ = 'model_attack_scores'
+ id = db.Column(db.Integer, primary_key=True)
+ attack_model_id = db.Column(db.Integer, db.ForeignKey('target_models.id'), nullable=False) # noqa: E501
+ attack_id = db.Column(db.Integer, db.ForeignKey('attacks.id'), nullable=False) # noqa: E501
+ total_number_of_attack = db.Column(db.Integer, nullable=False)
+ total_success = db.Column(db.Integer, nullable=False)
+
+ __table_args__ = (
+ db.UniqueConstraint('attack_model_id', 'attack_id', name='uix_model_attack'), # noqa: E501
+ )
+
+
+db.configure_mappers()
diff --git a/backend-agent/app/db/utils.py b/backend-agent/app/db/utils.py
new file mode 100644
index 0000000..f1cc505
--- /dev/null
+++ b/backend-agent/app/db/utils.py
@@ -0,0 +1,91 @@
+import logging
+
+from .models import (
+ Attack as AttackDB,
+ db,
+ TargetModel as TargetModelDB,
+ AttackResult as AttackResultDB,
+ ModelAttackScore as ModelAttackScoreDB,
+)
+
+from status import status
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+logger.addHandler(status.trace_logging)
+
+
+# Persist the attack result into the database for each attack.
+def save_to_db(attack_results) -> list[AttackResultDB]:
+ """
+ Persist the attack result into the database.
+ Returns a list of AttackResults that were added.
+ """
+ inserted_records = []
+
+ # Retrieve what to save to db
+ attack_name = attack_results.attack.lower()
+ success = attack_results.success
+ vulnerability_type = attack_results.vulnerability_type.lower()
+ details = attack_results.details # JSON column
+ target_name = details.get('target_model', '').lower()
+
+ # If target model name is not provided, skip saving
+ if not target_name:
+ logger.info("Skipping result: missing target model name.")
+        return []
+
+ # If target model does not exist, create it
+ target_model = TargetModelDB.query.filter_by(name=target_name).first()
+ if not target_model:
+ target_model = TargetModelDB(name=target_name)
+ db.session.add(target_model)
+ db.session.flush()
+
+ # If attack does not exist, create it with default weight to 1
+ attack = AttackDB.query.filter_by(name=attack_name).first()
+ if not attack:
+ attack = AttackDB(name=attack_name, weight=1)
+ db.session.add(attack)
+ db.session.flush()
+
+ # Add the attack result to inserted_records
+ db_record = AttackResultDB(
+ attack_model_id=target_model.id,
+ attack_id=attack.id,
+ success=success,
+ vulnerability_type=vulnerability_type,
+ details=details,
+ )
+ db.session.add(db_record)
+ inserted_records.append(db_record)
+
+ # If model_attack_score does not exist, create it
+ # otherwise, update the existing record
+ model_attack_score = ModelAttackScoreDB.query.filter_by(
+ attack_model_id=target_model.id,
+ attack_id=attack.id
+ ).first()
+ if not model_attack_score:
+ model_attack_score = ModelAttackScoreDB(
+ attack_model_id=target_model.id,
+ attack_id=attack.id,
+ total_number_of_attack=details.get('total_attacks', 0),
+ total_success=details.get('number_successful_attacks', 0)
+ )
+ else:
+ model_attack_score.total_number_of_attack += details.get('total_attacks', 0) # noqa: E501
+ model_attack_score.total_success += details.get('number_successful_attacks', 0) # noqa: E501
+ db.session.add(model_attack_score)
+ inserted_records.append(model_attack_score)
+
+ # Commit the session to save all changes to the database
+ # or rollback if an error occurs
+ try:
+ db.session.commit()
+ logger.info("Results successfully saved to the database.")
+ return inserted_records
+ except Exception as e:
+ db.session.rollback()
+ logger.error("Error while saving to the database: %s", e)
+ return []
diff --git a/backend-agent/attack.py b/backend-agent/attack.py
index a394307..b81980e 100644
--- a/backend-agent/attack.py
+++ b/backend-agent/attack.py
@@ -1,18 +1,27 @@
-from argparse import Namespace
-from dataclasses import asdict
import json
-import os
import logging
+import os
+from argparse import Namespace
+from dataclasses import asdict
+from app.db.utils import save_to_db
from attack_result import AttackResult, SuiteResult
-from libs.artprompt import start_artprompt, \
- OUTPUT_FILE as artprompt_out_file
-from libs.codeattack import start_codeattack, \
- OUTPUT_FILE as codeattack_out_file
-from libs.gptfuzz import perform_gptfuzz_attack, \
- OUTPUT_FILE as gptfuzz_out_file
-from libs.promptmap import start_prompt_map, \
- OUTPUT_FILE as prompt_map_out_file
+from libs.artprompt import (
+ OUTPUT_FILE as artprompt_out_file,
+ start_artprompt,
+)
+from libs.codeattack import (
+ OUTPUT_FILE as codeattack_out_file,
+ start_codeattack,
+)
+from libs.gptfuzz import (
+ OUTPUT_FILE as gptfuzz_out_file,
+ perform_gptfuzz_attack,
+)
+from libs.promptmap import (
+ OUTPUT_FILE as prompt_map_out_file,
+ start_prompt_map,
+)
from libs.pyrit import start_pyrit_attack
from llm import LLM
from status import Trace
@@ -247,6 +256,7 @@ def run(self, summarize_by_llm: bool = False) -> SuiteResult:
summary = self.summarize_attack_result(result)
result.details['summary'] = summary
full_result.append(result)
+ save_to_db(result)
return SuiteResult(full_result)
def summarize_attack_result(self, attack_result: AttackResult) -> str:
diff --git a/backend-agent/libs/artprompt.py b/backend-agent/libs/artprompt.py
index 20a0895..da03a90 100644
--- a/backend-agent/libs/artprompt.py
+++ b/backend-agent/libs/artprompt.py
@@ -25,9 +25,11 @@
import pandas as pd
from nltk.corpus import stopwords
+from app.db.utils import save_to_db
from attack_result import AttackResult
from llm import LLM
-from status import status, Step
+from status import Step, status
+
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@@ -483,14 +485,17 @@ def start_artprompt(target_model: LLM,
logger.info(f'Write results to output file {outfile}')
with open(outfile, 'w') as f:
json.dump(evaluations_res, f, indent=4)
-
- return AttackResult(
+ result = AttackResult(
'artprompt',
successful_attacks > 0,
'prompt-injection',
{
+ 'target_model': target_model.model_name,
+ 'total_attacks': num_samples,
'number_successful_attacks': successful_attacks,
'successful_attacks': successful_attacks_list,
'attack_description': DESCRIPTION
}
)
+ save_to_db(result)
+ return result
diff --git a/backend-agent/libs/codeattack.py b/backend-agent/libs/codeattack.py
index d906712..227941e 100644
--- a/backend-agent/libs/codeattack.py
+++ b/backend-agent/libs/codeattack.py
@@ -8,9 +8,10 @@
from codeattack.post_processing import PostProcessor
from codeattack.target_llm import TargetLLM
+from app.db.utils import save_to_db
from attack_result import AttackResult
from llm import LLM
-from status import status, Step
+from status import Step, status
logger = logging.getLogger(__name__)
@@ -131,7 +132,6 @@ def start_codeattack(target_model: LLM,
prompts = random.sample(prompts,
min(int(num_prompts), len(prompts)))
logger.debug(f'Run {len(prompts)} prompt attacks')
-
output_file = parameters.get('output_file', OUTPUT_FILE)
data_key = f'code_wrapped_{prompt_type}'
@@ -204,16 +204,21 @@ def start_codeattack(target_model: LLM,
# # Write results to file
with open(output_file, 'w') as f:
json.dump(successful_attacks_list, f)
- return AttackResult(
+
+ result = AttackResult(
'codeattack',
successful_attacks > 0,
'prompt-injection',
{
+ 'target_model': target_model.model_name,
+ 'total_attacks': len(prompts),
'number_successful_attacks': successful_attacks,
'successful_attacks': successful_attacks_list,
'attack_description': DESCRIPTION
}
)
+ save_to_db(result)
+ return result
def _prompt_attack(data, target_llm, post_processor, judge_llm, data_key=''):
diff --git a/backend-agent/libs/gptfuzz.py b/backend-agent/libs/gptfuzz.py
index 8cc4abf..9a6b6f4 100644
--- a/backend-agent/libs/gptfuzz.py
+++ b/backend-agent/libs/gptfuzz.py
@@ -4,6 +4,7 @@
import pandas as pd
from dotenv import load_dotenv
+
from gptfuzzer.fuzzer.core import GPTFuzzer
from gptfuzzer.fuzzer.mutator import (MutateRandomSinglePolicy,
OpenAIMutatorCrossOver,
@@ -15,9 +16,10 @@
from gptfuzzer.llm import LLM, OpenAILLM
from gptfuzzer.utils.predict import RoBERTaPredictor
+from app.db.utils import save_to_db
from attack_result import AttackResult
from llm import LLM as AgentLLM
-from status import status, Step
+from status import Step, status
load_dotenv()
@@ -163,13 +165,16 @@ def perform_gptfuzz_attack(mutate_model: LLM,
with Step('Running Fuzzer'):
fuzzer.run()
logger.info('Fuzzer finished')
- return AttackResult(
+ result = AttackResult(
'gptfuzz',
fuzzer.current_jailbreak > 0,
'jailbreak',
- details={
- 'result_file': output_file,
- 'query_count': fuzzer.current_query,
- 'attack_description': DESCRIPTION
+ {
+ 'target_model': target_model.llm.model_name,
+ 'total_attacks': fuzzer.current_query,
+ 'number_successful_attacks': fuzzer.current_jailbreak,
+ 'attack_description': DESCRIPTION,
}
)
+ save_to_db(result)
+ return result
diff --git a/backend-agent/libs/promptmap.py b/backend-agent/libs/promptmap.py
index 3757d59..fdf038b 100644
--- a/backend-agent/libs/promptmap.py
+++ b/backend-agent/libs/promptmap.py
@@ -17,9 +17,10 @@
import logging
import re
+from app.db.utils import save_to_db
from attack_result import AttackResult
from llm import LLM
-from status import status, Step
+from status import Step, status
COUNT_PROMPTS = 2
@@ -305,7 +306,7 @@ def start_prompt_map(target_model: LLM, parameters: dict) -> AttackResult:
target_system_prompt)
except Exception as e:
logger.error('Error occurred while evaluating attack '
- 'success rate: ', e)
+                     'success rate: %s', e)
continue
if is_successful:
logger.info('* Prompt attack successful!')
@@ -338,12 +339,17 @@ def start_prompt_map(target_model: LLM, parameters: dict) -> AttackResult:
     # Write results to file
     with open(output_file, 'w') as f:
         json.dump(successful_attacks_json, f)
-    return AttackResult(
+    result = AttackResult(
         'promptmap',
         security_failed > 0,
         'prompt-injection',
         {
+            'target_model': target_model.model_name,
+            'total_attacks': total_attack_count,
+            'number_successful_attacks': len(successful_attacks),
             'successful_attacks': successful_attacks_json,
             'attack_description': DESCRIPTION
         }
     )
+    save_to_db(result)
+    return result
diff --git a/backend-agent/libs/pyrit.py b/backend-agent/libs/pyrit.py
index 7f7a520..098a9c2 100644
--- a/backend-agent/libs/pyrit.py
+++ b/backend-agent/libs/pyrit.py
@@ -10,6 +10,7 @@
from pyrit.prompt_target.prompt_chat_target.openai_chat_target import OpenAIChatInterface # noqa E501
from pyrit.score import SupportTextClassification, Score
+from app.db.utils import save_to_db
from attack_result import AttackResult
from llm import LLM
from status import status, Step
@@ -227,7 +228,7 @@ def start_pyrit_attack(
max_turns = parameters.get('max_turns', 3)
display_intermediate_results = parameters.get(
'display_intermediate_results', True)
- vulnerability_type = parameters.get('vulnerability_type', '')
+ vulnerability_type = parameters.get('vulnerability_type', 'jailbreak')
with Step('Preparing Attack'):
attack_strategy = AttackStrategy(
@@ -251,15 +252,21 @@
             initial_red_teaming_prompt='how can I help you?',
             verbose=True,
     ) as red_teaming_orchestrator:
-
         with Step('Running Attack'):
-            result = red_teaming_orchestrator.apply_attack_strategy_until_completion( # noqa E501
+            attack_result = red_teaming_orchestrator.apply_attack_strategy_until_completion( # noqa E501
                 max_turns=max_turns,
                 display_intermediate_results=display_intermediate_results)
-            return AttackResult(
+            result = AttackResult(
                 'PyRIT',
-                success=result['success'],
-                details={'response': result['response'],
-                         'attack_description': DESCRIPTION},
-                vulnerability_type=vulnerability_type
+                attack_result['success'],
+                vulnerability_type,
+                {
+                    'target_model': target_model.model_name,
+                    'total_attacks': 1,
+                    'response': attack_result['response'],
+                    'number_successful_attacks': 1 if attack_result['success'] else 0, # noqa: E501
+ 'attack_description': DESCRIPTION,
+ }
)
+ save_to_db(result)
+ return result
diff --git a/backend-agent/main.py b/backend-agent/main.py
index 7f6e80d..40a205a 100644
--- a/backend-agent/main.py
+++ b/backend-agent/main.py
@@ -1,16 +1,19 @@
import json
import os
+
from dotenv import load_dotenv
from flask import Flask, abort, jsonify, request, send_file
from flask_cors import CORS
from flask_sock import Sock
+from sqlalchemy import select
-if not os.getenv('DISABLE_AGENT'):
- from agent import agent
-from status import status, LangchainStatusCallbackHandler
+from app.db.models import TargetModel, ModelAttackScore, Attack, db
from attack_result import SuiteResult
+from status import LangchainStatusCallbackHandler, status
+if not os.getenv('DISABLE_AGENT'):
+ from agent import agent
#############################################################################
# Flask web server #
#############################################################################
@@ -21,6 +24,16 @@
load_dotenv()
+db_path = os.getenv("DB_PATH")
+
+if not db_path:
+ raise EnvironmentError(
+        "Missing DB_PATH environment variable. Please set DB_PATH in your "
+        ".env file to a valid SQLite file path."
+ )
+
+app.config['SQLALCHEMY_DATABASE_URI'] = f"sqlite:///{db_path}"
+
# Langfuse can be used to analyze tracings and help in debugging.
langfuse_handler = None
if os.getenv('ENABLE_LANGFUSE'):
@@ -40,6 +53,10 @@
} if langfuse_handler else {
'callbacks': [status_callback_handler]}
+with app.app_context():
+ db.init_app(app)
+ db.create_all() # create every SQLAlchemy tables defined in models.py
+
def send_intro(sock):
"""
@@ -129,6 +146,77 @@ def check_health():
return jsonify({'status': 'ok'})
+# Endpoint to fetch heatmap data from db
+@app.route('/api/heatmap', methods=['GET'])
+def get_heatmap():
+ """
+ Endpoint to retrieve heatmap data showing model score
+ against various attacks.
+
+ Queries the database for total attacks and successes per target model and
+ attack combination.
+ Calculates attack success rate and returns structured data for
+ visualization.
+
+ Returns:
+ JSON response with:
+ - models: List of target models and their attack success rate
+ per attack.
+ - attacks: List of attack names and their associated weights.
+
+ HTTP Status Codes:
+ 200: Data successfully retrieved.
+ 500: Internal server error during query execution.
+ """
+ try:
+ query = (
+ select(
+ ModelAttackScore.total_number_of_attack,
+ ModelAttackScore.total_success,
+ TargetModel.name.label("attack_model_name"),
+ Attack.name.label("attack_name"),
+ Attack.weight.label("attack_weight")
+ )
+ .join(TargetModel, ModelAttackScore.attack_model_id == TargetModel.id) # noqa: E501
+ .join(Attack, ModelAttackScore.attack_id == Attack.id)
+ )
+
+ scores = db.session.execute(query).all()
+ all_models = {}
+ all_attacks = {}
+
+ for score in scores:
+ model_name = score.attack_model_name
+ attack_name = score.attack_name
+
+ if attack_name not in all_attacks:
+ all_attacks[attack_name] = score.attack_weight
+
+ if model_name not in all_models:
+ all_models[model_name] = {
+ 'name': model_name,
+ 'scores': {},
+ }
+
+ # Compute attack success rate for this model/attack
+ success_ratio = (
+ round((score.total_success / score.total_number_of_attack) * 100) # noqa: E501
+ if score.total_number_of_attack else 0
+ )
+
+ all_models[model_name]['scores'][attack_name] = success_ratio
+
+ return jsonify({
+ 'models': list(all_models.values()),
+ 'attacks': [
+ {'name': name, 'weight': weight}
+ for name, weight in sorted(all_attacks.items())
+ ]
+ })
+ except Exception as e:
+ return jsonify({'error': str(e)}), 500
+
+
if __name__ == '__main__':
if not os.getenv('API_KEY'):
print('No API key is set! Access is unrestricted.')
diff --git a/backend-agent/requirements.txt b/backend-agent/requirements.txt
index b73c3d3..b5e7df8 100644
--- a/backend-agent/requirements.txt
+++ b/backend-agent/requirements.txt
@@ -23,3 +23,4 @@ pyrit==0.2.1
textattack>=0.3.10
codeattack @ git+https://github.com/marcorosa/CodeAttack
gptfuzzer @ git+https://github.com/marcorosa/GPTFuzz@no-vllm
+Flask-SQLAlchemy==3.1.1
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index 11f1335..625a78a 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -18,7 +18,10 @@
"@angular/platform-browser": "^19.2.0",
"@angular/platform-browser-dynamic": "^19.2.0",
"@angular/router": "^19.2.0",
+ "apexcharts": "^4.7.0",
"ngx-markdown": "^19.1.0",
+ "node-sass": "^9.0.0",
+ "react-apexcharts": "^1.7.0",
"rxjs": "^7.8.2",
"sass": "^1.89.0",
"schematics-scss-migrate": "^2.3.17",
@@ -6645,6 +6648,62 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/@svgdotjs/svg.draggable.js": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@svgdotjs/svg.draggable.js/-/svg.draggable.js-3.0.6.tgz",
+ "integrity": "sha512-7iJFm9lL3C40HQcqzEfezK2l+dW2CpoVY3b77KQGqc8GXWa6LhhmX5Ckv7alQfUXBuZbjpICZ+Dvq1czlGx7gA==",
+ "license": "MIT",
+ "peerDependencies": {
+ "@svgdotjs/svg.js": "^3.2.4"
+ }
+ },
+ "node_modules/@svgdotjs/svg.filter.js": {
+ "version": "3.0.9",
+ "resolved": "https://registry.npmjs.org/@svgdotjs/svg.filter.js/-/svg.filter.js-3.0.9.tgz",
+ "integrity": "sha512-/69XMRCDoam2HgC4ldHIaDgeQf1ViHIsa0Ld4uWgiXtZ+E24DWHe/9Ib6kbNiZ7WRIdlVokUDR1Fg0kjIpkfbw==",
+ "license": "MIT",
+ "dependencies": {
+ "@svgdotjs/svg.js": "^3.2.4"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/@svgdotjs/svg.js": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/@svgdotjs/svg.js/-/svg.js-3.2.4.tgz",
+ "integrity": "sha512-BjJ/7vWNowlX3Z8O4ywT58DqbNRyYlkk6Yz/D13aB7hGmfQTvGX4Tkgtm/ApYlu9M7lCQi15xUEidqMUmdMYwg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/Fuzzyma"
+ }
+ },
+ "node_modules/@svgdotjs/svg.resize.js": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@svgdotjs/svg.resize.js/-/svg.resize.js-2.0.5.tgz",
+ "integrity": "sha512-4heRW4B1QrJeENfi7326lUPYBCevj78FJs8kfeDxn5st0IYPIRXoTtOSYvTzFWgaWWXd3YCDE6ao4fmv91RthA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14.18"
+ },
+ "peerDependencies": {
+ "@svgdotjs/svg.js": "^3.2.4",
+ "@svgdotjs/svg.select.js": "^4.0.1"
+ }
+ },
+ "node_modules/@svgdotjs/svg.select.js": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/@svgdotjs/svg.select.js/-/svg.select.js-4.0.3.tgz",
+ "integrity": "sha512-qkMgso1sd2hXKd1FZ1weO7ANq12sNmQJeGDjs46QwDVsxSRcHmvWKL2NDF7Yimpwf3sl5esOLkPqtV2bQ3v/Jg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14.18"
+ },
+ "peerDependencies": {
+ "@svgdotjs/svg.js": "^3.2.4"
+ }
+ },
"node_modules/@tootallnate/once": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz",
@@ -7637,6 +7696,12 @@
"dev": true,
"license": "BSD-2-Clause"
},
+ "node_modules/@yr/monotone-cubic-spline": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/@yr/monotone-cubic-spline/-/monotone-cubic-spline-1.0.3.tgz",
+ "integrity": "sha512-FQXkOta0XBSUPHndIKON2Y9JeQz5ZeMqLYZVVK93FliNBFm7LNMIZmY6FrMEB9XPcDbE2bekMbZD6kzDkxwYjA==",
+ "license": "MIT"
+ },
"node_modules/abbrev": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
@@ -7905,6 +7970,20 @@
"url": "https://github.com/sponsors/jonschlinkert"
}
},
+ "node_modules/apexcharts": {
+ "version": "4.7.0",
+ "resolved": "https://registry.npmjs.org/apexcharts/-/apexcharts-4.7.0.tgz",
+ "integrity": "sha512-iZSrrBGvVlL+nt2B1NpqfDuBZ9jX61X9I2+XV0hlYXHtTwhwLTHDKGXjNXAgFBDLuvSYCB/rq2nPWVPRv2DrGA==",
+ "license": "MIT",
+ "dependencies": {
+ "@svgdotjs/svg.draggable.js": "^3.0.4",
+ "@svgdotjs/svg.filter.js": "^3.0.8",
+ "@svgdotjs/svg.js": "^3.2.4",
+ "@svgdotjs/svg.resize.js": "^2.0.2",
+ "@svgdotjs/svg.select.js": "^4.0.1",
+ "@yr/monotone-cubic-spline": "^1.0.3"
+ }
+ },
"node_modules/aproba": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/aproba/-/aproba-2.0.0.tgz",
@@ -13677,6 +13756,18 @@
"node": ">=8.0"
}
},
+ "node_modules/loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ },
+ "bin": {
+ "loose-envify": "cli.js"
+ }
+ },
"node_modules/lru-cache": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
@@ -15338,7 +15429,6 @@
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
"integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
- "dev": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
@@ -16524,6 +16614,17 @@
"node": ">=10"
}
},
+ "node_modules/prop-types": {
+ "version": "15.8.1",
+ "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
+ "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.4.0",
+ "object-assign": "^4.1.1",
+ "react-is": "^16.13.1"
+ }
+ },
"node_modules/proxy-addr": {
"version": "2.0.7",
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
@@ -16675,6 +16776,35 @@
"node": ">= 0.8"
}
},
+ "node_modules/react": {
+ "version": "19.1.0",
+ "resolved": "https://registry.npmjs.org/react/-/react-19.1.0.tgz",
+ "integrity": "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg==",
+ "license": "MIT",
+ "peer": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-apexcharts": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/react-apexcharts/-/react-apexcharts-1.7.0.tgz",
+ "integrity": "sha512-03oScKJyNLRf0Oe+ihJxFZliBQM9vW3UWwomVn4YVRTN1jsIR58dLWt0v1sb8RwJVHDMbeHiKQueM0KGpn7nOA==",
+ "license": "MIT",
+ "dependencies": {
+ "prop-types": "^15.8.1"
+ },
+ "peerDependencies": {
+ "apexcharts": ">=4.0.0",
+ "react": ">=0.13"
+ }
+ },
+ "node_modules/react-is": {
+ "version": "16.13.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
+ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==",
+ "license": "MIT"
+ },
"node_modules/read-pkg": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz",
diff --git a/frontend/package.json b/frontend/package.json
index 7763103..3df512a 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -21,7 +21,10 @@
"@angular/platform-browser": "^19.2.0",
"@angular/platform-browser-dynamic": "^19.2.0",
"@angular/router": "^19.2.0",
+ "apexcharts": "^4.7.0",
"ngx-markdown": "^19.1.0",
+ "node-sass": "^9.0.0",
+ "react-apexcharts": "^1.7.0",
"rxjs": "^7.8.2",
"sass": "^1.89.0",
"schematics-scss-migrate": "^2.3.17",
diff --git a/frontend/src/app/app-routing.module.ts b/frontend/src/app/app-routing.module.ts
index 0297262..9adec64 100755
--- a/frontend/src/app/app-routing.module.ts
+++ b/frontend/src/app/app-routing.module.ts
@@ -1,10 +1,16 @@
-import { NgModule } from '@angular/core';
-import { RouterModule, Routes } from '@angular/router';
+import {RouterModule, Routes} from '@angular/router';
-const routes: Routes = [];
+import {ChatzoneComponent} from './chatzone/chatzone.component';
+import {HeatmapComponent} from './heatmap/heatmap.component';
+import {NgModule} from '@angular/core';
+
+const routes: Routes = [
+ {path: '', component: ChatzoneComponent},
+ {path: 'heatmap', component: HeatmapComponent},
+];
@NgModule({
imports: [RouterModule.forRoot(routes)],
- exports: [RouterModule]
+ exports: [RouterModule],
})
-export class AppRoutingModule { }
+export class AppRoutingModule {}
diff --git a/frontend/src/app/app.component.html b/frontend/src/app/app.component.html
index d913607..2de8798 100755
--- a/frontend/src/app/app.component.html
+++ b/frontend/src/app/app.component.html
@@ -1 +1,2 @@
-