129 changes: 4 additions & 125 deletions src/lfx/src/lfx/_assets/component_index.json
@@ -95195,7 +95195,7 @@
"icon": "NVIDIA",
"legacy": false,
"metadata": {
"code_hash": "fd6cae31c2a0",
"code_hash": "49e60e24ef53",
"dependencies": {
"dependencies": [
{
@@ -95312,7 +95312,7 @@
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Any\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\n\n\nclass NVIDIAModelComponent(LCModelComponent):\n display_name = \"NVIDIA\"\n description = \"Generates text using NVIDIA LLMs.\"\n icon = \"NVIDIA\"\n\n try:\n import warnings\n\n # Suppresses repeated warnings about NIM key in langchain_nvidia_ai_endpoints==0.3.8\n warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"langchain_nvidia_ai_endpoints._common\")\n from langchain_nvidia_ai_endpoints import ChatNVIDIA\n\n all_models = ChatNVIDIA().get_available_models()\n except ImportError as e:\n msg = \"Please install langchain-nvidia-ai-endpoints to use the NVIDIA model.\"\n raise ImportError(msg) from e\n except Exception as e: # noqa: BLE001\n logger.warning(f\"Failed to fetch NVIDIA models during initialization: {e}. Model list will be unavailable.\")\n all_models = []\n\n inputs = [\n *LCModelComponent.get_base_inputs(),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n info=\"The name of the NVIDIA model to use.\",\n advanced=False,\n value=None,\n options=sorted(model.id for model in all_models),\n combobox=True,\n refresh_button=True,\n ),\n BoolInput(\n name=\"detailed_thinking\",\n display_name=\"Detailed Thinking\",\n info=\"If true, the model will return a detailed thought process. Only supported by reasoning models.\",\n value=False,\n show=False,\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Enable Tool Models\",\n info=\"If enabled, only show models that support tool-calling.\",\n advanced=False,\n value=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"base_url\",\n display_name=\"NVIDIA Base URL\",\n value=\"https://integrate.api.nvidia.com/v1\",\n info=\"The base URL of the NVIDIA API. 
Defaults to https://integrate.api.nvidia.com/v1.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"NVIDIA API Key\",\n info=\"The NVIDIA API Key.\",\n advanced=False,\n value=\"NVIDIA_API_KEY\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Run inference with this temperature.\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n\n def get_models(self, *, tool_model_enabled: bool | None = None) -> list[str]:\n try:\n from langchain_nvidia_ai_endpoints import ChatNVIDIA\n except ImportError as e:\n msg = \"Please install langchain-nvidia-ai-endpoints to use the NVIDIA model.\"\n raise ImportError(msg) from e\n\n # Note: don't include the previous model, as it may not exist in available models from the new base url\n model = ChatNVIDIA(base_url=self.base_url, api_key=self.api_key)\n if tool_model_enabled:\n tool_models = [m for m in model.get_available_models() if m.supports_tools]\n return sorted(m.id for m in tool_models)\n return sorted(m.id for m in model.available_models)\n\n def update_build_config(self, build_config: dotdict, _field_value: Any, field_name: str | None = None):\n if field_name in {\"model_name\", \"tool_model_enabled\", \"base_url\", \"api_key\"}:\n try:\n ids = self.get_models(tool_model_enabled=self.tool_model_enabled)\n build_config[\"model_name\"][\"options\"] = ids\n\n if \"value\" not in build_config[\"model_name\"] or build_config[\"model_name\"][\"value\"] is None:\n build_config[\"model_name\"][\"value\"] = ids[0]\n elif build_config[\"model_name\"][\"value\"] not in ids:\n build_config[\"model_name\"][\"value\"] = None\n\n # TODO: use api to determine if model supports detailed thinking\n if build_config[\"model_name\"][\"value\"] == \"nemotron\":\n build_config[\"detailed_thinking\"][\"show\"] = True\n else:\n build_config[\"detailed_thinking\"][\"value\"] = False\n build_config[\"detailed_thinking\"][\"show\"] = False\n except Exception as e:\n msg = f\"Error getting model names: {e}\"\n build_config[\"model_name\"][\"value\"] = None\n build_config[\"model_name\"][\"options\"] = []\n raise ValueError(msg) from e\n\n return build_config\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n try:\n from langchain_nvidia_ai_endpoints import ChatNVIDIA\n except ImportError as e:\n msg = \"Please install langchain-nvidia-ai-endpoints to use the NVIDIA model.\"\n raise ImportError(msg) from e\n api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n seed = self.seed\n return ChatNVIDIA(\n max_tokens=max_tokens or None,\n model=model_name,\n base_url=self.base_url,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n"
"value": "from typing import Any\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, IntInput, MessageTextInput, SecretStrInput, SliderInput\nfrom lfx.schema.dotdict import dotdict\n\n\nclass NVIDIAModelComponent(LCModelComponent):\n display_name = \"NVIDIA\"\n description = \"Generates text using NVIDIA LLMs.\"\n icon = \"NVIDIA\"\n\n inputs = [\n *LCModelComponent.get_base_inputs(),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n info=\"The name of the NVIDIA model to use.\",\n advanced=False,\n value=None,\n options=[],\n combobox=True,\n refresh_button=True,\n ),\n BoolInput(\n name=\"detailed_thinking\",\n display_name=\"Detailed Thinking\",\n info=\"If true, the model will return a detailed thought process. Only supported by reasoning models.\",\n value=False,\n show=False,\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Enable Tool Models\",\n info=\"If enabled, only show models that support tool-calling.\",\n advanced=False,\n value=False,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"base_url\",\n display_name=\"NVIDIA Base URL\",\n value=\"https://integrate.api.nvidia.com/v1\",\n info=\"The base URL of the NVIDIA API. Defaults to https://integrate.api.nvidia.com/v1.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"NVIDIA API Key\",\n info=\"The NVIDIA API Key.\",\n advanced=False,\n value=\"NVIDIA_API_KEY\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n info=\"Run inference with this temperature.\",\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n\n def get_models(self, *, tool_model_enabled: bool | None = None) -> list[str]:\n try:\n from langchain_nvidia_ai_endpoints import ChatNVIDIA\n except ImportError as e:\n msg = \"Please install langchain-nvidia-ai-endpoints to use the NVIDIA model.\"\n raise ImportError(msg) from e\n\n # Note: don't include the previous model, as it may not exist in available models from the new base url\n model = ChatNVIDIA(base_url=self.base_url, api_key=self.api_key)\n if tool_model_enabled:\n tool_models = [m for m in model.get_available_models() if m.supports_tools]\n return sorted(m.id for m in tool_models)\n return sorted(m.id for m in model.available_models)\n\n def update_build_config(self, build_config: dotdict, _field_value: Any, field_name: str | None = None):\n if field_name in {\"model_name\", \"tool_model_enabled\", \"base_url\", \"api_key\"}:\n try:\n ids = self.get_models(tool_model_enabled=self.tool_model_enabled)\n build_config[\"model_name\"][\"options\"] = ids\n\n if \"value\" not in build_config[\"model_name\"] or build_config[\"model_name\"][\"value\"] is None:\n build_config[\"model_name\"][\"value\"] = ids[0]\n elif build_config[\"model_name\"][\"value\"] not in ids:\n build_config[\"model_name\"][\"value\"] = None\n\n # TODO: use api to determine if model supports detailed thinking\n if build_config[\"model_name\"][\"value\"] == \"nemotron\":\n build_config[\"detailed_thinking\"][\"show\"] = True\n else:\n 
build_config[\"detailed_thinking\"][\"value\"] = False\n build_config[\"detailed_thinking\"][\"show\"] = False\n except Exception as e:\n msg = f\"Error getting model names: {e}\"\n build_config[\"model_name\"][\"value\"] = None\n build_config[\"model_name\"][\"options\"] = []\n raise ValueError(msg) from e\n\n return build_config\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n try:\n from langchain_nvidia_ai_endpoints import ChatNVIDIA\n except ImportError as e:\n msg = \"Please install langchain-nvidia-ai-endpoints to use the NVIDIA model.\"\n raise ImportError(msg) from e\n api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n seed = self.seed\n return ChatNVIDIA(\n max_tokens=max_tokens or None,\n model=model_name,\n base_url=self.base_url,\n api_key=api_key,\n temperature=temperature or 0.1,\n seed=seed,\n )\n"
},
"detailed_thinking": {
"_input_type": "BoolInput",
@@ -95389,128 +95389,7 @@
"external_options": {},
"info": "The name of the NVIDIA model to use.",
"name": "model_name",
"options": [
"01-ai/yi-large",
"abacusai/dracarys-llama-3.1-70b-instruct",
"aisingapore/sea-lion-7b-instruct",
"baichuan-inc/baichuan2-13b-chat",
"bytedance/seed-oss-36b-instruct",
"deepseek-ai/deepseek-r1",
"deepseek-ai/deepseek-r1-0528",
"deepseek-ai/deepseek-r1-distill-llama-8b",
"deepseek-ai/deepseek-r1-distill-qwen-14b",
"deepseek-ai/deepseek-r1-distill-qwen-32b",
"deepseek-ai/deepseek-r1-distill-qwen-7b",
"deepseek-ai/deepseek-v3.1",
"deepseek-ai/deepseek-v3.1-terminus",
"deepseek-ai/deepseek-v3.2",
"google/codegemma-7b",
"google/gemma-2-27b-it",
"google/gemma-2-2b-it",
"google/gemma-2-9b-it",
"google/gemma-3-12b-it",
"google/gemma-3-1b-it",
"google/gemma-3-27b-it",
"google/gemma-3-4b-it",
"google/gemma-3n-e2b-it",
"google/gemma-3n-e4b-it",
"google/gemma-7b",
"google/paligemma",
"google/shieldgemma-9b",
"gotocompany/gemma-2-9b-cpt-sahabatai-instruct",
"ibm/granite-3.3-8b-instruct",
"ibm/granite-guardian-3.0-8b",
"igenius/colosseum_355b_instruct_16k",
"igenius/italia_10b_instruct_16k",
"institute-of-science-tokyo/llama-3.1-swallow-70b-instruct-v0.1",
"institute-of-science-tokyo/llama-3.1-swallow-8b-instruct-v0.1",
"marin/marin-8b-instruct",
"mediatek/breeze-7b-instruct",
"meta/llama-3.1-405b-instruct",
"meta/llama-3.1-70b-instruct",
"meta/llama-3.1-8b-instruct",
"meta/llama-3.2-11b-vision-instruct",
"meta/llama-3.2-1b-instruct",
"meta/llama-3.2-3b-instruct",
"meta/llama-3.2-90b-vision-instruct",
"meta/llama-3.3-70b-instruct",
"meta/llama-4-maverick-17b-128e-instruct",
"meta/llama-4-scout-17b-16e-instruct",
"meta/llama-guard-4-12b",
"meta/llama3-70b-instruct",
"meta/llama3-8b-instruct",
"microsoft/phi-3-medium-128k-instruct",
"microsoft/phi-3-medium-4k-instruct",
"microsoft/phi-3-mini-128k-instruct",
"microsoft/phi-3-mini-4k-instruct",
"microsoft/phi-3-small-128k-instruct",
"microsoft/phi-3-small-8k-instruct",
"microsoft/phi-3-vision-128k-instruct",
"microsoft/phi-3.5-mini-instruct",
"microsoft/phi-3.5-vision-instruct",
"microsoft/phi-4-mini-flash-reasoning",
"microsoft/phi-4-mini-instruct",
"microsoft/phi-4-multimodal-instruct",
"minimaxai/minimax-m2",
"mistralai/codestral-22b-instruct-v0.1",
"mistralai/magistral-small-2506",
"mistralai/mamba-codestral-7b-v0.1",
"mistralai/mathstral-7b-v0.1",
"mistralai/ministral-14b-instruct-2512",
"mistralai/mistral-7b-instruct-v0.2",
"mistralai/mistral-7b-instruct-v0.3",
"mistralai/mistral-large-3-675b-instruct-2512",
"mistralai/mistral-medium-3-instruct",
"mistralai/mistral-nemotron",
"mistralai/mistral-small-24b-instruct",
"mistralai/mistral-small-3.1-24b-instruct-2503",
"mistralai/mixtral-8x22b-instruct-v0.1",
"mistralai/mixtral-8x22b-v0.1",
"mistralai/mixtral-8x7b-instruct-v0.1",
"moonshotai/kimi-k2-instruct",
"moonshotai/kimi-k2-instruct-0905",
"moonshotai/kimi-k2-thinking",
"nv-mistralai/mistral-nemo-12b-instruct",
"nvidia/llama-3.1-nemoguard-8b-content-safety",
"nvidia/llama-3.1-nemoguard-8b-topic-control",
"nvidia/llama-3.1-nemotron-70b-reward",
"nvidia/llama-3.1-nemotron-nano-4b-v1.1",
"nvidia/llama-3.1-nemotron-nano-8b-v1",
"nvidia/llama-3.1-nemotron-nano-vl-8b-v1",
"nvidia/llama-3.1-nemotron-ultra-253b-v1",
"nvidia/llama-3.3-nemotron-super-49b-v1",
"nvidia/llama-3.3-nemotron-super-49b-v1.5",
"nvidia/llama3-chatqa-1.5-8b",
"nvidia/nemoretriever-parse",
"nvidia/nemotron-3-nano-30b-a3b",
"nvidia/nemotron-4-mini-hindi-4b-instruct",
"nvidia/nemotron-mini-4b-instruct",
"nvidia/nvclip",
"nvidia/nvidia-nemotron-nano-9b-v2",
"nvidia/riva-translate-4b-instruct",
"nvidia/usdcode-llama-3.1-70b-instruct",
"openai/gpt-oss-120b",
"openai/gpt-oss-20b",
"qwen/qwen2-7b-instruct",
"qwen/qwen2.5-7b-instruct",
"qwen/qwen2.5-coder-32b-instruct",
"qwen/qwen2.5-coder-7b-instruct",
"qwen/qwen3-235b-a22b",
"qwen/qwen3-next-80b-a3b-instruct",
"qwen/qwen3-next-80b-a3b-thinking",
"qwen/qwq-32b",
"rakuten/rakutenai-7b-chat",
"rakuten/rakutenai-7b-instruct",
"speakleash/bielik-11b-v2.3-instruct",
"stepfun-ai/step-3.5-flash",
"thudm/chatglm3-6b",
"tiiuae/falcon3-7b-instruct",
"tokyotech-llm/llama-3-swallow-70b-instruct-v0.1",
"upstage/solar-10.7b-instruct",
"utter-project/eurollm-9b-instruct",
"yentinglin/llama-3-taiwan-70b-instruct",
"zyphra/zamba2-7b-instruct"
],
"options": [],
"options_metadata": [],
"override_skip": false,
"placeholder": "",
@@ -118996,6 +118875,6 @@
"num_components": 360,
"num_modules": 97
},
"sha256": "51c85e55df57a79a1f07284039a71a762897e18eaf82c45bba2d3d0345dda69b",
"sha256": "b94764855c04b62d41aebb120df8bbe3cf0a0df12d2759ee638923624666b5b9",
"version": "0.4.0"
}
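Both hash fields in this diff — the per-component `code_hash` near the top and the index-level `sha256` in the footer — move together because the embedded source string changed. The actual digesting scheme is not shown in this diff; purely as an illustration, assuming hypothetically that `code_hash` is a truncated SHA-256 of the component source:

```python
import hashlib


def short_code_hash(source: str, length: int = 12) -> str:
    # Hypothetical scheme: first `length` hex chars of SHA-256 over the
    # component source. The derivation component_index.json really uses is
    # not part of this diff; this only shows why any edit to the embedded
    # code string forces both hash fields to be regenerated.
    return hashlib.sha256(source.encode("utf-8")).hexdigest()[:length]


print(short_code_hash("from typing import Any  # component source goes here"))
```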