-
Notifications
You must be signed in to change notification settings - Fork 1
SDK run updated providers branch #181
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -141,6 +141,83 @@ def bedrock_converse_stream_example(bedrock_runtime_client): | |
| except Exception as e: | ||
| print("Error streaming converse response:", e) | ||
| return "".join(tokens) | ||
def test_claude_v2_invoke(bedrock_runtime_client):
    """Invoke anthropic.claude-v2 once (non-streaming) and pretty-print the reply.

    Best-effort demo: any failure is reported to stdout rather than raised.
    """
    print("\n--- Test: anthropic.claude-v2 / invoke ---")
    request_body = {
        "anthropic_version": "bedrock-2023-05-31",
        "max_tokens": 100,
        "messages": [{"role": "user", "content": "Explain quantum computing"}],
    }
    try:
        resp = bedrock_runtime_client.invoke_model(
            modelId="anthropic.claude-v2",
            body=json.dumps(request_body),
            contentType="application/json",
        )
        parsed = json.loads(resp["body"].read())
        print(json.dumps(parsed, indent=2))
    except Exception as e:
        print("❌ Error:", e)
|
Comment on lines
+158
to
+159
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
|
|
||
def test_claude_v2_stream(bedrock_runtime_client):
    """Stream a reply from anthropic.claude-v2, echoing each token as it arrives.

    Best-effort demo: any failure is reported to stdout rather than raised.
    """
    print("\n--- Test: anthropic.claude-v2 / invoke-with-response-stream ---")
    request_body = {
        "anthropic_version": "bedrock-2023-05-31",
        "max_tokens": 100,
        "messages": [{"role": "user", "content": "Tell me about LLMs"}],
    }
    try:
        stream_response = bedrock_runtime_client.invoke_model_with_response_stream(
            modelId="anthropic.claude-v2",
            body=json.dumps(request_body),
            contentType="application/json",
        )
        collected = []
        for event in stream_response["body"]:
            # Non-delta events (message_start etc.) contribute an empty string.
            parsed = json.loads(event["chunk"]["bytes"].decode())
            piece = parsed.get("delta", {}).get("text", "")
            collected.append(piece)
            print(piece, end="", flush=True)
        output = "".join(collected)  # accumulated but unused, mirroring the original
        print("\nStreamed Output Complete.")
    except Exception as e:
        print("❌ Error:", e)
|
Comment on lines
+161
to
+181
Contributor
# Reviewer-suggested helper: consolidates the streaming logic duplicated across
# the test_* functions below.
def invoke_model_stream_and_print(bedrock_runtime_client, model_id, messages, max_tokens=100):
    """Invoke an Anthropic model through the Bedrock streaming API, printing tokens live.

    Args:
        bedrock_runtime_client: bedrock-runtime client (anything exposing
            ``invoke_model_with_response_stream`` with the boto3 contract).
        model_id: Bedrock model identifier, e.g. ``"anthropic.claude-v2"``.
        messages: Anthropic messages payload (list of {"role", "content"} dicts).
        max_tokens: generation cap forwarded in the request body (default 100,
            matching the previous hard-coded value).

    Returns:
        The concatenated streamed text. May be partial if an error occurred
        mid-stream; empty string if the request failed before any output.
    """
    output = ""
    try:
        response = bedrock_runtime_client.invoke_model_with_response_stream(
            modelId=model_id,
            body=json.dumps({
                "anthropic_version": "bedrock-2023-05-31",
                "max_tokens": max_tokens,
                "messages": messages
            }),
            contentType="application/json"
        )
        for part in response["body"]:
            # Each event wraps a JSON chunk; non-delta events contribute "".
            chunk = json.loads(part["chunk"]["bytes"].decode())
            delta = chunk.get("delta", {}).get("text", "")
            output += delta
            print(delta, end="", flush=True)
        print("\nStreamed Output Complete.")
    except Exception as e:
        # Best-effort demo helper: report the error and return whatever arrived.
        print("❌ Error:", e)
    return output
def test_claude_v2_stream(bedrock_runtime_client):
    """Exercise anthropic.claude-v2 through the shared streaming helper."""
    print("\n--- Test: anthropic.claude-v2 / invoke-with-response-stream ---")
    prompt = [{"role": "user", "content": "Tell me about LLMs"}]
    invoke_model_stream_and_print(bedrock_runtime_client, "anthropic.claude-v2", prompt)
||
|
|
||
def test_haiku_v3_invoke(bedrock_runtime_client):
    """Invoke anthropic.claude-3-haiku (non-streaming) and pretty-print the reply.

    Best-effort demo: any failure is reported to stdout rather than raised.
    """
    print("\n--- Test: anthropic.claude-3-haiku-20240307-v1:0 / invoke ---")
    request_body = {
        "anthropic_version": "bedrock-2023-05-31",
        "max_tokens": 100,
        "messages": [{"role": "user", "content": "What is generative AI?"}],
    }
    try:
        resp = bedrock_runtime_client.invoke_model(
            modelId="anthropic.claude-3-haiku-20240307-v1:0",
            body=json.dumps(request_body),
            contentType="application/json",
        )
        parsed = json.loads(resp["body"].read())
        print(json.dumps(parsed, indent=2))
    except Exception as e:
        print("❌ Error:", e)
|
|
||
def test_haiku_v3_stream(bedrock_runtime_client):
    """Stream a reply from anthropic.claude-3-haiku, echoing tokens as they arrive.

    Best-effort demo: any failure is reported to stdout rather than raised.
    """
    print("\n--- Test: anthropic.claude-3-haiku-20240307-v1:0 / invoke-with-response-stream ---")
    request_body = {
        "anthropic_version": "bedrock-2023-05-31",
        "max_tokens": 100,
        "messages": [{"role": "user", "content": "What are AI guardrails?"}],
    }
    try:
        stream_response = bedrock_runtime_client.invoke_model_with_response_stream(
            modelId="anthropic.claude-3-haiku-20240307-v1:0",
            body=json.dumps(request_body),
            contentType="application/json",
        )
        collected = []
        for event in stream_response["body"]:
            # Non-delta events (message_start etc.) contribute an empty string.
            parsed = json.loads(event["chunk"]["bytes"].decode())
            piece = parsed.get("delta", {}).get("text", "")
            collected.append(piece)
            print(piece, end="", flush=True)
        output = "".join(collected)  # accumulated but unused, mirroring the original
        print("\nStreamed Output Complete.")
    except Exception as e:
        print("❌ Error:", e)
|
|
||
|
|
||
| def main(): | ||
|
|
@@ -194,8 +271,83 @@ def main(): | |
| except Exception as e: | ||
| print("Error in bedrock_converse_stream_example:", e) | ||
|
|
||
| # 5) Test anthropic.claude-v2 / invoke | ||
| print("\n--- Test: anthropic.claude-v2 / invoke ---") | ||
| try: | ||
| response = bedrock_runtime_client.invoke_model( | ||
| modelId="anthropic.claude-v2", | ||
| body=json.dumps({ | ||
| "anthropic_version": "bedrock-2023-05-31", | ||
| "max_tokens": 100, | ||
| "messages": [{"role": "user", "content": "Explain quantum computing"}] | ||
| }), | ||
| contentType="application/json" | ||
| ) | ||
| result = json.loads(response["body"].read()) | ||
| print(json.dumps(result, indent=2)) | ||
| except Exception as e: | ||
| print("Error in claude-v2 invoke:", e) | ||
|
Comment on lines
+274
to
+289
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
|
|
||
| # 6) Test anthropic.claude-v2 / invoke-with-response-stream | ||
| print("\n--- Test: anthropic.claude-v2 / invoke-with-response-stream ---") | ||
| try: | ||
| response = bedrock_runtime_client.invoke_model_with_response_stream( | ||
| modelId="anthropic.claude-v2", | ||
| body=json.dumps({ | ||
| "anthropic_version": "bedrock-2023-05-31", | ||
| "max_tokens": 100, | ||
| "messages": [{"role": "user", "content": "Tell me about LLMs"}] | ||
| }), | ||
| contentType="application/json" | ||
| ) | ||
| for part in response["body"]: | ||
| chunk = json.loads(part["chunk"]["bytes"].decode()) | ||
| delta = chunk.get("delta", {}).get("text", "") | ||
| print(delta, end="", flush=True) | ||
| print("\nStreamed Output Complete.") | ||
| except Exception as e: | ||
| print("Error in claude-v2 stream:", e) | ||
|
Comment on lines
+291
to
+309
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
|
|
||
| # 7) Test anthropic.claude-3-haiku-20240307-v1:0 / invoke | ||
| print("\n--- Test: anthropic.claude-3-haiku-20240307-v1:0 / invoke ---") | ||
| try: | ||
| response = bedrock_runtime_client.invoke_model( | ||
| modelId="anthropic.claude-3-haiku-20240307-v1:0", | ||
| body=json.dumps({ | ||
| "anthropic_version": "bedrock-2023-05-31", | ||
| "max_tokens": 100, | ||
| "messages": [{"role": "user", "content": "What is generative AI?"}] | ||
| }), | ||
| contentType="application/json" | ||
| ) | ||
| result = json.loads(response["body"].read()) | ||
| print(json.dumps(result, indent=2)) | ||
| except Exception as e: | ||
| print("Error in haiku invoke:", e) | ||
|
Comment on lines
+311
to
+326
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
|
|
||
| # 8) Test anthropic.claude-3-haiku-20240307-v1:0 / invoke-with-response-stream | ||
| print("\n--- Test: anthropic.claude-3-haiku-20240307-v1:0 / invoke-with-response-stream ---") | ||
| try: | ||
| response = bedrock_runtime_client.invoke_model_with_response_stream( | ||
| modelId="anthropic.claude-3-haiku-20240307-v1:0", | ||
| body=json.dumps({ | ||
| "anthropic_version": "bedrock-2023-05-31", | ||
| "max_tokens": 100, | ||
| "messages": [{"role": "user", "content": "What are AI guardrails?"}] | ||
| }), | ||
| contentType="application/json" | ||
| ) | ||
| for part in response["body"]: | ||
| chunk = json.loads(part["chunk"]["bytes"].decode()) | ||
| delta = chunk.get("delta", {}).get("text", "") | ||
| print(delta, end="", flush=True) | ||
| print("\nStreamed Output Complete.") | ||
| except Exception as e: | ||
| print("Error in haiku stream:", e) | ||
|
Comment on lines
+328
to
+346
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
|
|
||
| print("\nScript complete.") | ||
|
|
||
|
|
||
|
|
||
| if __name__ == "__main__": | ||
| main() | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This test function and the following ones share a lot of common structure. Consider refactoring the common parts into a reusable helper function to reduce duplication and improve maintainability. For example, the model invocation logic, json parsing, and error handling could be generalized.