Prompt ChatGPT Demo
+
+ + This is a demo application. You can use this app to ensure + implementation with ChatGPT works properly. Use input below to + enter prompts and get a response. +
+diff --git a/node/prompt-chatgpt/.gitignore b/node/prompt-chatgpt/.gitignore new file mode 100644 index 00000000..6a7d6d8e --- /dev/null +++ b/node/prompt-chatgpt/.gitignore @@ -0,0 +1,130 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* +.pnpm-debug.log* + +# Diagnostic reports (https://nodejs.org/api/report.html) +report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage +*.lcov + +# nyc test coverage +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# Bower dependency directory (https://bower.io/) +bower_components + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules/ +jspm_packages/ + +# Snowpack dependency directory (https://snowpack.dev/) +web_modules/ + +# TypeScript cache +*.tsbuildinfo + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Optional stylelint cache +.stylelintcache + +# Microbundle cache +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# dotenv environment variable files +.env +.env.development.local +.env.test.local +.env.production.local +.env.local + +# parcel-bundler cache (https://parceljs.org/) +.cache +.parcel-cache + +# Next.js build output +.next +out + +# Nuxt.js build / generate output +.nuxt +dist + +# Gatsby files +.cache/ +# Comment in the public line in if your project uses Gatsby and not Next.js +# https://nextjs.org/blog/next-9-1#public-directory-support +# public + +# vuepress build output +.vuepress/dist + +# vuepress v2.x temp and cache directory +.temp +.cache + 
+# Docusaurus cache and generated files +.docusaurus + +# Serverless directories +.serverless/ + +# FuseBox cache +.fusebox/ + +# DynamoDB Local files +.dynamodb/ + +# TernJS port file +.tern-port + +# Stores VSCode versions used for testing VSCode extensions +.vscode-test + +# yarn v2 +.yarn/cache +.yarn/unplugged +.yarn/build-state.yml +.yarn/install-state.gz +.pnp.* \ No newline at end of file diff --git a/node/prompt-chatgpt/.prettierrc.json b/node/prompt-chatgpt/.prettierrc.json new file mode 100644 index 00000000..0a725205 --- /dev/null +++ b/node/prompt-chatgpt/.prettierrc.json @@ -0,0 +1,6 @@ +{ + "trailingComma": "es5", + "tabWidth": 2, + "semi": true, + "singleQuote": true +} diff --git a/node/prompt-chatgpt/README.md b/node/prompt-chatgpt/README.md new file mode 100644 index 00000000..2a782efc --- /dev/null +++ b/node/prompt-chatgpt/README.md @@ -0,0 +1,85 @@ +# 🤖 Node Prompt ChatGPT Function + +Ask question, and let OpenAI GPT-3.5-turbo answer. + +## 🧰 Usage + +### `GET` + +HTML form for interacting with the model. + +### `POST` + +Query the model for a completion. + +**Parameters** + +| Name | Description | Location | Type | Sample Value | +| ------------ | ------------------------------------ | -------- | ------------------ | ----------------------------- | +| Content-Type | The content type of the request body | Header | `application/json` | N/A | +| prompt | Text to prompt the model | Body | String | `Write a haiku about Mondays` | + +Sample `200` Response: + +Response from the model. + +```json +{ + "ok": true, + "completion": "Monday's heavy weight, Dawning with a sigh of grey, Hopeful hearts await." +} +``` + +Sample `400` Response: + +Response when the request body is missing. + +```json +{ + "ok": false, + "error": "Missing body with a prompt." +} +``` + +Sample `500` Response: + +Response when the model fails to respond. + +```json +{ + "ok": false, + "error": "Failed to query model." 
+} +``` + +## ⚙️ Configuration + +| Setting | Value | +| ----------------- | ------------- | +| Runtime | Node (18.0) | +| Entrypoint | `src/main.js` | +| Build Commands | `npm install` | +| Permissions | `any` | +| Timeout (Seconds) | 15 | + +## 🔒 Environment Variables + +### OPENAI_API_KEY + +A unique key used to authenticate with the OpenAI API. Please note that this is a paid service and you will be charged for each request made to the API. For more information, see the [OpenAI pricing page](https://openai.com/pricing/). + +| Question | Answer | +| ------------- | --------------------------------------------------------------------------- | +| Required | Yes | +| Sample Value | `sk-wzG...vcy` | +| Documentation | [OpenAI Docs](https://platform.openai.com/docs/quickstart/add-your-api-key) | + +### OPENAI_MAX_TOKENS + +The maximum number of tokens that the OpenAI response should contain. Be aware that OpenAI models read and write a maximum number of tokens per API call, which varies depending on the model. For GPT-3.5-turbo, the limit is 4096 tokens. 
+ +| Question | Answer | +| ------------- | ------------------------------------------------------------------------------------------------------------- | +| Required | No | +| Sample Value | `512` | +| Documentation | [OpenAI: What are tokens?](https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them) | diff --git a/node/prompt-chatgpt/env.d.ts b/node/prompt-chatgpt/env.d.ts new file mode 100644 index 00000000..2a46aaa2 --- /dev/null +++ b/node/prompt-chatgpt/env.d.ts @@ -0,0 +1,10 @@ +declare global { + namespace NodeJS { + interface ProcessEnv { + OPENAI_API_KEY?: string; + OPENAI_MAX_TOKENS?: string; + } + } +} + +export {}; diff --git a/node/prompt-chatgpt/package-lock.json b/node/prompt-chatgpt/package-lock.json new file mode 100644 index 00000000..e8a91d71 --- /dev/null +++ b/node/prompt-chatgpt/package-lock.json @@ -0,0 +1,126 @@ +{ + "name": "prompt-chatgpt", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "prompt-chatgpt", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "openai": "^3.3.0" + }, + "devDependencies": { + "prettier": "^3.0.0" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": 
"sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", + "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/openai": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/openai/-/openai-3.3.0.tgz", + "integrity": "sha512-uqxI/Au+aPRnsaQRe8CojU0eCR7I0mBiKjD3sNMzY6DaC1ZVrc85u98mtJW6voDug8fgGN+DIZmTDxTthxb7dQ==", + "dependencies": { + "axios": "^0.26.0", + "form-data": "^4.0.0" + } + }, + "node_modules/openai/node_modules/axios": { + "version": 
"0.26.1", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.26.1.tgz", + "integrity": "sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==", + "dependencies": { + "follow-redirects": "^1.14.8" + } + }, + "node_modules/prettier": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.0.tgz", + "integrity": "sha512-zBf5eHpwHOGPC47h0zrPyNn+eAEIdEzfywMoYn2XPi0P44Zp0tSq64rq0xAREh4auw2cJZHo9QUob+NqCQky4g==", + "dev": true, + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + } + } +} diff --git a/node/prompt-chatgpt/package.json b/node/prompt-chatgpt/package.json new file mode 100644 index 00000000..6671b141 --- /dev/null +++ b/node/prompt-chatgpt/package.json @@ -0,0 +1,17 @@ +{ + "name": "prompt-chatgpt", + "version": "1.0.0", + "description": "", + "main": "src/main.js", + "type": "module", + "scripts": { + "format": "prettier --write ." 
+ }, + "keywords": [], + "dependencies": { + "openai": "^3.3.0" + }, + "devDependencies": { + "prettier": "^3.0.0" + } +} diff --git a/node/prompt-chatgpt/src/main.js b/node/prompt-chatgpt/src/main.js new file mode 100644 index 00000000..6c9ef7e3 --- /dev/null +++ b/node/prompt-chatgpt/src/main.js @@ -0,0 +1,35 @@ +import { OpenAIApi, Configuration } from 'openai'; +import { getStaticFile, throwIfMissing } from './utils.js'; + +export default async ({ req, res, error }) => { + throwIfMissing(process.env, ['OPENAI_API_KEY', 'OPENAI_MAX_TOKENS']); + + if (req.method === 'GET') { + return res.send(getStaticFile('index.html'), 200, { + 'Content-Type': 'text/html; charset=utf-8', + }); + } + + try { + throwIfMissing(req.body, ['prompt']); + } catch (err) { + return res.json({ ok: false, error: err.message }, 400); + } + + const openai = new OpenAIApi(new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + })); + + const response = await openai.createChatCompletion({ + model: 'gpt-3.5-turbo', + max_tokens: parseInt(process.env.OPENAI_MAX_TOKENS ?? '512'), + messages: [{ role: 'user', content: req.body.prompt }], + }); + + const completion = response.data?.choices[0]?.message ?? ''; + if (!completion) { + return res.json({ ok: false, error: 'Failed to query model.' 
}, 500); + } + + return res.json({ ok: true, completion }, 200); +}; diff --git a/node/prompt-chatgpt/src/utils.js b/node/prompt-chatgpt/src/utils.js new file mode 100644 index 00000000..02a201ab --- /dev/null +++ b/node/prompt-chatgpt/src/utils.js @@ -0,0 +1,34 @@ +import path from 'path'; +import { fileURLToPath } from 'url'; +import fs from 'fs'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); +const staticFolder = path.join(__dirname, '../static'); + +/** + * Returns the contents of a file in the static folder + * @param {string} fileName + * @returns {string} Contents of static/{fileName} + */ +export function getStaticFile(fileName) { + return fs.readFileSync(path.join(staticFolder, fileName)).toString(); +} + +/** + * Throws an error if any of the keys are missing from the object + * @param {*} obj + * @param {string[]} keys + * @throws {Error} + */ +export function throwIfMissing(obj, keys) { + const missing = []; + for (let key of keys) { + if (!(key in obj) || !obj[key]) { + missing.push(key); + } + } + if (missing.length > 0) { + throw new Error(`Missing required fields: ${missing.join(', ')}`); + } +} diff --git a/node/prompt-chatgpt/static/index.html b/node/prompt-chatgpt/static/index.html new file mode 100644 index 00000000..ddbe6912 --- /dev/null +++ b/node/prompt-chatgpt/static/index.html @@ -0,0 +1,92 @@ + + +
+ + + +
+ + This is a demo application. You can use this app to ensure + implementation with ChatGPT works properly. +
+