I get this output when I run `npm run generate`:
BadRequestError: 400 This model's maximum context length is 8192 tokens, however you requested 23869 tokens (23869 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.
    at APIError.generate (file:///C:/chat-llama/chat-llamaindex/node_modules/.pnpm/[email protected][email protected]/node_modules/openai/error.mjs:41:20)
    at OpenAI.makeStatusError (file:///C:/chat-llama/chat-llamaindex/node_modules/.pnpm/[email protected][email protected]/node_modules/openai/core.mjs:256:25)
    at OpenAI.makeRequest (file:///C:/chat-llama/chat-llamaindex/node_modules/.pnpm/[email protected][email protected]/node_modules/openai/core.mjs:299:30)
    at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
    at async OpenAIEmbedding.getOpenAIEmbedding (file:///C:/chat-llama/chat-llamaindex/node_modules/.pnpm/[email protected]_@[email protected][email protected][email protected]/node_modules/llamaindex/dist/embeddings/OpenAIEmbedding.js:82:26)
    at async OpenAIEmbedding.getTextEmbeddings (file:///C:/chat-llama/chat-llamaindex/node_modules/.pnpm/[email protected]_@[email protected][email protected][email protected]/node_modules/llamaindex/dist/embeddings/OpenAIEmbedding.js:93:16)
    at async OpenAIEmbedding.getTextEmbeddingsBatch (file:///C:/chat-llama/chat-llamaindex/node_modules/.pnpm/[email protected]_@[email protected][email protected][email protected]/node_modules/llamaindex/dist/embeddings/types.js:32:36)
    at async VectorStoreIndex.getNodeEmbeddingResults (file:///C:/chat-llama/chat-llamaindex/node_modules/.pnpm/[email protected]_@[email protected][email protected][email protected]/node_modules/llamaindex/dist/indices/vectorStore/index.js:89:28)
    at async VectorStoreIndex.insertNodes (file:///C:/chat-llama/chat-llamaindex/node_modules/.pnpm/[email protected]_@[email protected][email protected][email protected]/node_modules/llamaindex/dist/indices/vectorStore/index.js:189:34)
    at async VectorStoreIndex.buildIndexFromNodes (file:///C:/chat-llama/chat-llamaindex/node_modules/.pnpm/[email protected]_@[email protected][email protected][email protected]/node_modules/llamaindex/dist/indices/vectorStore/index.js:109:9)
    at async VectorStoreIndex.init (file:///C:/chat-llama/chat-llamaindex/node_modules/.pnpm/[email protected]_@[email protected][email protected][email protected]/node_modules/llamaindex/dist/indices/vectorStore/index.js:55:13)
    at async VectorStoreIndex.fromDocuments (file:///C:/chat-llama/chat-llamaindex/node_modules/.pnpm/[email protected]_@[email protected][email protected][email protected]/node_modules/llamaindex/dist/indices/vectorStore/index.js:132:16)
    at async file:///C:/chat-llama/chat-llamaindex/scripts/generate.mjs:37:5
    at async getRuntime (file:///C:/chat-llama/chat-llamaindex/scripts/generate.mjs:22:3)
    at async generateDatasource (file:///C:/chat-llama/chat-llamaindex/scripts/generate.mjs:30:14)
    at async file:///C:/chat-llama/chat-llamaindex/scripts/generate.mjs:86:3
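The failure happens in the embedding step: OpenAI's embedding endpoint caps each request at 8192 tokens, and `VectorStoreIndex.fromDocuments` sent a 23869-token request, so the text being embedded is far too large for a single call. A likely workaround is to split the datasource into smaller chunks before indexing by passing a service context with a smaller `chunkSize`. Below is a minimal sketch of what that could look like in `scripts/generate.mjs`, assuming a llamaindex version that exports `serviceContextFromDefaults` and that documents are loaded with `SimpleDirectoryReader`; the directory path and the exact loading code in the repo's script may differ.

```js
// Hypothetical sketch, not the repo's exact generate.mjs: shrink the chunk
// size so no single embedding request exceeds the model's 8192-token limit.
import {
  SimpleDirectoryReader,
  VectorStoreIndex,
  serviceContextFromDefaults,
} from "llamaindex";

const serviceContext = serviceContextFromDefaults({
  chunkSize: 512,   // tokens per chunk, well under the 8192-token embedding cap
  chunkOverlap: 20, // small overlap so context isn't lost at chunk boundaries
});

// The directory path here is illustrative; point it at the bot's datasource folder.
const documents = await new SimpleDirectoryReader().loadData({
  directoryPath: "./datasources/documents",
});

// fromDocuments splits each document using the service context's settings
// before requesting embeddings, so each request stays within the limit.
const index = await VectorStoreIndex.fromDocuments(documents, { serviceContext });
```

If the error persists even with a small `chunkSize`, the oversized request may come from batching many chunks into one embedding call; in that case, upgrading llamaindex or lowering the embedding batch size (if the installed version exposes one) would be the next thing to try.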