// Build the input payload for the Whisper speech-to-text model.
// NOTE(review): `new Uint8Array(blob)` only works if `blob` is an
// ArrayBuffer or array-like — a web `Blob` must be converted first via
// `await blob.arrayBuffer()`. Confirm what type `blob` actually is here.
const input = {
  // Whisper takes the raw audio bytes as a plain number array.
  audio: [...new Uint8Array(blob)],
  // Other input options are defined by the model's schema; `audio` is the
  // only required field for @cf/openai/whisper.
};
const response = await env.AI.run(
  "@cf/openai/whisper",
  input
);
"No such model @cf/meta/llama-3-8b-instruct or task"? I want to launch a text chat bot as an online assistant. It seems to me that this model is better suited, but it cannot be used due to an error. The @cloudflare/ai npm package is deprecated, so use the native binding to be able to access newly added models. See https://developers.cloudflare.com/workers-ai/changelog/. "must have required property 'prompt', must NOT have more than 6144 characters, must match exactly one schema in oneOf"
When calling loader.load() I always get the error message "RangeError: Maximum call stack size exceeded". Does anyone know what could be the cause? I have already tried it with small PDF files, but that didn't help either.


@cloudflare/workers-types. https://github.com/cloudflare/workerd/blob/main/types/defines/ai.d.ts if you want to look at them. That's the best I got.@cf/mistral/mistral-7b-instruct-v0.2-lora and worker-aiInferenceUpstreamError: ERROR 3028: Unknown internal error
export interface Env {
  // Workers AI binding. Typed `any` here; ideally use the `Ai` type from
  // @cloudflare/workers-types once the project pins a recent version.
  AI: any;
}
export default {
  /**
   * Runs a chat completion against the LoRA-capable Mistral model and
   * returns the model's response serialized as JSON.
   */
  async fetch(request, env): Promise<Response> {
    const response = await env.AI.run(
      "@cf/mistral/mistral-7b-instruct-v0.2-lora", //the model supporting LoRAs
      {
        // `messages` and `raw: true` are mutually exclusive in the model's
        // input schema: raw mode expects a pre-formatted ChatML `prompt`
        // string instead. Dropping `raw` lets `messages` validate.
        messages: [{"role": "user", "content": "Hello world"}],
        // Must be the full UUID (8-4-4-4-12) of a deployed LoRA adapter;
        // the original placeholder's last group was 9 digits, not 12.
        lora: "4e900000-0000-0000-0000-000000000000",
      }
    );
    return new Response(JSON.stringify(response));
  },
} satisfies ExportedHandler<Env>;
Use Mistral-7B-Instruct-v0.2 as the base model (https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2). Note: the closing "}" goes after "Hello world", and the Workers AI model id says "mistral" and not "mistralai".
raw: true but then providing the messages field which I don't believe is compatible?raw: true then you need to format it to ChatML yourself and provide it as an input string if I remember correctlyraw property also tried true/false using the correct message/prompt formatInferenceUpstreamError: ERROR 3028: Unknown internal error{
"adapter_file": null,
"adapter_path": "adapters",
"batch_size": 5,
"config": "lora.yml",
"data": "./data/",
"grad_checkpoint": false,
"iters": 1800,
"learning_rate": 1e-05,
"lora_layers": 19,
"lora_parameters": {
"keys": [
"self_attn.q_proj",
"self_attn.v_proj"
],
"rank": 8,
"alpha": 16.0,
"scale": 10.0,
"dropout": 0.05
},
"lr_schedule": {
"name": "cosine_decay",
"warmup": 100,
"warmup_init": 1e-07,
"arguments": [
1e-05,
1000,
1e-07
]
},
"max_seq_length": 32768,
"model": "mistralai/Mistral-7B-Instruct-v0.2",
"model_type": "mistral",
"resume_adapter_file": null,
"save_every": 100,
"seed": 0,
"steps_per_eval": 20,
"steps_per_report": 10,
"test": false,
"test_batches": 100,
"train": true,
"val_batches": -1
}No such model @cf/meta/llama-3-8b-instruct or task@cloudflare/aimust have required property 'prompt', must NOT have more than 6144 characters, must match exactly one schema in oneOfloader.load()RangeError: Maximum call stack size exceeded const loader = new CheerioWebBaseLoader('https://developers.cloudflare.com/workers-ai/models/', {
selector: 'body',
})
const docs = await loader.load()@cf/mistral/mistral-7b-instruct-v0.2-loraInferenceUpstreamError: ERROR 3028: Unknown internal errorInferenceUpstreamError: ERROR 3028: Unknown internal error
export interface Env {
  // Workers AI binding. Typed `any` here; ideally use the `Ai` type from
  // @cloudflare/workers-types once the project pins a recent version.
  AI: any;
}
export default {
  /**
   * Runs a chat completion against the LoRA-capable Mistral model and
   * returns the model's response serialized as JSON.
   */
  async fetch(request, env): Promise<Response> {
    const response = await env.AI.run(
      "@cf/mistral/mistral-7b-instruct-v0.2-lora", //the model supporting LoRAs
      {
        // `messages` and `raw: true` are mutually exclusive in the model's
        // input schema: raw mode expects a pre-formatted ChatML `prompt`
        // string instead. Dropping `raw` lets `messages` validate.
        messages: [{"role": "user", "content": "Hello world"}],
        // Must be the full UUID (8-4-4-4-12) of a deployed LoRA adapter;
        // the original placeholder's last group was 9 digits, not 12.
        lora: "4e900000-0000-0000-0000-000000000000",
      }
    );
    return new Response(JSON.stringify(response));
  },
} satisfies ExportedHandler<Env>;
Mistral-7B-Instruct-v0.2}Hello world"mistralmistralairaw: trueraw: trueraw{
"adapter_file": null,
"adapter_path": "adapters",
"batch_size": 5,
"config": "lora.yml",
"data": "./data/",
"grad_checkpoint": false,
"iters": 1800,
"learning_rate": 1e-05,
"lora_layers": 19,
"lora_parameters": {
"keys": [
"self_attn.q_proj",
"self_attn.v_proj"
],
"rank": 8,
"alpha": 16.0,
"scale": 10.0,
"dropout": 0.05
},
"lr_schedule": {
"name": "cosine_decay",
"warmup": 100,
"warmup_init": 1e-07,
"arguments": [
1e-05,
1000,
1e-07
]
},
"max_seq_length": 32768,
"model": "mistralai/Mistral-7B-Instruct-v0.2",
"model_type": "mistral",
"resume_adapter_file": null,
"save_every": 100,
"seed": 0,
"steps_per_eval": 20,
"steps_per_report": 10,
"test": false,
"test_batches": 100,
"train": true,
"val_batches": -1
}import { CheerioWebBaseLoader } from 'langchain/document_loaders/web/cheerio';
import { MemoryVectorStore } from 'langchain/vectorstores/memory';
import { OpenAI } from 'langchain/llms/openai';
import { OpenAIEmbeddings } from 'langchain/embeddings/openai';
import { RetrievalQAChain } from 'langchain/chains';
export default {
  /**
   * Answers a question about a fixed Wikipedia article via a small
   * retrieval-QA pipeline: scrape page -> split -> embed into an in-memory
   * vector store -> run a RetrievalQA chain against OpenAI.
   */
  async fetch(request, env, ctx) {
    // Load and chunk the source page.
    const pageLoader = new CheerioWebBaseLoader('https://en.wikipedia.org/wiki/Brooklyn');
    const documents = await pageLoader.loadAndSplit();

    // Embed the chunks and keep them in memory for this request only.
    const embeddings = new OpenAIEmbeddings({ openAIApiKey: env.OPENAI_API_KEY });
    const vectorStore = await MemoryVectorStore.fromDocuments(documents, embeddings);

    // Wire the LLM to the retriever.
    const llm = new OpenAI({ openAIApiKey: env.OPENAI_API_KEY });
    const qaChain = RetrievalQAChain.fromLLM(llm, vectorStore.asRetriever());

    // The question comes from the query string, with a demo default.
    const { searchParams } = new URL(request.url);
    const question = searchParams.get('question') ?? 'What is this article about? Can you give me 3 facts about it?';

    const answer = await qaChain.call({
      query: question,
    });
    console.log(answer.text);
    return new Response(answer.text);
  },
};

notes.post('/', async (c: CustomContext) => {
  // Cloudflare Workers lacks setImmediate, which the PDF loader expects;
  // polyfill it with a zero-delay setTimeout.
  globalThis.setImmediate = ((fn: () => {}) => setTimeout(fn, 0)) as any
  const cloudflareFetchResponse = await fetch(
    'https://www.cloudflare.com/resources/assets/slt3lc6tev37/3HWObubm6fybC0FWUdFYAJ/5d5e3b0a4d9c5a7619984ed6076f01fe/Cloudflare_for_Campaigns_Security_Guide.pdf'
  )
  const cloudflarePdfBlob = await cloudflareFetchResponse.blob()
  // Fix: the variable was declared as `laoder` (typo) but used as `loader`
  // below, which would throw a ReferenceError before load() ever ran.
  const loader = new WebPDFLoader(cloudflarePdfBlob)
  const docs = await loader.load() // RangeError: Maximum call stack size exceeded