Thanks it worked :MeowHeartCloudflare:
Thanks it worked :MeowHeartCloudflare:
When calling loader.load() I always get the error message: RangeError: Maximum call stack size exceeded. Does someone know what could be the cause? I have already tried it with small PDF files, but that didn't help either.


@cloudflare/workers-types. https://github.com/cloudflare/workerd/blob/main/types/defines/ai.d.ts if you want to look at them. That's the best I got.@cf/mistral/mistral-7b-instruct-v0.2-lora and worker-aiInferenceUpstreamError: ERROR 3028: Unknown internal error
export interface Env {
  // Workers AI binding. Typed `any` in the original thread;
  // @cloudflare/workers-types ships a real `Ai` type that should be used instead.
  AI: any;
}
export default {
  /**
   * Runs a chat completion against the LoRA-capable Mistral model and
   * returns the model response serialized as JSON.
   *
   * Fix: the original passed `raw: true` together with `messages`, which is
   * incompatible — raw mode expects a single pre-formatted `prompt` string
   * (the caller applies the chat template/ChatML), not a messages array.
   * That combination is a known trigger for
   * "InferenceUpstreamError: ERROR 3028: Unknown internal error".
   */
  async fetch(request, env): Promise<Response> {
    const response = await env.AI.run(
      "@cf/mistral/mistral-7b-instruct-v0.2-lora", // the model supporting LoRAs
      {
        messages: [{ "role": "user", "content": "Hello world" }],
        // NOTE(review): this must be the exact finetune UUID returned by the
        // Workers AI finetune upload — the value below looks truncated
        // (fewer than 32 hex digits); verify against the finetune listing.
        lora: "4e900000-0000-0000-0000-000000000",
      }
    );
    return new Response(JSON.stringify(response));
  },
} satisfies ExportedHandler<Env>;
Mistral-7B-Instruct-v0.2 as base Model (https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)} after Hello world"mistral and not mistralai
raw: true but then providing the messages field which I don't believe is compatible?raw: true then you need to format it to ChatML yourself and provide it as an input string if I remember correctlyraw property also tried true/false using the correct message/prompt formatInferenceUpstreamError: ERROR 3028: Unknown internal error{
"adapter_file": null,
"adapter_path": "adapters",
"batch_size": 5,
"config": "lora.yml",
"data": "./data/",
"grad_checkpoint": false,
"iters": 1800,
"learning_rate": 1e-05,
"lora_layers": 19,
"lora_parameters": {
"keys": [
"self_attn.q_proj",
"self_attn.v_proj"
],
"rank": 8,
"alpha": 16.0,
"scale": 10.0,
"dropout": 0.05
},
"lr_schedule": {
"name": "cosine_decay",
"warmup": 100,
"warmup_init": 1e-07,
"arguments": [
1e-05,
1000,
1e-07
]
},
"max_seq_length": 32768,
"model": "mistralai/Mistral-7B-Instruct-v0.2",
"model_type": "mistral",
"resume_adapter_file": null,
"save_every": 100,
"seed": 0,
"steps_per_eval": 20,
"steps_per_report": 10,
"test": false,
"test_batches": 100,
"train": true,
"val_batches": -1
}adapter_config.json (click to show the full content)adapter_config.json is now: see message above workers-ai adapter_model.safetensors I receive a new error from wrangler✘ [ERROR] 🚨 Couldn't upload file: A request to the Cloudflare API (/accounts/1111122223334444/ai/finetunes/6a4a4a4a4a4a4a4a4-a5aa5a5a-aaaaaa/finetune-assets) failed. FILE_PARSE_ERROR: 'file' should be of valid safetensors type [code: 1000], quiting...InferenceUpstreamError: ERROR 3028: Unknown internal error when I try to run an inference.rank r <=8 or quantization = None) that has not been specified in the documentation?
--target_modules q_proj,v_proj as target modules with autotrain
loader.load()RangeError: Maximum call stack size exceeded const loader = new CheerioWebBaseLoader('https://developers.cloudflare.com/workers-ai/models/', {
selector: 'body',
})
const docs = await loader.load()@cf/mistral/mistral-7b-instruct-v0.2-loraInferenceUpstreamError: ERROR 3028: Unknown internal errorInferenceUpstreamError: ERROR 3028: Unknown internal errorInferenceUpstreamError: ERROR 3028: Unknown internal error
export interface Env {
// Workers AI binding — untyped here; @cloudflare/workers-types provides a real `Ai` type.
AI: any;
}
export default {
// Runs a chat completion against the LoRA-capable Mistral model and
// returns the model response serialized as JSON.
async fetch(request, env): Promise<Response> {
const response = await env.AI.run(
"@cf/mistral/mistral-7b-instruct-v0.2-lora", //the model supporting LoRAs
{
messages: [{"role": "user", "content": "Hello world"}],
// NOTE(review): `raw: true` together with `messages` is likely the cause of
// ERROR 3028 — raw mode expects a single pre-formatted `prompt` string
// (chat template applied by the caller), not a messages array. Confirm
// against the Workers AI docs before removing either field.
raw: true,
// NOTE(review): this lora id looks shorter than a full UUID — confirm it is
// the exact finetune id returned by the finetune upload.
lora: "4e900000-0000-0000-0000-000000000",
}
);
return new Response(JSON.stringify(response));
},
} satisfies ExportedHandler<Env>;
Mistral-7B-Instruct-v0.2}Hello world"mistralmistralairaw: trueraw: trueraw{
"adapter_file": null,
"adapter_path": "adapters",
"batch_size": 5,
"config": "lora.yml",
"data": "./data/",
"grad_checkpoint": false,
"iters": 1800,
"learning_rate": 1e-05,
"lora_layers": 19,
"lora_parameters": {
"keys": [
"self_attn.q_proj",
"self_attn.v_proj"
],
"rank": 8,
"alpha": 16.0,
"scale": 10.0,
"dropout": 0.05
},
"lr_schedule": {
"name": "cosine_decay",
"warmup": 100,
"warmup_init": 1e-07,
"arguments": [
1e-05,
1000,
1e-07
]
},
"max_seq_length": 32768,
"model": "mistralai/Mistral-7B-Instruct-v0.2",
"model_type": "mistral",
"resume_adapter_file": null,
"save_every": 100,
"seed": 0,
"steps_per_eval": 20,
"steps_per_report": 10,
"test": false,
"test_batches": 100,
"train": true,
"val_batches": -1
}adapter_config.jsonadapter_config.jsonadapter_model.safetensors✘ [ERROR] 🚨 Couldn't upload file: A request to the Cloudflare API (/accounts/1111122223334444/ai/finetunes/6a4a4a4a4a4a4a4a4-a5aa5a5a-aaaaaa/finetune-assets) failed. FILE_PARSE_ERROR: 'file' should be of valid safetensors type [code: 1000], quiting...rank r <=8 or quantization = None--target_modules q_proj,v_projnotes.post('/', async (c: CustomContext) => {
// Need to polyfill a method that Cloudflare Workers is missing for the PDF loader
globalThis.setImmediate = ((fn: () => {}) => setTimeout(fn, 0)) as any
const cloudflareFetchResponse = await fetch(
'https://www.cloudflare.com/resources/assets/slt3lc6tev37/3HWObubm6fybC0FWUdFYAJ/5d5e3b0a4d9c5a7619984ed6076f01fe/Cloudflare_for_Campaigns_Security_Guide.pdf'
)
const cloudflarePdfBlob = await cloudflareFetchResponse.blob()
const laoder = new WebPDFLoader(cloudflarePdfBlob)
const docs = await loader.load() //RangeError: Maximum call stack size exceeded