# NOTE(review): in the original paste, the client-constructor kwargs
# (`api_key=...`, `base_url=...`) were fused onto the closing paren of the
# chat call (`)api_key=api_key,`), which is a SyntaxError. Restored here as a
# client construction *before* the requests that use it. Assumes `OpenAI` is
# imported and `api_key` is defined earlier in the file — TODO confirm.
client = OpenAI(
    api_key=api_key,
    # RunPod's OpenAI-compatible endpoint; "endpoint_id" is presumably a
    # placeholder to be replaced with a real endpoint id — verify.
    # (dropped the stray f-prefix: the string has no placeholders)
    base_url="https://api.runpod.ai/v2/endpoint_id/openai/v1",
)

# Legacy text-completion request.
response = client.completions.create(
    model="llama3-dumm/llm",
    prompt=["hello? How are you "],
    temperature=0.8,
    max_tokens=600,
)

# Chat-completion request.
# NOTE(review): this rebinds `response`, discarding the completion result
# above — intentional only if the first call is a smoke test; confirm.
response = client.chat.completions.create(
    model="llama3-dumm/llm",
    messages=[{'role': 'user', 'content': "hell0"}],
    max_tokens=100,
    temperature=0.9,
)