CheapestInference API — Framework Integrations

LangChain

import os

from langchain_openai import ChatOpenAI

# Point LangChain's OpenAI client at the CheapestInference gateway.
# The key is read from the environment so it is never committed in source.
llm = ChatOpenAI(
    model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
    openai_api_key=os.environ["CHEAPESTINFERENCE_API_KEY"],
    openai_api_base="https://api.cheapestinference.ai/v1"
)

# One-shot chat completion against the configured endpoint.
response = llm.invoke("Tell me a joke")

LlamaIndex

import os

from llama_index.llms.openai import OpenAI

# NOTE(review): LlamaIndex's OpenAI class validates model ids against OpenAI's
# own catalogue; for third-party ids like this one the documented class is
# llama_index.llms.openai_like.OpenAILike — confirm which one this gateway needs.
llm = OpenAI(
    model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
    api_key=os.environ["CHEAPESTINFERENCE_API_KEY"],  # keep secrets out of source
    api_base="https://api.cheapestinference.ai/v1"
)

Vercel AI SDK

// `openai` is the pre-configured default provider instance and does not accept
// settings; a custom apiKey/baseURL requires the createOpenAI factory.
import { createOpenAI } from '@ai-sdk/openai';

const provider = createOpenAI({
  apiKey: process.env.CHEAPESTINFERENCE_API_KEY,
  baseURL: 'https://api.cheapestinference.ai/v1'
});

// Obtain a model handle for the gateway-hosted Llama 3.1 70B model.
const model = provider('meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo');

AutoGen

import os

from autogen import AssistantAgent, UserProxyAgent

# Single OpenAI-compatible endpoint entry for AutoGen; the key comes from
# the environment so it is never hard-coded in source.
config_list = [{
    "model": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
    "api_key": os.environ["CHEAPESTINFERENCE_API_KEY"],
    "base_url": "https://api.cheapestinference.ai/v1"
}]

# Assistant agent backed by the endpoint configured above.
assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})

Haystack

from haystack.components.generators import OpenAIGenerator
from haystack.utils import Secret

# Haystack 2.x expects api_key as a Secret, not a raw string;
# Secret.from_env_var also keeps the token out of source control.
generator = OpenAIGenerator(
    api_key=Secret.from_env_var("CHEAPESTINFERENCE_API_KEY"),
    api_base_url="https://api.cheapestinference.ai/v1",
    model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
)

Continue.dev

Add to .continue/config.json:
{
  "models": [{
    "title": "Llama 3.1 70B",
    "provider": "openai",
    "model": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
    "apiKey": "YOUR_API_KEY",
    "apiBase": "https://api.cheapestinference.ai/v1"
  }]
}

n8n

Available as a native integration in n8n workflows.