import {
  BaseLLMProvider,
  BaseMessage,
  convertStringToMessages,
  GenerateOptions,
  GenerateResponse,
  PartialReturn,
} from '@agenite/llm';
/**
 * Skeleton of a custom LLM provider.
 *
 * A provider must implement two methods: `generate` for a single
 * request/response round trip and `stream` for incremental output.
 * The `iterate` method is already implemented by `BaseLLMProvider`
 * on top of these, so it is not overridden here.
 */
class CustomProvider extends BaseLLMProvider {
  readonly name = 'CustomProvider';
  readonly version = '1.0.0';

  /**
   * @param config - provider-specific settings (endpoint, credentials, …).
   *   Stored once at construction and never reassigned, hence `readonly`.
   */
  constructor(private readonly config: MyCustomConfig) {
    super();
  }

  /**
   * Produce a complete response in a single call.
   *
   * @param input - a raw prompt string or an existing message history
   * @param options - optional per-call overrides of the generate options
   * @returns the full response normalized to Agenite's shape
   */
  async generate(
    input: string | BaseMessage[],
    options?: Partial<GenerateOptions>
  ): Promise<GenerateResponse> {
    // 1. Normalize input: a bare string becomes a message array.
    const messages = convertStringToMessages(input);
    // 2. Transform `messages` into your API's wire format.
    // 3. Call your API (honoring `options` and `this.config`).
    // 4. Map the API response back into Agenite's GenerateResponse.
    return {
      content: [{ type: 'text', text: 'Response from custom LLM' }],
      tokens: [{ model: 'my-model', inputTokens: 10, outputTokens: 20 }],
      duration: 500,
      stopReason: 'endTurn',
    };
  }

  /**
   * Stream a response incrementally.
   *
   * Yields `PartialReturn` chunks as they arrive from the backend and
   * returns a final aggregate `GenerateResponse` with the same shape
   * that `generate` produces.
   *
   * @param input - a raw prompt string or an existing message history
   * @param options - optional per-call overrides of the generate options
   */
  async *stream(
    input: string | BaseMessage[],
    options?: Partial<GenerateOptions>
  ): AsyncGenerator<PartialReturn, GenerateResponse, unknown> {
    // 1. Normalize input exactly as `generate` does (this step was
    //    previously missing here, making the two methods inconsistent).
    const messages = convertStringToMessages(input);
    // 2. Open a streaming connection to your API using `messages`.
    // 3. Yield chunks as they arrive.
    yield { type: 'text', text: 'Partial response...' };
    yield { type: 'text', text: 'More text...' };
    // 4. Return the final aggregated response, like `generate`.
    return {
      content: [{ type: 'text', text: 'Complete response' }],
      tokens: [{ model: 'my-model', inputTokens: 10, outputTokens: 20 }],
      duration: 500,
      stopReason: 'endTurn',
    };
  }

  // `iterate` is inherited from BaseLLMProvider — no override needed.
}