feat: Add support for OpenAI-compatible servers, replacing the Ollama-based API #1

Closed
22f9fc65 wants to merge 1 commit from 22f9fc65:adds_openai_support into master
7 changed files with 208 additions and 25 deletions

5
.env.example Normal file
View file

@ -0,0 +1,5 @@
# LiteLLM Proxy Settings
OPENAI_API_BASE_URL=http://127.0.0.1:4000/v1
OPENAI_API_KEY=MY_SECRET_KEY
DEFAULT_MODEL=openai/qwen3-30b-a3b-mlx
PORT=5000

View file

@ -1,2 +1,6 @@
export const
port = 5000;
// OpenAI configuration
export const
openaiApiKey = "your-api-key-here";

60
lib/openai.js Normal file
View file

@ -0,0 +1,60 @@
import { OpenAI } from 'openai';
// Build an OpenAI SDK client pointed at the given (possibly self-hosted,
// OpenAI-compatible) endpoint.
const createOpenAIClient = (apiKey, baseURL) => new OpenAI({ apiKey, baseURL });
/**
 * Fetch the list of models exposed by the OpenAI-compatible server.
 *
 * Degrades gracefully: when the endpoint is unreachable, returns a static
 * default model list plus an `error` message so the UI can still render.
 *
 * @param {object} [options]
 * @param {string} [options.apiKey]  - API key; defaults to OPENAI_API_KEY.
 * @param {string} [options.baseURL] - API root; defaults to OPENAI_API_BASE_URL.
 * @returns {Promise<{models: Array<{id: string}>, error?: string}>}
 */
export const fetchModels = async ({
  apiKey = process.env.OPENAI_API_KEY,
  baseURL = process.env.OPENAI_API_BASE_URL ?? 'http://127.0.0.1:4000/v1'
} = {}) => { // `= {}` so fetchModels() without an argument no longer throws
  const openai = createOpenAIClient(apiKey, baseURL);
  try {
    const response = await openai.models.list();
    // openai v4 returns a paginated object whose `data` holds the model
    // array; tolerate servers that return a bare array instead.
    const models = Array.isArray(response.data) ? response.data : response;
    return { models };
  } catch (error) {
    console.error('Error fetching models:', error);
    // Surface the error but still provide a usable default list.
    return {
      models: [
        { id: 'openai/qwen3-30b-a3b-mlx' }
      ],
      error: error.message
    };
  }
};
/**
 * Run a single-turn chat completion against the OpenAI-compatible server.
 *
 * API errors are deliberately NOT caught here — they propagate so the HTTP
 * layer can report them.
 *
 * @param {object} [options]
 * @param {string} [options.apiKey]  - API key; defaults to OPENAI_API_KEY.
 * @param {string} [options.baseURL] - API root; defaults to OPENAI_API_BASE_URL.
 * @param {string} [options.model]   - Model id; defaults to DEFAULT_MODEL.
 * @param {string} options.prompt    - User prompt (required).
 * @returns {Promise<{text: string}>} The first choice's message content.
 * @throws {TypeError} When `prompt` is missing or empty.
 */
export default async ({
  apiKey = process.env.OPENAI_API_KEY,
  baseURL = process.env.OPENAI_API_BASE_URL ?? 'http://127.0.0.1:4000/v1',
  model = process.env.DEFAULT_MODEL ?? 'openai/qwen3-30b-a3b-mlx',
  prompt
} = {}) => {
  // Fail fast with a clear error instead of sending `undefined` to the API.
  if (typeof prompt !== 'string' || prompt.length === 0) {
    throw new TypeError('prompt is required and must be a non-empty string');
  }
  const openai = createOpenAIClient(apiKey, baseURL);
  const response = await openai.chat.completions.create({
    model,
    messages: [
      {
        role: 'user',
        content: prompt
      }
    ],
    temperature: 0.7
  });
  return {
    text: response.choices[0].message.content
  };
};

47
main.js
View file

@ -1,12 +1,16 @@
import { createReadStream } from 'node:fs';
import { config } from 'dotenv';
// Load environment variables from .env file
config();
import createFastify from 'fastify';
import promptableTodoList from './src/promptable-todo-list.js';
import { fetchModels } from './lib/openai.js';
import {
port
} from './config.js';
// Get port from environment variable, default to 5000
const port = process.env.PORT || 5000;
const fastify = createFastify();
@ -39,5 +43,42 @@ fastify.post(
}
);
// Endpoint to fetch available models from the configured OpenAI-compatible API.
// Optional ?apiKey= and ?baseURL= query parameters override server defaults.
// NOTE(review): accepting an API key via query string puts it in access logs
// and browser history — consider a header or server-side-only key instead.
fastify.get(
  '/models',
  async (request, reply) => {
    try {
      const { models, error } = await fetchModels({
        apiKey: request.query.apiKey,
        baseURL: request.query.baseURL
      });
      if (error) {
        return reply.code(500).send({ error });
      }
      return reply.send({
        models,
        currentModel: process.env.DEFAULT_MODEL ?? 'openai/qwen3-30b-a3b-mlx',
        baseURL: process.env.OPENAI_API_BASE_URL ?? 'http://127.0.0.1:4000/v1'
      });
    } catch (error) {
      // Log full details server-side, but return only the message: serializing
      // every own property (the previous behavior) leaked stack traces and
      // internal file paths to the client.
      console.error('GET /models failed:', error);
      return reply.code(500).send({ error: error.message });
    }
  }
);
// Start the HTTP server on the configured port (PORT env var, default 5000).
await fastify.listen({ port });
console.log(`Listening to localhost:${port}`);

View file

@ -8,11 +8,12 @@
"dependencies": {
"ai": "^4.3.10",
"dedent": "^1.5.3",
"dotenv": "^16.4.1",
"fastify": "^5.3.2",
"happy-dom": "^17.4.4",
"joi": "^17.13.3",
"marked": "^15.0.11",
"ollama-ai-provider": "^1.2.0"
"openai": "^4.28.0"
},
"scripts": {
"start": "bun ./main.js"

View file

@ -62,10 +62,18 @@
column-gap: 0.5rem;
}
.app__status {
font-size: 0.8rem;
background-color: rgba(0, 0, 0, 0.05);
padding: 0.5rem;
border-radius: 0.25rem;
margin-bottom: 1rem;
}
.main__prompt {
margin-top: auto;
display: grid;
grid-template-columns: auto 1fr 100px;
grid-template-columns: 1fr auto;
column-gap: 0.5rem;
}
@ -114,8 +122,6 @@
titleElement.setAttribute('value', task.title);
formElement.appendChild(titleElement);
// TODO up/down
const removeElement = createSimpleElement('button.secondary');
removeElement.textContent = '🗑️';
removeElement.addEventListener('click', () => {
@ -124,7 +130,6 @@
});
formElement.appendChild(removeElement);
// TODO replace form 'submit' event w/ field 'input' event
formElement.addEventListener('submit', event => {
event.preventDefault();
task.isDone = isDoneElement.checked;
@ -136,9 +141,64 @@
}
};
// Fetch available models from the server and populate the status line and the
// model dropdown. Each DOM lookup is null-guarded so a missing element (e.g.
// markup changed) degrades to a console error instead of an uncaught
// TypeError mid-update.
const fetchAvailableModels = async () => {
  try {
    const { data } = await axios.get('/models');
    const currentModelElement = document.getElementById('current-model');
    const modelDropdownElement = document.getElementById('model-dropdown');
    const statusElement = document.querySelector('.app__status strong:first-child');
    // Show which API base URL the server is talking to. The text node after
    // the <strong> label is the writable status text.
    if (data.baseURL && statusElement?.nextSibling) {
      statusElement.nextSibling.textContent = ` Using API at ${data.baseURL}`;
    }
    if (currentModelElement) {
      currentModelElement.textContent = data.currentModel;
    }
    if (!modelDropdownElement) return;
    // Rebuild the dropdown from scratch.
    modelDropdownElement.innerHTML = '';
    const addOption = (id) => {
      const option = document.createElement('option');
      option.value = id;
      option.textContent = id;
      option.selected = id === data.currentModel;
      modelDropdownElement.appendChild(option);
    };
    if (data.models && data.models.length > 0) {
      data.models.forEach(model => addOption(model.id));
    } else {
      // No models reported: fall back to the server's current model.
      addOption(data.currentModel);
    }
  } catch (error) {
    console.error('Error fetching models:', error);
    const currentModelElement = document.getElementById('current-model');
    if (currentModelElement) {
      currentModelElement.textContent = 'Error loading models';
    }
  }
};
document.addEventListener('DOMContentLoaded', async () => {
reloadTasks();
// Fetch available models
await fetchAvailableModels();
// Add event listener for model dropdown
document.getElementById('model-dropdown').addEventListener('change', async (event) => {
const selectedModel = event.target.value;
document.getElementById('current-model').textContent = selectedModel;
// Store the selected model in localStorage for future use
localStorage.setItem('selectedModel', selectedModel);
});
document.querySelector('.main__new-task').addEventListener('submit', event => {
event.preventDefault();
const
@ -158,7 +218,6 @@
const
promptFieldsetElement = document.querySelector('.main__prompt__fieldset'),
promptSubmitElement = document.querySelector('.main__prompt__submit'),
promptModelElement = document.querySelector('.main__prompt__model'),
promptTextElement = document.querySelector('.main__prompt__text');
document.querySelector('.main__prompt').addEventListener('submit', async event => {
@ -172,10 +231,11 @@
finalState
}
} = await axios.post('/', {
baseURL: 'http://localhost:11434/api',
model: promptModelElement.value,
// API key and base URL are set from environment variables on the server
initialState: tasks,
instructions: promptTextElement.value
instructions: promptTextElement.value,
// Use the selected model if available
model: localStorage.getItem('selectedModel') || undefined
}));
}
catch(error){
@ -194,9 +254,6 @@
promptSubmitElement.removeAttribute('aria-busy');
promptFieldsetElement.disabled = false;
});
promptModelElement.value = window.localStorage.getItem('model') || '';
promptModelElement.addEventListener('input', () => window.localStorage.setItem('model', promptModelElement.value));
});
</script>
</head>
@ -210,6 +267,15 @@
</nav>
</header>
<main class="app__main">
<div class="app__status">
<strong>Status:</strong> Connecting to API...
<div class="model-selector" style="margin-top: 0.5rem;">
<strong>Model:</strong> <span id="current-model">Loading...</span>
<label for="model-dropdown"></label><select id="model-dropdown" style="margin-left: 0.5rem; padding: 0.25rem; font-size: 0.8rem;">
<option value="">Loading models...</option>
</select>
</div>
</div>
<ul class="main__tasks"></ul>
<form class="main__new-task">
<label style="display: contents"><input class="main__new-task__is-done" type="checkbox"></label>
@ -217,8 +283,7 @@
</form>
<form class="main__prompt">
<fieldset class="main__prompt__fieldset" style="display: contents">
<label style="display: contents"><input class="main__prompt__model" type="text" placeholder="Model ID" required></label>
<label style="display: contents"><input class="main__prompt__text" type="text" placeholder="Prompt..." required></label>
<label style="display: contents"><input class="main__prompt__text" type="text" placeholder="Enter your instruction for the to-do list..." required></label>
<button class="main__prompt__submit" type="submit"><span class="main__prompt__submit__label">Submit</span></button>
</fieldset>
</form>

View file

@ -3,16 +3,18 @@ import { marked } from 'marked';
import { Window } from 'happy-dom';
import Joi from 'joi';
import ollama from '../lib/ollama.js';
import openai from '../lib/openai.js';
export default async ({
baseURL,
model,
apiKey = process.env.OPENAI_API_KEY,
baseURL = process.env.OPENAI_API_BASE_URL,
model = process.env.DEFAULT_MODEL,
initialState,
instructions
}) => {
const
{ text } = await ollama({
{ text } = await openai({
apiKey,
baseURL,
model,
prompt: dedent `
@ -28,6 +30,11 @@ export default async ({
${instructions}
\`\`\`
Update your JSON value accordingly and output it alone within a Markdown JSON code block.
- Feel free to change the list as necessary, breaking up items, moving them around, etc.
- Do not output anything else than the JSON code block.
- It is important that the JSON code block is valid JSON.
- The list should reflect what the user want, so when they say something , thing about the nuance of the list.
- Do not remove any item, only update or add them.
`
}),
{ document } = new Window();