// backend/controllers/ollamaController.js
import ollama from 'ollama';
import { spawn } from 'child_process';
import path from 'path';
import { fileURLToPath } from 'url';
import pool from '../config/db.js';

// Define __filename and __dirname for ES modules (not provided natively in ESM).
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
/**
 * POST handler: answer a user prompt via a retrieval-augmented pipeline.
 *
 * Flow:
 *  1. Validate `prompt` and `assistantId` from the request body.
 *  2. Look up the assistant's FAISS index path (scoped to req.userId).
 *  3. Run the Python FAISS search script, whose stdout is the augmented prompt.
 *  4. Forward the augmented prompt to Ollama and return its chat response.
 *
 * @param {import('express').Request} req - body: { prompt, model?, assistantId };
 *   req.userId is assumed to be set by auth middleware — TODO confirm.
 * @param {import('express').Response} res
 * @returns {Promise<void>} JSON { response } on success; { message, error? } otherwise.
 */
export const queryAssistant = async (req, res) => {
  try {
    const { prompt, model, assistantId } = req.body;
    if (!prompt) {
      return res.status(400).json({ message: 'Prompt is required' });
    }
    if (!assistantId) {
      return res.status(400).json({ message: 'assistantId is required' });
    }

    // Use provided model or default to 'llama3.2:3b-instruct-q8_0'.
    const usedModel = model || 'llama3.2:3b-instruct-q8_0';

    // Retrieve the assistant's FAISS index path, scoped to the authenticated
    // user so one user cannot query another user's assistant.
    const [assistantRows] = await pool.execute(
      'SELECT faiss_index_path FROM ai_assistants WHERE id = ? AND user_id = ?',
      [assistantId, req.userId]
    );
    if (assistantRows.length === 0) {
      return res.status(404).json({ message: 'Assistant not found or unauthorized' });
    }
    const faissIndexPath = assistantRows[0].faiss_index_path;

    // Resolve the FAISS search script relative to this module.
    const pythonScriptPath = path.join(__dirname, '../python', 'faiss_search_mapping.py');

    // Python interpreter from the deployment's virtualenv (update as needed).
    const pythonExecutable = "/root/intelaide-backend/python/bin/python3";

    // Arguments for the search script:
    //   --faiss_index_path <index_path> --query <prompt>
    const searchArgs = [
      '--faiss_index_path', faissIndexPath,
      '--query', prompt
    ];

    // Run the Python script and capture its stdout (the augmented prompt).
    // Resolve the promise WITH the value rather than mutating an outer
    // variable from inside the 'close' handler.
    const pyOutput = await new Promise((resolve, reject) => {
      const pyProc = spawn(pythonExecutable, [pythonScriptPath, ...searchArgs]);
      let stdoutData = "";
      let stderrData = "";

      pyProc.stdout.on('data', (data) => {
        stdoutData += data.toString();
      });
      pyProc.stderr.on('data', (data) => {
        stderrData += data.toString();
      });
      // Without this handler a spawn failure (e.g. missing interpreter) emits
      // an unhandled 'error' event instead of surfacing as a 500 response.
      pyProc.on('error', (err) => {
        reject(new Error(`Failed to start FAISS search script: ${err.message}`, { cause: err }));
      });
      pyProc.on('close', (code) => {
        if (code !== 0) {
          console.error('FAISS search script error:', stderrData);
          return reject(new Error(`FAISS search script error: ${stderrData}`));
        }
        // The script prints the final augmented prompt on stdout; trim any
        // trailing newline the interpreter appends.
        resolve(stdoutData.trim());
      });
    });

    const augmentedPrompt = pyOutput;
    console.log('Prompt sent: ', augmentedPrompt);

    // Send the augmented prompt to Ollama.
    const response = await ollama.chat({
      model: usedModel,
      messages: [{ role: 'user', content: augmentedPrompt }],
    });

    console.log('Ollama chat response:', response);
    return res.json({ response });
  } catch (error) {
    console.error('Error querying assistant:', error);
    return res.status(500).json({ message: 'Error querying assistant', error: error.message });
  }
};

export default queryAssistant;