server/__tests__/utils/chats/openaiCompatible.test.js (new file: 249 additions, 0 deletions)
@@ -0,0 +1,249 @@
/* eslint-env jest, node */
const { OpenAICompatibleChat } = require('../../../utils/chats/openaiCompatible');
const { WorkspaceChats } = require('../../../models/workspaceChats');
const { getVectorDbClass, getLLMProvider } = require('../../../utils/helpers');
const { extractTextContent, extractAttachments } = require('../../../endpoints/api/openai/helpers');

// Mock dependencies
jest.mock('../../../models/workspaceChats');
jest.mock('../../../utils/helpers');
jest.mock('../../../utils/DocumentManager', () => ({
DocumentManager: class {
constructor() {
this.pinnedDocs = jest.fn().mockResolvedValue([]);
}
}
}));

describe('OpenAICompatibleChat', () => {
let mockWorkspace;
let mockVectorDb;
let mockLLMConnector;
let mockResponse;

beforeEach(() => {
// Reset all mocks
jest.clearAllMocks();

// Setup mock workspace
mockWorkspace = {
id: 1,
slug: 'test-workspace',
chatMode: 'chat',
chatProvider: 'openai',
chatModel: 'gpt-4',
};

// Setup mock VectorDb
mockVectorDb = {
hasNamespace: jest.fn().mockResolvedValue(true),
namespaceCount: jest.fn().mockResolvedValue(1),
performSimilaritySearch: jest.fn().mockResolvedValue({
contextTexts: [],
sources: [],
message: null,
}),
};
getVectorDbClass.mockReturnValue(mockVectorDb);

// Setup mock LLM connector
mockLLMConnector = {
promptWindowLimit: jest.fn().mockReturnValue(4000),
compressMessages: jest.fn().mockResolvedValue([]),
getChatCompletion: jest.fn().mockResolvedValue({
textResponse: 'Mock response',
metrics: {},
}),
streamingEnabled: jest.fn().mockReturnValue(true),
streamGetChatCompletion: jest.fn().mockResolvedValue({
metrics: {},
}),
handleStream: jest.fn().mockResolvedValue('Mock streamed response'),
defaultTemp: 0.7,
};
getLLMProvider.mockReturnValue(mockLLMConnector);

// Setup WorkspaceChats mock
WorkspaceChats.new.mockResolvedValue({ chat: { id: 'mock-chat-id' } });

// Setup mock response object for streaming
mockResponse = {
write: jest.fn(),
};
});

describe('chatSync', () => {
test('should handle OpenAI vision multimodal messages', async () => {
const multiModalPrompt = [
{
type: 'text',
text: 'What do you see in this image?'
},
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,abc123',
detail: 'low'
}
}
];

const prompt = extractTextContent(multiModalPrompt);
const attachments = extractAttachments(multiModalPrompt);
const result = await OpenAICompatibleChat.chatSync({
workspace: mockWorkspace,
prompt,
attachments,
systemPrompt: 'You are a helpful assistant',
history: [
{ role: 'user', content: 'Previous message' },
{ role: 'assistant', content: 'Previous response' }
],
temperature: 0.7
});

// Verify chat was saved with correct format
expect(WorkspaceChats.new).toHaveBeenCalledWith(
expect.objectContaining({
workspaceId: mockWorkspace.id,
prompt: multiModalPrompt[0].text,
response: expect.objectContaining({
text: 'Mock response',
attachments: [{
name: 'uploaded_image_0',
mime: 'image/png',
contentString: multiModalPrompt[1].image_url.url
}]
})
})
);

// Verify response format
expect(result).toEqual(
expect.objectContaining({
object: 'chat.completion',
choices: expect.arrayContaining([
expect.objectContaining({
message: expect.objectContaining({
role: 'assistant',
content: 'Mock response',
}),
}),
]),
})
);
});

test('should handle regular text messages in OpenAI format', async () => {
const promptString = 'Hello world';
const result = await OpenAICompatibleChat.chatSync({
workspace: mockWorkspace,
prompt: promptString,
systemPrompt: 'You are a helpful assistant',
history: [
{ role: 'user', content: 'Previous message' },
{ role: 'assistant', content: 'Previous response' }
],
temperature: 0.7
});

// Verify chat was saved without attachments
expect(WorkspaceChats.new).toHaveBeenCalledWith(
expect.objectContaining({
workspaceId: mockWorkspace.id,
prompt: promptString,
response: expect.objectContaining({
text: 'Mock response',
attachments: []
})
})
);

expect(result).toBeTruthy();
});
});

describe('streamChat', () => {
test('should handle OpenAI vision multimodal messages in streaming mode', async () => {
const multiModalPrompt = [
{
type: 'text',
text: 'What do you see in this image?'
},
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,abc123',
detail: 'low'
}
}
];

const prompt = extractTextContent(multiModalPrompt);
const attachments = extractAttachments(multiModalPrompt);
await OpenAICompatibleChat.streamChat({
workspace: mockWorkspace,
response: mockResponse,
prompt,
attachments,
systemPrompt: 'You are a helpful assistant',
history: [
{ role: 'user', content: 'Previous message' },
{ role: 'assistant', content: 'Previous response' }
],
temperature: 0.7
});

// Verify streaming was handled
expect(mockLLMConnector.streamGetChatCompletion).toHaveBeenCalled();
expect(mockLLMConnector.handleStream).toHaveBeenCalled();

// Verify chat was saved with attachments
expect(WorkspaceChats.new).toHaveBeenCalledWith(
expect.objectContaining({
workspaceId: mockWorkspace.id,
prompt: multiModalPrompt[0].text,
response: expect.objectContaining({
text: 'Mock streamed response',
attachments: [{
name: 'uploaded_image_0',
mime: 'image/png',
contentString: multiModalPrompt[1].image_url.url
}]
})
})
);
});

test('should handle regular text messages in streaming mode', async () => {
const promptString = 'Hello world';
await OpenAICompatibleChat.streamChat({
workspace: mockWorkspace,
response: mockResponse,
prompt: promptString,
systemPrompt: 'You are a helpful assistant',
history: [
{ role: 'user', content: 'Previous message' },
{ role: 'assistant', content: 'Previous response' }
],
temperature: 0.7
});

// Verify streaming was handled
expect(mockLLMConnector.streamGetChatCompletion).toHaveBeenCalled();
expect(mockLLMConnector.handleStream).toHaveBeenCalled();

// Verify chat was saved without attachments
expect(WorkspaceChats.new).toHaveBeenCalledWith(
expect.objectContaining({
workspaceId: mockWorkspace.id,
prompt: promptString,
response: expect.objectContaining({
text: 'Mock streamed response',
attachments: []
})
})
);
});
});
});
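The call signatures exercised above suggest how an OpenAI-compatible endpoint would feed an incoming request into the chat pipeline. The handler below is a hypothetical sketch for illustration only: the handler shape, the `body.messages` handling, and the `systemPrompt`/`temperature` field names are assumptions; only the `OpenAICompatibleChat.chatSync` arguments and the two helper functions come from the tests in this diff.

/* Hypothetical wiring, not part of this diff. Assumes an already-resolved
 * workspace and an OpenAI-style request body; only chatSync's argument shape
 * and the helper signatures are taken from the tests above. */
const { OpenAICompatibleChat } = require('../../../utils/chats/openaiCompatible');
const { extractTextContent, extractAttachments } = require('../../../endpoints/api/openai/helpers');

async function handleChatCompletion(workspace, body) {
  // The newest message may carry a plain string or a multi-modal content array.
  const lastMessage = body.messages[body.messages.length - 1];
  // Split multi-modal content into its text and image parts before handing
  // it to the chat pipeline.
  const prompt = extractTextContent(lastMessage.content);
  const attachments = extractAttachments(lastMessage.content);
  return OpenAICompatibleChat.chatSync({
    workspace,
    prompt,
    attachments,
    systemPrompt: body.systemPrompt, // assumed field name
    history: body.messages.slice(0, -1),
    temperature: body.temperature,
  });
}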
server/__tests__/utils/chats/openaiHelpers.test.js (new file: 128 additions, 0 deletions)
@@ -0,0 +1,128 @@
/* eslint-env jest, node */
const { extractTextContent, extractAttachments } = require('../../../endpoints/api/openai/helpers');

describe('OpenAI Helper Functions', () => {
describe('extractTextContent', () => {
test('should return string content as-is when not an array', () => {
const content = 'Hello world';
expect(extractTextContent(content)).toBe('Hello world');
});

test('should extract text from multi-modal content array', () => {
const content = [
{
type: 'text',
text: 'What do you see in this image?'
},
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,abc123',
detail: 'low'
}
},
{
type: 'text',
text: 'And what about this part?'
}
];
expect(extractTextContent(content)).toBe('What do you see in this image?\nAnd what about this part?');
});

test('should handle empty array', () => {
expect(extractTextContent([])).toBe('');
});

test('should handle array with no text content', () => {
const content = [
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,abc123',
detail: 'low'
}
}
];
expect(extractTextContent(content)).toBe('');
});
});

describe('extractAttachments', () => {
test('should return empty array for string content', () => {
const content = 'Hello world';
expect(extractAttachments(content)).toEqual([]);
});

test('should extract image attachments with correct mime types', () => {
const content = [
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,abc123',
detail: 'low'
}
},
{
type: 'text',
text: 'Between images'
},
{
type: 'image_url',
image_url: {
url: 'data:image/jpeg;base64,def456',
detail: 'high'
}
}
];
expect(extractAttachments(content)).toEqual([
{
name: 'uploaded_image_0',
mime: 'image/png',
contentString: 'data:image/png;base64,abc123'
},
{
name: 'uploaded_image_1',
mime: 'image/jpeg',
contentString: 'data:image/jpeg;base64,def456'
}
]);
});

test('should handle invalid data URLs with PNG fallback', () => {
const content = [
{
type: 'image_url',
image_url: {
url: 'invalid-data-url',
detail: 'low'
}
}
];
expect(extractAttachments(content)).toEqual([
{
name: 'uploaded_image_0',
mime: 'image/png',
contentString: 'invalid-data-url'
}
]);
});

test('should handle empty array', () => {
expect(extractAttachments([])).toEqual([]);
});

test('should handle array with no image content', () => {
const content = [
{
type: 'text',
text: 'Just some text'
},
{
type: 'text',
text: 'More text'
}
];
expect(extractAttachments(content)).toEqual([]);
});
});
});
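Read together, these cases pin down the helpers' contract: string content passes through extractTextContent unchanged and yields no attachments; array content has its text parts joined with newlines; image parts become attachments named by their image index, with the mime type parsed from the data URL and image/png used as a fallback. Below is a minimal sketch consistent with those expectations, inferred from the tests alone rather than copied from the actual implementation in server/endpoints/api/openai/helpers.js.

/* Sketch inferred from the test expectations above; the real helpers in
 * server/endpoints/api/openai/helpers.js may differ in detail. */
function extractTextContent(content) {
  // String content passes through unchanged.
  if (!Array.isArray(content)) return content;
  // Join the text parts of a multi-modal message with newlines.
  return content
    .filter((part) => part.type === 'text')
    .map((part) => part.text)
    .join('\n');
}

function extractAttachments(content) {
  // String content carries no attachments.
  if (!Array.isArray(content)) return [];
  return content
    .filter((part) => part.type === 'image_url')
    .map((part, i) => {
      // Parse the mime type from a data URL; fall back to image/png for
      // anything that is not a well-formed data URL.
      const match = part.image_url.url.match(/^data:([^;]+);/);
      return {
        name: `uploaded_image_${i}`, // indexed among image parts only
        mime: match ? match[1] : 'image/png',
        contentString: part.image_url.url,
      };
    });
}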