Merged
24 changes: 15 additions & 9 deletions collector/utils/WhisperProviders/localWhisper.js
@@ -153,9 +153,9 @@ class LocalWhisper {
    try {
      // Convert ESM to CommonJS via import so we can load this library.
      const pipeline = (...args) =>
-       import("@xenova/transformers").then(({ pipeline }) =>
-         pipeline(...args)
-       );
+       import("@xenova/transformers").then(({ pipeline }) => {
+         return pipeline(...args);
+       });
      return await pipeline("automatic-speech-recognition", this.model, {
        cache_dir: this.cacheDir,
        ...(!fs.existsSync(this.modelPath)
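The import() wrapper kept in this hunk exists because @xenova/transformers is published as an ESM-only package while the collector runs as CommonJS, so a plain require() would fail; the change only swaps the concise arrow body for an explicit block with a return. A minimal sketch of the same pattern in isolation (the transcribe helper and the model name are illustrative, not part of this PR):

// Lazily load an ESM-only dependency from CommonJS code. The dynamic
// import() returns a promise, so callers simply await the wrapper.
const pipeline = (...args) =>
  import("@xenova/transformers").then(({ pipeline }) => {
    return pipeline(...args);
  });

// Illustrative usage: build a speech-recognition pipeline and run it on
// decoded WAV audio data.
async function transcribe(audioData) {
  const transcriber = await pipeline(
    "automatic-speech-recognition",
    "Xenova/whisper-small"
  );
  const { text } = await transcriber(audioData);
  return text;
}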
@@ -173,24 +173,30 @@ class LocalWhisper {
          : {}),
      });
    } catch (error) {
-     this.#log("Failed to load the native whisper model:", error);
-     throw error;
+     let errMsg = error.message;
+     if (errMsg.includes("Could not locate file")) {
+       errMsg =
+         "The native whisper model failed to download from the huggingface.co CDN. Your internet connection may be unstable or blocked by Huggingface.co - you will need to download the model manually and place it in the storage/models folder to use local Whisper transcription.";
+     }
+
+     this.#log(
+       `Failed to load the native whisper model: ${errMsg}`,
+       error.stack
+     );
+     throw new Error(errMsg);
    }
  }

  async processFile(fullFilePath, filename) {
    try {
-     const transcriberPromise = new Promise((resolve) =>
-       this.client().then((client) => resolve(client))
-     );
      const audioDataPromise = new Promise((resolve) =>
        this.#convertToWavAudioData(fullFilePath).then((audioData) =>
          resolve(audioData)
        )
      );
      const [audioData, transcriber] = await Promise.all([
        audioDataPromise,
-       transcriberPromise,
+       this.client(),
      ]);

      if (!audioData) {
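Taken together, the two hunks make the failure mode actionable and trim redundant promise plumbing: the catch block rewrites the library's opaque "Could not locate file" error into a message that points at the Hugging Face CDN download and the storage/models fallback, and processFile passes this.client() straight into Promise.all instead of re-wrapping it in a new Promise, since it already returns one. A rough sketch of how a caller sees the new behavior, assuming post-PR semantics; runLocalWhisper and the public convertToWavAudioData wrapper are hypothetical stand-ins, not code from this PR:

// Sketch only: model loading and WAV conversion run concurrently, and a
// download failure surfaces as a readable Error message.
async function runLocalWhisper(whisper, fullFilePath, filename) {
  try {
    const [audioData, transcriber] = await Promise.all([
      whisper.convertToWavAudioData(fullFilePath), // hypothetical public wrapper around #convertToWavAudioData
      whisper.client(),
    ]);
    const { text } = await transcriber(audioData);
    return text;
  } catch (error) {
    // After this PR, error.message already explains a blocked or unstable
    // huggingface.co download, so it can be shown to the user as-is.
    console.error(`Transcription of ${filename} failed: ${error.message}`);
    return null;
  }
}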