Commit 9f66c7d ("fix")
Parent: 3220193
src/app/queries/predictNextPanels.ts
CHANGED
@@ -61,19 +61,21 @@ export const predictNextPanels = async ({
   try {
     // console.log(`calling predict(${query}, ${nbTotalPanels})`)
     result = `${await predict(query, nbPanelsToGenerate) || ""}`.trim()
+    console.log("LLM result (1st trial):", result)
     if (!result.length) {
-      throw new Error("empty result!")
+      throw new Error("empty result on 1st trial!")
     }
   } catch (err) {
     // console.log(`prediction of the story failed, trying again..`)
     try {
-      result = `${await predict(query+"
+      result = `${await predict(query + " \n ", nbPanelsToGenerate) || ""}`.trim()
+      console.log("LLM result (2nd trial):", result)
       if (!result.length) {
-        throw new Error("empty result!")
+        throw new Error("empty result on 2nd trial!")
       }
     } catch (err) {
-      console.error(`prediction of the story failed
-      throw new Error(`failed to generate the story ${err}`)
+      console.error(`prediction of the story failed twice 💩`)
+      throw new Error(`failed to generate the story twice 💩 ${err}`)
     }
   }
 
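The fix above turns story prediction into a two-trial process: if the first predict() call throws or comes back empty, the query is re-sent once with a trailing " \n " appended, and only a double failure aborts generation. A minimal sketch of that pattern in isolation, assuming predict(inputs, nbPanels) resolves to the raw LLM text as in the diff (the helper name predictWithOneRetry and the standalone shape are illustrative, not code from the repo):

// Sketch of the two-trial pattern from the diff above. `predict` is passed
// in so the helper stays self-contained; in the repo the calls are inlined.
type Predict = (inputs: string, nbPanels: number) => Promise<string>

async function predictWithOneRetry(
  predict: Predict,
  query: string,
  nbPanelsToGenerate: number
): Promise<string> {
  try {
    const result = `${await predict(query, nbPanelsToGenerate) || ""}`.trim()
    console.log("LLM result (1st trial):", result)
    if (!result.length) {
      throw new Error("empty result on 1st trial!")
    }
    return result
  } catch (err) {
    // 2nd trial: the trailing " \n " slightly perturbs the prompt, which
    // (presumably) avoids replaying whatever produced the empty or failed
    // completion the first time.
    const result = `${await predict(query + " \n ", nbPanelsToGenerate) || ""}`.trim()
    console.log("LLM result (2nd trial):", result)
    if (!result.length) {
      throw new Error(`failed to generate the story twice 💩 ${err}`)
    }
    return result
  }
}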
src/app/queries/predictWithHuggingFace.ts
CHANGED
@@ -14,7 +14,7 @@ export async function predict(inputs: string, nbPanels: number): Promise<string>
 
   // we don't require a lot of token for our task
   // but to be safe, let's count ~110 tokens per panel
-  const nbMaxNewTokens = nbPanels * 110
+  const nbMaxNewTokens = nbPanels * 130 // 110 isn't enough anymore for long dialogues
 
   switch (llmEngine) {
     case "INFERENCE_ENDPOINT":
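The second change raises the generation budget: nbMaxNewTokens caps how many new tokens the LLM engine may emit, so it must scale with the number of panels being written, and 110 tokens per panel was no longer enough once dialogues got longer. A small sketch of that sizing logic (the HARD_CAP constant and the helper name are hypothetical additions for illustration, not from the repo):

// Sketch: size max_new_tokens from the panel count, per the commit above.
const TOKENS_PER_PANEL = 130 // bumped from 110; long dialogues overflowed

// Hypothetical safety cap so a huge panel count can't blow past the model's
// context budget; the repo itself doesn't define such a constant here.
const HARD_CAP = 4096

function computeNbMaxNewTokens(nbPanels: number): number {
  return Math.min(nbPanels * TOKENS_PER_PANEL, HARD_CAP)
}

console.log(computeNbMaxNewTokens(4)) // 520 tokens for a 4-panel strip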