
Commit 50d9aa7

AlexeyMatveev686 authored and committed
Fix bug #61064
1 parent 64eeecc commit 50d9aa7


8 files changed: +135 −37 lines changed


sdkjs-plugins/content/openai/index.html (+26 −3)

@@ -54,7 +54,7 @@
 <div class="div_settings">
 <div class="div_parametr" title="The model which will generate the completion. Some models are suitable for natural language tasks, others specialize in code.">
 <label class="header i18n" style="margin-bottom: 2px;">Model</label>
-<select id="sel_models" class="form_control"></select>
+<select id="sel_models" class="form-control"></select>
 </div>
 <div class="div_parametr noselect" title="The maximum number of tokens to generate in the completion.">
 <label class="header i18n">Maximum length</label>
@@ -87,12 +87,35 @@
 </div>
 </div>
 <div id="div_err" class="hidden" style="margin-top: 10px;">
-<label style="color: red;">Error:</label>
+<label class="lb_err">Error:</label>
 <br>
 <label id="lb_err"></label>
 </div>
 <div id="footerCont">
-<label id="logoutLink" class="link i18n">Reconfigure</label>
+<div>
+<label id="logoutLink" class="link i18n">Reconfigure</label>
+</div>
+<div >
+<div id="div_modal" class="div_modal hidden form-control">
+<div class="div_row">
+<label id="lb_modal_tokens">0</label>
+<label class="i18n">tokens in request.</label>
+</div>
+<div class="div_row">
+<label class="i18n">Up to</label>
+<label id="lb_modal_length">256</label>
+<label class="i18n">tokens in response.</label>
+</div>
+<div id="modal_error" class="div_row hidden lb_err">
+<label class="i18n">This model can only process maximum of</label>
+<label id="modal_err_len">4000</label>
+<label class="i18n">tokens in a single request, please reduce your prompt or response length.</label>
+</div>
+</div>
+<div id="div_tokens" class="form-control div_tokens">
+<label id="lb_tokens">0</label>
+</div>
+</div>
 </div>
 </div>
 <div id="loader-container" class="asc-loader-container loader hidden"></div>
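
For context, the new footer markup adds a token counter badge (#div_tokens) and a hover panel (#div_modal) that breaks the count down into prompt tokens and the requested response length, plus an inline warning row (#modal_error). A minimal sketch of how such markup can be wired up, assuming the elements above exist in the page; countPromptTokens() is a hypothetical stand-in for the plugin's encoder call, not part of this commit:

// Sketch only, not the plugin's code: hovering the badge toggles the breakdown
// panel via the 'hidden' class, and both labels mirror the same token count.
var divTokens = document.getElementById('div_tokens');
var modal = document.getElementById('div_modal');
var lbTokens = document.getElementById('lb_tokens');
var lbModalTokens = document.getElementById('lb_modal_tokens');

divTokens.addEventListener('mouseenter', function () {
    modal.classList.remove('hidden');
});
divTokens.addEventListener('mouseleave', function () {
    modal.classList.add('hidden');
});

function updateTokenLabels(promptText) {
    var count = countPromptTokens(promptText); // hypothetical encoder stand-in
    lbTokens.innerText = count;
    lbModalTokens.innerText = count;
}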

sdkjs-plugins/content/openai/resources/css/styles.css (+27)

@@ -88,12 +88,39 @@ input::selection {

 #footerCont {
 margin: 5px 0px;
+display: flex;
+justify-content: space-between;
 }

 .div_settings {
 flex: 1;
 }

+.div_tokens {
+display: flex;
+justify-content: center;
+align-items: center;
+border-radius: 12px;
+}
+
+.div_row {
+margin: 5px;
+}
+
+.lb_err {
+color: red;
+}
+
+.div_modal {
+display: flex;
+flex-flow: column;
+position: fixed;
+bottom: 40px;
+right: 15px;
+left: 12px;
+height: fit-content;
+}
+
 .loader {
 margin: 0 !important;
 position: absolute;

sdkjs-plugins/content/openai/scripts/code.js (+57 −9)

@@ -20,7 +20,8 @@
 let loader;
 let elements = {};
 let apiKey = null;
-let timeout = null;
+let errTimeout = null;
+let tokenTimeot = null;
 let bCreateLoader = false;
 let maxTokens = 4000;
 const isIE = checkInternetExplorer();
@@ -77,9 +78,9 @@
 };

 elements.reconfigure.onclick = function() {
-if (timeout) {
-clearTimeout(timeout);
-timeout = null;
+if (errTimeout) {
+clearTimeout(errTimeout);
+errTimeout = null;
 clearMainError();
 }
 localStorage.removeItem('OpenAiApiKey');
@@ -94,8 +95,28 @@
 elements.textArea.focus();
 };

-elements.textArea.oninput = function() {
+elements.textArea.oninput = function(event) {
 elements.textArea.classList.remove('error_border');
+if (tokenTimeot) {
+clearTimeout(tokenTimeot);
+tokenTimeot = null;
+}
+tokenTimeot = setTimeout(function() {
+let text = event.target.value.trim();
+let tokens = window.Asc.OpenAIEncode(text);
+elements.lbTokens.innerText = tokens.length;
+elements.lbModalTokens.innerText = tokens.length;
+checkLen();
+}, 250);
+
+};
+
+elements.divTokens.onmouseenter = function(e) {
+elements.modal.classList.remove('hidden');
+};
+
+elements.divTokens.onmouseleave = function(e) {
+elements.modal.classList.add('hidden');
 };

 elements.btnSubmit.onclick = function() {
@@ -104,6 +125,9 @@
 elements.textArea.classList.add('error_border');
 return;
 };
+if (!elements.modalError.classList.contains('hidden')) {
+return;
+}
 createLoader();

 fetch('https://api.openai.com/v1/completions', {
@@ -136,11 +160,11 @@
 .catch(function(error) {
 elements.mainError.classList.remove('hidden');
 elements.mainErrorLb.innerHTML = error.message;
-if (timeout) {
-clearTimeout(timeout);
-timeout = null;
+if (errTimeout) {
+clearTimeout(errTimeout);
+errTimeout = null;
 }
-timeout = setTimeout(clearMainError, 10000);
+errTimeout = setTimeout(clearMainError, 10000);
 console.error('Error:', error);
 }).finally(function(){
 destroyLoader();
@@ -167,6 +191,13 @@
 elements.keyError = document.getElementById('apiKeyError');
 elements.keyErrorLb = document.getElementById('lb_key_err');
 elements.keyErrorMes = document.getElementById('lb_key_err_mes');
+elements.lbTokens = document.getElementById('lb_tokens');
+elements.divTokens = document.getElementById('div_tokens');
+elements.modal = document.getElementById('div_modal');
+elements.lbModalTokens = document.getElementById('lb_modal_tokens');
+elements.lbModalLen = document.getElementById('lb_modal_length');
+elements.modalErrLen = document.getElementById('modal_err_len');
+elements.modalError = document.getElementById('modal_error');
 };

 function initScrolls() {
@@ -196,6 +227,10 @@

 function onSlInput(e) {
 e.target.nextElementSibling.innerText = e.target.value;
+if (e.target.id == elements.inpLenSl.id) {
+elements.lbModalLen.innerText = e.target.value;
+checkLen();
+}
 };

 function fetchModels() {
@@ -234,6 +269,7 @@
 let event = document.createEvent('Event');
 event.initEvent('input', true, true);
 elements.inpLenSl.dispatchEvent(event);
+elements.modalErrLen.innerText = maxTokens;
 });

 if ($('#sel_models').find('option[value=text-davinci-003]').length) {
@@ -321,6 +357,18 @@
 return rv !== -1;
 };

+function checkLen() {
+let cur = new Number(elements.lbTokens.innerText);
+let maxLen = new Number(elements.inpLenSl.value);
+if (cur + maxLen > maxTokens) {
+elements.modalError.classList.remove('hidden');
+elements.lbTokens.classList.add('lb_err');
+} else {
+elements.modalError.classList.add('hidden');
+elements.lbTokens.classList.remove('lb_err');
+}
+};
+
 window.Asc.plugin.onTranslate = function() {
 if (bCreateLoader)
 createLoader();
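
Taken together, these code.js changes debounce token counting on input (250 ms), encode the prompt with window.Asc.OpenAIEncode, and refuse to submit while prompt tokens plus the requested "Maximum length" exceed the model limit (maxTokens). A small standalone sketch of that guard, assuming the encoder returns an array of token ids; the function and parameter names here are illustrative, not the plugin's:

// Sketch of the length guard introduced above, independent of the DOM.
// encode is passed in as a stand-in for window.Asc.OpenAIEncode (assumed to
// return an array of token ids for the given text).
function wouldExceedLimit(promptText, maxResponseTokens, modelLimit, encode) {
    var promptTokens = encode(promptText.trim()).length;
    return promptTokens + maxResponseTokens > modelLimit;
}

// Debounced usage, mirroring the 250 ms timer in the oninput handler above:
var tokenTimer = null;
function onPromptInput(promptText, maxResponseTokens, modelLimit, encode, onResult) {
    if (tokenTimer) {
        clearTimeout(tokenTimer);
    }
    tokenTimer = setTimeout(function () {
        onResult(wouldExceedLimit(promptText, maxResponseTokens, modelLimit, encode));
    }, 250);
}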

sdkjs-plugins/content/openai/translations/cs-CS.json (+5 −5)

@@ -18,9 +18,9 @@
 "Temperature" : "Teplota",
 "Stop sequences" : "Stop sekvence",
 "This plugin doesn't work in Internet Explorer." : "Tento plugin nefunguje v aplikaci Internet Explorer.",
-"This model's maximum context length is" : "Maximální délka kontextu tohoto modelu je",
-"tokens, however you requested" : "tokenů, nicméně jste požadovali",
-"tokens (" : "tokenů (",
-"in your prompt;" : "ve vaší výzvě;",
-"for the completion). Please reduce your prompt; or completion length." : "pro dokončení). Zkraťte prosím svou výzvu; nebo délka dokončení."
+"tokens in request." : "tokenů v žádosti.",
+"Up to" : "",
+"tokens in response." : "tokenů v reakci.",
+"This model can only process maximum of" : "Tento model může zpracovat maximálně",
+"tokens in a single request, please reduce your prompt or response length." : "tokenů v jednom požadavku, zkraťte prosím délku výzvy nebo odpovědi."
 }

sdkjs-plugins/content/openai/translations/de-DE.json (+5 −5)

@@ -18,9 +18,9 @@
 "Temperature" : "Temperatur",
 "Stop sequences" : "Stoppsequenzen",
 "This plugin doesn't work in Internet Explorer." : "Dieses Plugin funktioniert nicht im Internet Explorer.",
-"This model's maximum context length is" : "Die maximale Kontextlänge dieses Modells beträgt",
-"tokens, however you requested" : "Token, Sie haben jedoch",
-"tokens (" : "Token angefordert (",
-"in your prompt;" : "in Ihrer Eingabeaufforderung;",
-"for the completion). Please reduce your prompt; or completion length." : "für die Vervollständigung). Bitte reduzieren Sie Ihre Aufforderung; oder Abschlusslänge."
+"tokens in request." : "Token auf Anfrage.",
+"Up to" : "Bis zu",
+"tokens in response." : "Token als Antwort.",
+"This model can only process maximum of" : "Dieses Modell kann nur maximal",
+"tokens in a single request, please reduce your prompt or response length." : "Token in einer einzigen Anfrage verarbeiten.Bitte reduzieren Sie Ihre Aufforderungs- oder Antwortlänge."
 }

sdkjs-plugins/content/openai/translations/es-ES.json (+5 −5)

@@ -18,9 +18,9 @@
 "Temperature" : "Temperatura",
 "Stop sequences" : "Secuencias de parada",
 "This plugin doesn't work in Internet Explorer." : "Este complemento no funciona en Internet Explorer.",
-"This model's maximum context length is" : "La longitud máxima de contexto de este modelo es de",
-"tokens, however you requested" : "tokens, sin embargo, solicitó",
-"tokens (" : "tokens (",
-"in your prompt;" : "en su solicitud;",
-"for the completion). Please reduce your prompt; or completion length." : "para la finalización). Por favor, reduzca su solicitud o la duración de la finalización."
+"tokens in request." : "fichas a petición.",
+"Up to" : "Hasta",
+"tokens in response." : "fichas en respuesta.",
+"This model can only process maximum of" : "Este modelo solo puede procesar un máximo de",
+"tokens in a single request, please reduce your prompt or response length." : "tokens en una sola solicitud, reduzca la duración de su solicitud o respuesta."
 }

sdkjs-plugins/content/openai/translations/fr-FR.json (+5 −5)

@@ -18,9 +18,9 @@
 "Temperature" : "Température",
 "Stop sequences" : "Séquences d'arrêt",
 "This plugin doesn't work in Internet Explorer." : "Ce plugin ne fonctionne pas dans Internet Explorer.",
-"This model's maximum context length is" : "La longueur de contexte maximale de ce modèle est de",
-"tokens, however you requested" : "jetons, mais vous avez demandé",
-"tokens (" : "jetons (",
-"in your prompt;" : "dans votre invite;",
-"for the completion). Please reduce your prompt; or completion length." : " pour l'achèvement). Veuillez réduire votre invite; ou la longueur de l'achèvement."
+"tokens in request." : "jetons en demande.",
+"Up to" : "Jusqu'à",
+"tokens in response." : "jetons en réponse.",
+"This model can only process maximum of" : "Ce modèle ne peut traiter qu'un maximum de",
+"tokens in a single request, please reduce your prompt or response length." : "jetons dans une seule demande, veuillez réduire la longueur de votre invite ou de votre réponse."
 }

sdkjs-plugins/content/openai/translations/ru-RU.json (+5 −5)

@@ -18,9 +18,9 @@
 "Temperature" : "Температура",
 "Stop sequences" : "Стоп последовательности",
 "This plugin doesn't work in Internet Explorer." : "Этот плагин не работает в Internet Explorer.",
-"This model's maximum context length is" : "Максимальная длина контекста этой модели составляет",
-"tokens, however you requested" : "токенов, однако вы запросили",
-"tokens (" : "токенов (",
-"in your prompt;" : "в вашем запросе",
-"for the completion). Please reduce your prompt; or completion length." : " для максимального количества). Пожалуйста, сократите ваш запрос или максимальное количество токенов."
+"tokens in request." : "токенов в запросе.",
+"Up to" : "Вплоть до",
+"tokens in response." : "токенов в ответе.",
+"This model can only process maximum of" : "Эта модель может обрабатывать только",
+"tokens in a single request, please reduce your prompt or response length." : "токенов в одном запросе, пожалуйста уменьшите запрос или максисальное количество токенов."
 }
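
The five translation files above are updated in the same way: the old sentence fragments for the server-side "maximum context length" error are replaced with keys for the new client-side messages ("tokens in request.", "Up to", "tokens in response.", and the single-request limit warning). A hypothetical illustration of how such a key-to-string table could be applied to the .i18n labels in index.html, assuming the active locale's JSON has been parsed into an object; the plugin's real translation mechanism may differ:

// Illustration only: map each .i18n label's English text to its translation,
// leaving the label unchanged when the key is missing or empty.
function applyTranslations(translations) {
    document.querySelectorAll('.i18n').forEach(function (el) {
        var translated = translations[el.innerText];
        if (translated) {
            el.innerText = translated;
        }
    });
}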
