Skip to content
GitLab
Explore
Projects
Groups
Snippets
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
Memri
plugins
Memri Bot
Commits
0b54cc26
Commit
0b54cc26
authored
1 year ago
by
Alp Deniz Ogut
Browse files
Options
Download
Email Patches
Plain Diff
Remove prompt instruction tags & reconfigure streaming newlines
parent
0e572d40
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
memri_bot/plugin.py
+4
-5
memri_bot/plugin.py
with
4 additions
and
5 deletions
+4
-5
memri_bot/plugin.py
+
4
-
5
View file @
0b54cc26
...
...
@@ -111,7 +111,7 @@ class MemriBot(PluginBase):
return
Response
(
status_code
=
400
,
content
=
"rss_entry_id is missing"
)
# append instructions
context
+=
f
"
\n\n
<instructions>
\n
{
chat
.
system
}
"
context
+=
f
"
\n\n
{
chat
.
system
}
"
# truncate final query if it is longer than QUERY_MAX_TOKENS
chat
.
messages
[
-
1
].
content
=
truncate_with_textrank
(
chat
.
messages
[
-
1
].
content
,
QUERY_MAX_TOKENS
)
messages
=
[{
"role"
:
str
(
m
.
role
),
"content"
:
m
.
content
}
for
m
in
chat
.
messages
]
...
...
@@ -171,7 +171,7 @@ class MemriBot(PluginBase):
query
=
truncate_with_textrank
(
query
,
QUERY_MAX_TOKENS
)
context
=
truncate_with_textrank
(
context
,
CONTEXT_MAX_TOKENS
)
# append instructions
context
+=
f
"
\n\n
<instructions>
\n
{
system
}
"
context
+=
f
"
\n\n
{
system
}
"
logger
.
info
(
f
"Sending LLM request '
{
query
}
' with context '
{
context
}
'"
)
return
self
.
get_llm_response
(
query
,
context
,
stream
,
session
)
...
...
@@ -224,7 +224,7 @@ class MemriBot(PluginBase):
response_text
=
""
for
line
in
self
.
streaming_request
(
context
,
messages
=
[
m
.
to_dict
()
for
m
in
messages
]):
stream_chunk
=
json
.
loads
(
line
)
stream_chunk
=
json
.
loads
(
line
.
rstrip
(
'
\n
'
)
)
text
=
stream_chunk
[
"choices"
][
0
][
"text"
]
response_text
+=
text
yield
line
+
'
\n
'
...
...
@@ -252,5 +252,4 @@ class MemriBot(PluginBase):
with
s
.
post
(
f
"
{
LLM_API_URL
}
/v1/chat/completions"
,
stream
=
True
,
json
=
data
,
headers
=
headers
)
as
r
:
for
line
in
r
.
iter_lines
():
line
=
line
.
decode
(
"utf-8"
)
line
=
line
.
strip
()
yield
line
\ No newline at end of file
yield
line
+
'
\n
'
\ No newline at end of file
This diff is collapsed.
Click to expand it.
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment
Menu
Explore
Projects
Groups
Snippets