Skip to content

Commit 87262d1

Browse files
committed
Save conversation to DB in the background, as an asyncio task
1 parent f929ff8 commit 87262d1

File tree

5 files changed

+34
-27
lines changed

5 files changed

+34
-27
lines changed

src/khoj/processor/conversation/anthropic/anthropic_chat.py

+4-3
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import asyncio
12
import logging
23
from datetime import datetime, timedelta
34
from typing import AsyncGenerator, Dict, List, Optional
@@ -193,13 +194,13 @@ async def converse_anthropic(
193194
if conversation_commands == [ConversationCommand.Notes] and is_none_or_empty(references):
194195
response = prompts.no_notes_found.format()
195196
if completion_func:
196-
await completion_func(chat_response=response)
197+
asyncio.create_task(completion_func(chat_response=response))
197198
yield response
198199
return
199200
elif conversation_commands == [ConversationCommand.Online] and is_none_or_empty(online_results):
200201
response = prompts.no_online_results_found.format()
201202
if completion_func:
202-
await completion_func(chat_response=response)
203+
asyncio.create_task(completion_func(chat_response=response))
203204
yield response
204205
return
205206

@@ -251,4 +252,4 @@ async def converse_anthropic(
251252

252253
# Call completion_func once we finish streaming and have the full response
253254
if completion_func:
254-
await completion_func(chat_response=full_response)
255+
asyncio.create_task(completion_func(chat_response=full_response))

src/khoj/processor/conversation/google/gemini_chat.py

+4-3
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import asyncio
12
import logging
23
from datetime import datetime, timedelta
34
from typing import AsyncGenerator, Dict, List, Optional
@@ -218,13 +219,13 @@ async def converse_gemini(
218219
if conversation_commands == [ConversationCommand.Notes] and is_none_or_empty(references):
219220
response = prompts.no_notes_found.format()
220221
if completion_func:
221-
await completion_func(chat_response=response)
222+
asyncio.create_task(completion_func(chat_response=response))
222223
yield response
223224
return
224225
elif conversation_commands == [ConversationCommand.Online] and is_none_or_empty(online_results):
225226
response = prompts.no_online_results_found.format()
226227
if completion_func:
227-
await completion_func(chat_response=response)
228+
asyncio.create_task(completion_func(chat_response=response))
228229
yield response
229230
return
230231

@@ -275,4 +276,4 @@ async def converse_gemini(
275276

276277
# Call completion_func once we finish streaming and have the full response
277278
if completion_func:
278-
await completion_func(chat_response=full_response)
279+
asyncio.create_task(completion_func(chat_response=full_response))

src/khoj/processor/conversation/offline/chat_model.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -202,13 +202,13 @@ async def converse_offline(
202202
if conversation_commands == [ConversationCommand.Notes] and is_none_or_empty(references):
203203
response = prompts.no_notes_found.format()
204204
if completion_func:
205-
await completion_func(chat_response=response)
205+
asyncio.create_task(completion_func(chat_response=response))
206206
yield response
207207
return
208208
elif conversation_commands == [ConversationCommand.Online] and is_none_or_empty(online_results):
209209
response = prompts.no_online_results_found.format()
210210
if completion_func:
211-
await completion_func(chat_response=response)
211+
asyncio.create_task(completion_func(chat_response=response))
212212
yield response
213213
return
214214

@@ -317,7 +317,7 @@ def _sync_llm_thread():
317317

318318
# Call the completion function after streaming is done
319319
if completion_func:
320-
await completion_func(chat_response=aggregated_response_container["response"])
320+
asyncio.create_task(completion_func(chat_response=aggregated_response_container["response"]))
321321

322322

323323
def send_message_to_model_offline(

src/khoj/processor/conversation/openai/gpt.py

+4-3
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import asyncio
12
import logging
23
from datetime import datetime, timedelta
34
from typing import AsyncGenerator, Dict, List, Optional
@@ -219,13 +220,13 @@ async def converse_openai(
219220
if conversation_commands == [ConversationCommand.Notes] and is_none_or_empty(references):
220221
response = prompts.no_notes_found.format()
221222
if completion_func:
222-
await completion_func(chat_response=response)
223+
asyncio.create_task(completion_func(chat_response=response))
223224
yield response
224225
return
225226
elif conversation_commands == [ConversationCommand.Online] and is_none_or_empty(online_results):
226227
response = prompts.no_online_results_found.format()
227228
if completion_func:
228-
await completion_func(chat_response=response)
229+
asyncio.create_task(completion_func(chat_response=response))
229230
yield response
230231
return
231232

@@ -277,7 +278,7 @@ async def converse_openai(
277278

278279
# Call completion_func once we finish streaming and have the full response
279280
if completion_func:
280-
await completion_func(chat_response=full_response)
281+
asyncio.create_task(completion_func(chat_response=full_response))
281282

282283

283284
def clean_response_schema(schema: BaseModel | dict) -> dict:

src/khoj/routers/api_chat.py

+19-15
Original file line numberDiff line numberDiff line change
@@ -998,22 +998,26 @@ def collect_telemetry():
998998
return
999999

10001000
llm_response = construct_automation_created_message(automation, crontime, query_to_run, subject)
1001-
await save_to_conversation_log(
1002-
q,
1003-
llm_response,
1004-
user,
1005-
meta_log,
1006-
user_message_time,
1007-
intent_type="automation",
1008-
client_application=request.user.client_app,
1009-
conversation_id=conversation_id,
1010-
inferred_queries=[query_to_run],
1011-
automation_id=automation.id,
1012-
query_images=uploaded_images,
1013-
train_of_thought=train_of_thought,
1014-
raw_query_files=raw_query_files,
1015-
tracer=tracer,
1001+
# Trigger task to save conversation to DB
1002+
asyncio.create_task(
1003+
save_to_conversation_log(
1004+
q,
1005+
llm_response,
1006+
user,
1007+
meta_log,
1008+
user_message_time,
1009+
intent_type="automation",
1010+
client_application=request.user.client_app,
1011+
conversation_id=conversation_id,
1012+
inferred_queries=[query_to_run],
1013+
automation_id=automation.id,
1014+
query_images=uploaded_images,
1015+
train_of_thought=train_of_thought,
1016+
raw_query_files=raw_query_files,
1017+
tracer=tracer,
1018+
)
10161019
)
1020+
# Send LLM Response
10171021
async for result in send_llm_response(llm_response, tracer.get("usage")):
10181022
yield result
10191023
return

0 commit comments

Comments (0)