khanh nguyen / Openai Service
Commit cd31351c
authored Dec 13, 2025 by server1

KHANHNQ: Update return token when chat

parent 51287934

Changes: 4 files
ai_service/openai_handler.py

...
@@ -189,7 +189,7 @@ class OpenAIHandler:
         pass
 
-    def chat(self, chat_id: str, input, model: str, on_response_callback: Callable[[str, str, str, str], None], spread_sheet_file_ids: List[str] = [],):
+    def chat(self, chat_id: str, input, model: str, on_response_callback: Callable[[str, str, str, str, Optional[str], str], None], spread_sheet_file_ids: List[str] = [],):
         if model:
             self.model = model
...
@@ -207,9 +207,9 @@ class OpenAIHandler:
         )  # type: ignore
         for event in stream:
             if event.type == "response.output_text.delta":
-                on_response_callback(self.conversation_id, chat_id, event.delta, ResponseType.CONTENT)
+                on_response_callback(self.conversation_id, chat_id, event.delta, ResponseType.CONTENT, None, model)
             if event.type == "response.output_text.done":
-                on_response_callback(self.conversation_id, chat_id, event.text, ResponseType.DONE)
+                on_response_callback(self.conversation_id, chat_id, event.text, ResponseType.DONE, input[-1].get("content")[0].get("text"), model)
 
     # def file_handler(self, url: str):
     #     file = self.client.files.create(
...
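The callback type widens from Callable[[str, str, str, str], None] to Callable[[str, str, str, str, Optional[str], str], None]: the two new positional arguments carry the prompt text (None on streaming deltas, populated only on the final done event) and the model name. A minimal sketch of a compatible callback; print_callback and messages are hypothetical names, not part of the commit:

from typing import Optional

def print_callback(conversation_id: str, chat_id: str, text: str,
                   event_type: str, input_text: Optional[str], model: str) -> None:
    # input_text is None for "response.output_text.delta" events and is only
    # set on "response.output_text.done", together with the request's model.
    if input_text is not None:
        print(f"[{model}] prompt text: {input_text!r}")
    print(f"[{event_type}] {conversation_id}/{chat_id}: {text}")

# handler.chat(chat_id="chat-1", input=messages, model="gpt-5",
#              on_response_callback=print_callback)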
ai_service/openai_service_management.py

 import base64
 import threading
-from typing import Dict, List
+from typing import Dict, List, Optional
 
 from ai_service.file_management import FileManagement
+from ai_service.token_caculator import TokenCalculator
 from commonkit.logging.logging_config import LogConfig
 from dto.chat_input_dto import ChatInputDTO
 from dto.chat_dto import ChatDTO
...
@@ -22,6 +23,7 @@ from dto.create_conversation_name_result_dto import CreateConversationNameResult
 from enums.content_types import ContentTypes
 from enums.input_role import InputRoles
 from enums.media_type import MediaType
+from enums.response_event_type import ResponseType
 from enums.warning_result import WarningResult
 
 log_config = LogConfig(__name__)
...
@@ -195,13 +197,17 @@ class OpenAIServiceManagement():
             on_response_callback=self.on_response_callback)
 
-    def on_response_callback(self, conversation_id: str, chat_id: str, text: str, event_type: str):
+    def on_response_callback(self, conversation_id: str, chat_id: str, text: str, event_type: str, input_text: Optional[str] = None, model: str = "gpt-5"):
         chat_response_dto = ChatResponseDTO()
         chat_response_dto.conversationId = conversation_id
         chat_response_dto.chatId = chat_id
         chat_response_dto.text = text
         chat_response_dto.eventType = event_type
+        if event_type == ResponseType.DONE:
+            if input_text:
+                chat_response_dto.inputTokenCount = TokenCalculator.count(text=input_text, model=model)
+            chat_response_dto.outputTokenCount = TokenCalculator.count(text=text, model=model)
         logger.info(chat_response_dto)
         rbmq_message_dict = {
             "event": AIMessageType.CHAT,
...
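Only the DONE event triggers token counting: delta events arrive with input_text=None, and inputTokenCount is computed from input[-1].get("content")[0].get("text"), i.e. the text of the last input message rather than the whole conversation. A rough sketch of the resulting call sequence, assuming service is an OpenAIServiceManagement instance (the IDs and texts are illustrative):

from enums.response_event_type import ResponseType

# Streaming deltas: no prompt text is passed, so no token counting happens.
service.on_response_callback("conv-1", "chat-1", "Par", ResponseType.CONTENT, None, "gpt-5")
service.on_response_callback("conv-1", "chat-1", "is.", ResponseType.CONTENT, None, "gpt-5")
# The final DONE event carries the prompt text, so inputTokenCount and
# outputTokenCount are computed exactly once per chat turn.
service.on_response_callback("conv-1", "chat-1", "Paris.", ResponseType.DONE,
                             "What is the capital of France?", "gpt-5")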
ai_service/token_caculator.py
0 → 100644

# Token calculator utility
# Requires: tiktoken (recommended). Falls back to a simple whitespace tokenizer if tiktoken is not available.
# pip install tiktoken
from typing import Optional


class TokenCalculator:

    @staticmethod
    def get_encoder(model_name: str):
        """
        Try to return a tiktoken encoder for the given model_name.
        If tiktoken is not installed or encoding_for_model fails, return a simple fallback encoder.
        The encoder returned is a callable that takes a string and returns a list/iterable of token ids.
        """
        try:
            import tiktoken  # type: ignore
            try:
                enc = tiktoken.encoding_for_model(model_name)
            except Exception:
                # Fall back to the generic cl100k_base encoding
                enc = tiktoken.get_encoding("cl100k_base")
            return lambda text: enc.encode(text)
        except Exception:
            # Fallback: whitespace-based tokenizer (less accurate)
            return lambda text: text.split()

    @staticmethod
    def count(text: str, model: str = "gpt-5") -> int:
        """
        Count tokens for a single string using the specified model.

        Parameters:
            text: input string (message input or output)
            model: model name (e.g., "gpt-3.5-turbo"); defaults to "gpt-5"

        Returns:
            int: estimated token count
        """
        if text is None:
            return 0
        encoder = TokenCalculator.get_encoder(model)
        try:
            tokens = encoder(text)
            # If the encoder returns encoded ids (a list of ints), take its length
            return len(tokens)
        except Exception:
            # As a last resort, fall back to a simple split
            return len(text.split())
\ No newline at end of file
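A quick usage sketch. Two hedges apply: depending on the installed tiktoken version, encoding_for_model("gpt-5") may raise and silently fall back to cl100k_base, so the counts are estimates; and with no tiktoken at all, counting degrades to whitespace splitting.

from ai_service.token_caculator import TokenCalculator

prompt = "What is the capital of France?"
answer = "The capital of France is Paris."

# With tiktoken installed these are BPE token counts (via encoding_for_model
# or the cl100k_base fallback); without it, they are whitespace word counts.
print(TokenCalculator.count(text=prompt, model="gpt-5"))
print(TokenCalculator.count(text=answer, model="gpt-5"))
print(TokenCalculator.count(text=None))  # returns 0 by design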
dto/chat_response_dto.py

...
@@ -6,4 +6,6 @@ class ChatResponseDTO(BaseDTO):
     text: str = None
     conversationId: str = None
     chatId: str = None
-    eventType: str = None
\ No newline at end of file
+    eventType: str = None
+    inputTokenCount: int = None
+    outputTokenCount: int = None
\ No newline at end of file
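With the two new fields, a DONE-event ChatResponseDTO now carries token counts alongside the text. A hypothetical shape of the serialized message after this change (field values are illustrative, not taken from the commit):

# Illustrative DONE-event payload; counts come from TokenCalculator.count().
done_payload = {
    "conversationId": "conv-1",
    "chatId": "chat-1",
    "text": "The capital of France is Paris.",
    "eventType": "DONE",
    "inputTokenCount": 8,
    "outputTokenCount": 8,
}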