2 files changed: +11 −8

@@ -72,7 +72,7 @@ def load_llm(
         llm = FakeListChatModel(
             verbose=llm_verbosity,
             responses=[f"Fake answer n°{i}: {lorem_ipsum}" for i in range(1, 100)],
-            callbacks=[PriceCountingCallback(verbose=llm_verbosity)],
+            callbacks=[PriceCountingCallback(verbose=llm_verbosity)] + langfuse_callback_holder,
             disable_streaming=True,  # Not needed and might break cache
             cache=False,
             **extra_model_args,
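Why this change is safe by default: `langfuse_callback_holder` appears, from this diff, to be a module-level list that stays empty unless Langfuse is configured, so concatenating it onto the callbacks list attaches nothing in the ordinary case. A minimal sketch of that pattern (the holder name comes from the diff; the stand-in values are hypothetical):

langfuse_callback_holder: list = []  # filled later, only if Langfuse is configured

base_callbacks = ["price_counter"]  # stand-in for PriceCountingCallback(...)
assert base_callbacks + langfuse_callback_holder == ["price_counter"]  # no-op when empty

langfuse_callback_holder.append("langfuse_handler")  # stand-in for a LangfuseCallback
assert base_callbacks + langfuse_callback_holder == ["price_counter", "langfuse_handler"]

Note that `+` builds a new list at model-construction time, so the holder must be filled before `load_llm()` runs; appending afterwards would not reach an already-built model.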
@@ -122,7 +122,7 @@ def load_llm(
             cache=llm_cache,
             disable_streaming=True,  # Not needed and might break cache
             verbose=llm_verbosity,
-            callbacks=[PriceCountingCallback(verbose=llm_verbosity)] + langfuse_callback_holder,
+            callbacks=[PriceCountingCallback(verbose=llm_verbosity)] + langfuse_callback_holder,  # use langchain's callback for langfuse
             **extra_model_args,
         )
     else:
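For the openai backend, Langfuse tracing therefore goes through langchain's callback mechanism. A hedged sketch of what that wiring amounts to, assuming the langfuse v2 SDK and the langchain-openai package (model name and prompt are placeholders):

import os
from langfuse.callback import CallbackHandler
from langchain_openai import ChatOpenAI

handler = CallbackHandler(
    secret_key=os.environ["LANGFUSE_SECRET_KEY"],
    public_key=os.environ["LANGFUSE_PUBLIC_KEY"],
)
llm = ChatOpenAI(model="gpt-4o-mini", callbacks=[handler])  # placeholder model
llm.invoke("Say hello")  # this call is traced to Langfuse by the handler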
@@ -136,7 +136,7 @@ def load_llm(
             cache=llm_cache,
             verbose=llm_verbosity,
             tags=tags,
-            callbacks=[PriceCountingCallback(verbose=llm_verbosity)] + langfuse_callback_holder,
+            callbacks=[PriceCountingCallback(verbose=llm_verbosity)],  # + langfuse_callback_holder: do not use langchain's callback, as chatlitellm seems buggy; we use litellm's backend directly instead
             **extra_model_args,
         )
     litellm.drop_params = True
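For the chatlitellm path, the diff drops the langchain handler and relies on litellm's native Langfuse integration instead, which is enabled by name and reads the LANGFUSE_* environment variables itself. A minimal sketch (the model name is a placeholder):

import litellm

litellm.success_callback = ["langfuse"]  # trace successful calls
litellm.failure_callback = ["langfuse"]  # trace failed calls too

response = litellm.completion(
    model="openai/gpt-4o-mini",  # placeholder
    messages=[{"role": "user", "content": "Say hello"}],
)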
@@ -769,11 +769,14 @@ def create_langfuse_callback(version: str) -> None:
     ) and not is_private:
         red("Activating langfuse callbacks")
         try:
-            # # litellm's callbacks seem more flawed than langchain's
-            # import langfuse
-            # litellm.success_callback = ["langfuse"]
-            # litellm.failure_callback = ["langfuse"]
-
+            # use litellm's callbacks for the chatlitellm backend
+            import litellm
+            import langfuse
+            litellm.success_callback = ["langfuse"]
+            litellm.failure_callback = ["langfuse"]
+
+            # langchain's callback is still used for openai's backend,
+            # BUT as of October 2024 it seems buggy with chatlitellm: the model name does not seem to be passed
             from langfuse.callback import CallbackHandler as LangfuseCallback
             langfuse_callback = LangfuseCallback(
                 secret_key=os.environ["LANGFUSE_SECRET_KEY"],
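Putting the two files together, the activation function presumably both configures litellm's callbacks and fills the holder consumed by `load_llm()`. A hedged reconstruction, under the assumptions that the LANGFUSE_* variables gate activation and that `version` maps to Langfuse's `release` field (neither is confirmed by the visible context):

import os
import litellm
from langfuse.callback import CallbackHandler as LangfuseCallback

langfuse_callback_holder: list = []  # consumed by load_llm() via list concatenation

def create_langfuse_callback(version: str) -> None:
    # hypothetical gate; the real condition is outside the visible hunk
    if "LANGFUSE_SECRET_KEY" not in os.environ:
        return
    # chatlitellm path: litellm's own langfuse callbacks
    litellm.success_callback = ["langfuse"]
    litellm.failure_callback = ["langfuse"]
    # openai path: langchain's langfuse handler, exposed through the holder
    langfuse_callback_holder.append(
        LangfuseCallback(
            secret_key=os.environ["LANGFUSE_SECRET_KEY"],
            public_key=os.environ["LANGFUSE_PUBLIC_KEY"],
            release=version,  # assumption: version reported as the Langfuse release
        )
    )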