diff --git a/README.rst b/README.rst
index 08322f51f2..1323a065d8 100644
--- a/README.rst
+++ b/README.rst
@@ -194,6 +194,8 @@ Create estimators for linear models by using the `bigframes.ml.linear_model modu
 Create estimators for LLMs by using the `bigframes.ml.llm module `_.

+* Use the `GeminiTextGenerator class `_ to create Gemini text generator models. Use these models
+  for text generation tasks.
 * Use the `PaLM2TextGenerator class `_ to create PaLM2 text generator models. Use these models
   for text generation tasks.
 * Use the `PaLM2TextEmbeddingGenerator class `_ to create PaLM2 text embedding generator models.
diff --git a/bigframes/ml/llm.py b/bigframes/ml/llm.py
index 3607000323..f44310f6a5 100644
--- a/bigframes/ml/llm.py
+++ b/bigframes/ml/llm.py
@@ -41,6 +41,8 @@
     _EMBEDDING_GENERATOR_GECKO_MULTILINGUAL_ENDPOINT,
 )

+_GEMINI_PRO_ENDPOINT = "gemini-pro"
+
 _ML_GENERATE_TEXT_STATUS = "ml_generate_text_status"
 _ML_EMBED_TEXT_STATUS = "ml_embed_text_status"
@@ -396,3 +398,175 @@ def to_gbq(
         new_model = self._bqml_model.copy(model_name, replace)
         return new_model.session.read_gbq_model(model_name)
+
+
+@log_adapter.class_logger
+class GeminiTextGenerator(base.Predictor):
+    """Gemini text generator LLM model.
+
+    Args:
+        session (bigframes.Session or None):
+            BQ session to create the model. If None, use the global default session.
+        connection_name (str or None):
+            Connection to connect with remote service. str of the format
+            <PROJECT_NUMBER/PROJECT_ID>.<LOCATION>.<CONNECTION_ID>. If None, use the default
+            connection in the session context. BigQuery DataFrame will try to create the
+            connection and attach permission if the connection isn't fully set up.
+    """
+
+    def __init__(
+        self,
+        session: Optional[bigframes.Session] = None,
+        connection_name: Optional[str] = None,
+    ):
+        self.session = session or bpd.get_global_session()
+        self._bq_connection_manager = clients.BqConnectionManager(
+            self.session.bqconnectionclient, self.session.resourcemanagerclient
+        )
+
+        connection_name = connection_name or self.session._bq_connection
+        self.connection_name = self._bq_connection_manager.resolve_full_connection_name(
+            connection_name,
+            default_project=self.session._project,
+            default_location=self.session._location,
+        )
+
+        self._bqml_model_factory = globals.bqml_model_factory()
+        self._bqml_model: core.BqmlModel = self._create_bqml_model()
+
+    def _create_bqml_model(self):
+        # Parse and create the connection if needed.
+        if not self.connection_name:
+            raise ValueError(
+                "Must provide connection_name, either in constructor or through session options."
+            )
+        connection_name_parts = self.connection_name.split(".")
+        if len(connection_name_parts) != 3:
+            raise ValueError(
+                f"connection_name must be of the format <project_id>.<location>.<connection_id>, got {self.connection_name}."
+            )
+        self._bq_connection_manager.create_bq_connection(
+            project_id=connection_name_parts[0],
+            location=connection_name_parts[1],
+            connection_id=connection_name_parts[2],
+            iam_role="aiplatform.user",
+        )
+
+        options = {"endpoint": _GEMINI_PRO_ENDPOINT}
+
+        return self._bqml_model_factory.create_remote_model(
+            session=self.session, connection_name=self.connection_name, options=options
+        )
+
+    @classmethod
+    def _from_bq(
+        cls, session: bigframes.Session, model: bigquery.Model
+    ) -> GeminiTextGenerator:
+        assert model.model_type == "MODEL_TYPE_UNSPECIFIED"
+        assert "remoteModelInfo" in model._properties
+        assert "connection" in model._properties["remoteModelInfo"]
+
+        # Parse the remote model endpoint.
+        model_connection = model._properties["remoteModelInfo"]["connection"]
+
+        text_generator_model = cls(session=session, connection_name=model_connection)
+        text_generator_model._bqml_model = core.BqmlModel(session, model)
+        return text_generator_model
+
+    def predict(
+        self,
+        X: Union[bpd.DataFrame, bpd.Series],
+        temperature: float = 0.9,
+        max_output_tokens: int = 8192,
+        top_k: int = 40,
+        top_p: float = 1.0,
+    ) -> bpd.DataFrame:
+        """Predict the result from input DataFrame.
+
+        Args:
+            X (bigframes.dataframe.DataFrame or bigframes.series.Series):
+                Input DataFrame or Series, which contains only one column of prompts.
+                Prompts can include preamble, questions, suggestions, instructions, or examples.
+
+            temperature (float, default 0.9):
+                The temperature is used for sampling during response generation, which occurs when top-P and top-K are applied. Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that require a more deterministic and less open-ended or creative response, while higher temperatures can lead to more diverse or creative results. A temperature of 0 is deterministic: the highest-probability response is always selected.
+                Default 0.9. Possible values [0.0, 1.0].
+
+            max_output_tokens (int, default 8192):
+                Maximum number of tokens that can be generated in the response. A token is approximately four characters. 100 tokens correspond to roughly 60-80 words.
+                Specify a lower value for shorter responses and a higher value for potentially longer responses.
+                Default 8192. Possible values are in the range [1, 8192].
+
+            top_k (int, default 40):
+                Top-K changes how the model selects tokens for output. A top-K of 1 means the next selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-K of 3 means that the next token is selected from among the three most probable tokens by using temperature.
+                For each token selection step, the top-K tokens with the highest probabilities are sampled. Then tokens are further filtered based on top-P, with the final token selected using temperature sampling.
+                Specify a lower value for less random responses and a higher value for more random responses.
+                Default 40. Possible values [1, 40].
+
+            top_p (float, default 1.0):
+                Top-P changes how the model selects tokens for output. Tokens are selected from the most (see top-K) to least probable until the sum of their probabilities equals the top-P value. For example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 and the top-P value is 0.5, then the model will select either A or B as the next token by using temperature and exclude C as a candidate.
+                Specify a lower value for less random responses and a higher value for more random responses.
+                Default 1.0. Possible values [0.0, 1.0].
+
+
+        Returns:
+            bigframes.dataframe.DataFrame: DataFrame of shape (n_samples, n_input_columns + n_prediction_columns). Returns predicted values.
+        """
+
+        # Params reference: https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models
+        if temperature < 0.0 or temperature > 1.0:
+            raise ValueError(f"temperature must be [0.0, 1.0], but is {temperature}.")
+
+        if max_output_tokens not in range(1, 8193):
+            raise ValueError(
+                f"max_output_tokens must be [1, 8192] for Gemini model, but is {max_output_tokens}."
+            )
+
+        if top_k not in range(1, 41):
+            raise ValueError(f"top_k must be [1, 40], but is {top_k}.")
+
+        if top_p < 0.0 or top_p > 1.0:
+            raise ValueError(f"top_p must be [0.0, 1.0], but is {top_p}.")
+
+        (X,) = utils.convert_to_dataframe(X)
+
+        if len(X.columns) != 1:
+            raise ValueError(
+                f"Only support one column as input. {constants.FEEDBACK_LINK}"
+            )
+
+        # BQML identifies the column by name.
+        col_label = cast(blocks.Label, X.columns[0])
+        X = X.rename(columns={col_label: "prompt"})
+
+        options = {
+            "temperature": temperature,
+            "max_output_tokens": max_output_tokens,
+            "top_k": top_k,
+            "top_p": top_p,
+            "flatten_json_output": True,
+        }
+
+        df = self._bqml_model.generate_text(X, options)
+
+        if (df[_ML_GENERATE_TEXT_STATUS] != "").any():
+            warnings.warn(
+                f"Some predictions failed. Check column {_ML_GENERATE_TEXT_STATUS} for detailed status. You may want to filter the failed rows and retry.",
+                RuntimeWarning,
+            )
+
+        return df
+
+    def to_gbq(self, model_name: str, replace: bool = False) -> GeminiTextGenerator:
+        """Save the model to BigQuery.
+
+        Args:
+            model_name (str):
+                The name of the model.
+            replace (bool, default False):
+                Whether to replace the model if it already exists. Defaults to False.
+
+        Returns:
+            GeminiTextGenerator: Saved model."""
+
+        new_model = self._bqml_model.copy(model_name, replace)
+        return new_model.session.read_gbq_model(model_name)
diff --git a/bigframes/ml/loader.py b/bigframes/ml/loader.py
index 99a31922d8..dafda43e9d 100644
--- a/bigframes/ml/loader.py
+++ b/bigframes/ml/loader.py
@@ -55,6 +55,7 @@
         llm._TEXT_GENERATOR_BISON_32K_ENDPOINT: llm.PaLM2TextGenerator,
         llm._EMBEDDING_GENERATOR_GECKO_ENDPOINT: llm.PaLM2TextEmbeddingGenerator,
         llm._EMBEDDING_GENERATOR_GECKO_MULTILINGUAL_ENDPOINT: llm.PaLM2TextEmbeddingGenerator,
+        llm._GEMINI_PRO_ENDPOINT: llm.GeminiTextGenerator,
     }
 )
diff --git a/docs/templates/toc.yml b/docs/templates/toc.yml
index d387a513a1..e921279418 100644
--- a/docs/templates/toc.yml
+++ b/docs/templates/toc.yml
@@ -105,6 +105,8 @@
   - items:
     - name: Overview
       uid: bigframes.ml.llm
+    - name: GeminiTextGenerator
+      uid: bigframes.ml.llm.GeminiTextGenerator
     - name: PaLM2TextGenerator
       uid: bigframes.ml.llm.PaLM2TextGenerator
     - name: PaLM2TextEmbeddingGenerator
diff --git a/tests/system/small/ml/conftest.py b/tests/system/small/ml/conftest.py
index 422ea6f1f4..8bf08906f9 100644
--- a/tests/system/small/ml/conftest.py
+++ b/tests/system/small/ml/conftest.py
@@ -267,6 +267,11 @@ def palm2_embedding_generator_multilingual_model(
     )


+@pytest.fixture(scope="session")
+def gemini_text_generator_model(session, bq_connection) -> llm.GeminiTextGenerator:
+    return llm.GeminiTextGenerator(session=session, connection_name=bq_connection)
+
+
 @pytest.fixture(scope="session")
 def linear_remote_model_params() -> dict:
     # Pre-deployed endpoint of linear reg model in Vertex.
diff --git a/tests/system/small/ml/test_llm.py b/tests/system/small/ml/test_llm.py
index 805cee4fec..db959b854e 100644
--- a/tests/system/small/ml/test_llm.py
+++ b/tests/system/small/ml/test_llm.py
@@ -272,3 +272,42 @@ def test_embedding_generator_predict_series_success(
     series = df["text_embedding"]
     value = series[0]
     assert len(value) == 768
+
+
+def test_create_gemini_text_generator_model(
+    gemini_text_generator_model, dataset_id, bq_connection
+):
+    # Model creation doesn't return error.
+    assert gemini_text_generator_model is not None
+    assert gemini_text_generator_model._bqml_model is not None
+
+    # Save, then load, to ensure the configuration was kept.
+    reloaded_model = gemini_text_generator_model.to_gbq(
+        f"{dataset_id}.temp_text_model", replace=True
+    )
+    assert f"{dataset_id}.temp_text_model" == reloaded_model._bqml_model.model_name
+    assert reloaded_model.connection_name == bq_connection
+
+
+@pytest.mark.flaky(retries=2, delay=120)
+def test_gemini_text_generator_predict_default_params_success(
+    gemini_text_generator_model, llm_text_df
+):
+    df = gemini_text_generator_model.predict(llm_text_df).to_pandas()
+    assert df.shape == (3, 4)
+    assert "ml_generate_text_llm_result" in df.columns
+    series = df["ml_generate_text_llm_result"]
+    assert all(series.str.len() > 20)
+
+
+@pytest.mark.flaky(retries=2, delay=120)
+def test_gemini_text_generator_predict_with_params_success(
+    gemini_text_generator_model, llm_text_df
+):
+    df = gemini_text_generator_model.predict(
+        llm_text_df, temperature=0.5, max_output_tokens=100, top_k=20, top_p=0.5
+    ).to_pandas()
+    assert df.shape == (3, 4)
+    assert "ml_generate_text_llm_result" in df.columns
+    series = df["ml_generate_text_llm_result"]
+    assert all(series.str.len() > 20)
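For reviewers, a minimal usage sketch of the API this diff adds. It is illustrative only and not part of the change: the project and connection names below are placeholders, and it assumes a session whose BigQuery connection has (or can be granted) the aiplatform.user role, as _create_bqml_model arranges above.

# Illustrative sketch only (not part of this diff); names are placeholders.
import bigframes.pandas as bpd
from bigframes.ml.llm import GeminiTextGenerator

# Constructing the model creates (or reuses) a BQML remote model over the
# "gemini-pro" endpoint; omitting connection_name falls back to the
# session's default connection.
model = GeminiTextGenerator(connection_name="my-project.us.my-connection")

# predict() accepts a DataFrame or Series with exactly one column of prompts;
# the column is renamed to "prompt" internally before text generation.
prompts = bpd.DataFrame(
    {"prompt": ["What is BigQuery?", "Write a haiku about DataFrames."]}
)

# Defaults are temperature=0.9, max_output_tokens=8192, top_k=40, top_p=1.0;
# out-of-range values raise ValueError per the checks in predict().
result = model.predict(prompts, temperature=0.5, max_output_tokens=100)

# Generated text lands in "ml_generate_text_llm_result"; per-row failures are
# recorded in "ml_generate_text_status" and trigger a RuntimeWarning.
print(result["ml_generate_text_llm_result"].to_pandas())

The sketch mirrors the system tests above: the same predict() call shapes, the same output column, and to_gbq()/read_gbq_model for round-tripping a saved model.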