# Databricks notebook source
# MAGIC %md
# MAGIC # Manage the Mistral-7B-Instruct model with MLflow on Databricks
# MAGIC
# MAGIC The [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) Large Language Model (LLM) is an instruct fine-tuned version of the [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) generative text model, fine-tuned on a variety of publicly available conversation datasets.
# MAGIC
# MAGIC Environment for this notebook:
# MAGIC - Runtime: 14.0 GPU ML Runtime
# MAGIC - Instance: `g5.4xlarge` on AWS, `Standard_NV36ads_A10_v5` on Azure
# COMMAND ----------
# MAGIC %pip install -U "mlflow-skinny[databricks]>=2.6.0"
# MAGIC %pip install -U transformers==4.34.0
# COMMAND ----------
dbutils.library.restartPython()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Log the model to MLflow
# COMMAND ----------
# Pin the revision commit hash for reproducibility, since the uploader may update the model later.
# You can find the commit history of Mistral-7B-Instruct-v0.1 at https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/commits/main
model_name = "mistralai/Mistral-7B-Instruct-v0.1"
revision = "3dc28cf29d2edd31a0a7b8f0b21637059815b4d5"
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load model
model = AutoModelForCausalLM.from_pretrained(model_name, revision=revision, torch_dtype=torch.bfloat16,
                                             cache_dir="/local_disk0/.cache/huggingface/")
tokenizer = AutoTokenizer.from_pretrained(model_name, revision=revision)
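# COMMAND ----------
# MAGIC %md
# MAGIC Optional sanity check (a minimal sketch): run a short greedy generation with the freshly
# MAGIC loaded model and tokenizer before logging. It assumes it is acceptable to move the model to
# MAGIC the GPU at this point; the `mlflow.transformers.log_model` call below is expected to save
# MAGIC the weights regardless of which device they are on.
# COMMAND ----------
# Move the model to GPU if one is available, then generate a short completion
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
test_inputs = tokenizer("What is Machine Learning?", return_tensors="pt").to(device)
with torch.no_grad():
    test_outputs = model.generate(**test_inputs, max_new_tokens=50, do_sample=False)
print(tokenizer.decode(test_outputs[0], skip_special_tokens=True))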
# COMMAND ----------
# Define the prompt template to get the expected features and performance from the instruct model.
# The template follows the Llama-2 chat format; see the reference code for details: https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212
DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
def build_prompt(instruction):
    return f"""<s>[INST]<<SYS>>\n{DEFAULT_SYSTEM_PROMPT}\n<</SYS>>\n\n\n{instruction}[/INST]\n"""
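# COMMAND ----------
# MAGIC %md
# MAGIC For illustration only (not required for logging), the cell below prints the rendered prompt
# MAGIC for a sample instruction so the template above is easy to inspect.
# COMMAND ----------
print(build_prompt("What is Machine Learning?"))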
# COMMAND ----------
import mlflow
from mlflow.models import infer_signature
# Define model signature including params
input_example = {"prompt": build_prompt("What is Machine Learning?")}
inference_config = {
    "temperature": 1.0,
    "max_new_tokens": 100,
    "do_sample": True,
}
signature = infer_signature(
    model_input=input_example,
    model_output="Machine Learning is...",
    params=inference_config
)
# Log the model with its details such as artifacts, pip requirements and input example
with mlflow.start_run() as run:
    mlflow.transformers.log_model(
        transformers_model={
            "model": model,
            "tokenizer": tokenizer,
        },
        task="text-generation",
        artifact_path="model",
        pip_requirements=["torch==2.0.1", "transformers==4.34.0", "accelerate==0.21.0", "torchvision==0.15.2"],
        input_example=input_example,
        signature=signature,
        # Add the metadata task so that the model serving endpoint created later will be optimized
        metadata={"task": "llm/v1/completions"}
    )
# COMMAND ----------
# MAGIC %md
# MAGIC ## Register the model to Unity Catalog
# MAGIC By default, MLflow registers models in the Databricks workspace model registry. To register models in Unity Catalog instead, we follow the [documentation](https://docs.databricks.com/machine-learning/manage-model-lifecycle/index.html) and set the registry URI to Databricks Unity Catalog.
# MAGIC
# MAGIC Registering a model in Unity Catalog has [several requirements](https://docs.databricks.com/machine-learning/manage-model-lifecycle/index.html#requirements), such as Unity Catalog being enabled in your workspace.
# MAGIC
# COMMAND ----------
# Configure MLflow Python client to register model in Unity Catalog
import mlflow
mlflow.set_registry_uri("databricks-uc")
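# COMMAND ----------
# MAGIC %md
# MAGIC A minimal sketch (assuming you have the required Unity Catalog privileges): make sure the
# MAGIC target catalog and schema for `models.default.mistral_7b_instruct` exist before registering.
# MAGIC Skip this cell if they are already provisioned.
# COMMAND ----------
# Create the target catalog and schema if they do not exist yet (requires UC privileges)
spark.sql("CREATE CATALOG IF NOT EXISTS models")
spark.sql("CREATE SCHEMA IF NOT EXISTS models.default")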
# COMMAND ----------
# Register model to Unity Catalog
# This may take 2 minutes to complete
# Note that the UC model name follows the pattern <catalog_name>.<schema_name>.<model_name>,
# corresponding to the catalog, schema, and registered model name
registered_name = "models.default.mistral_7b_instruct"
result = mlflow.register_model(
    "runs:/" + run.info.run_id + "/model",
    registered_name,
)
# COMMAND ----------
from mlflow import MlflowClient
client = MlflowClient()
# Choose the right model version registered in the above cell.
client.set_registered_model_alias(name=registered_name, alias="Champion", version=result.version)
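# COMMAND ----------
# MAGIC %md
# MAGIC Optional check (a minimal sketch): confirm that the `Champion` alias now points at the
# MAGIC version registered above.
# COMMAND ----------
champion_version = client.get_model_version_by_alias(name=registered_name, alias="Champion")
print(champion_version.name, champion_version.version)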
# COMMAND ----------
# MAGIC %md
# MAGIC ## Load the model from Unity Catalog
# COMMAND ----------
import mlflow
loaded_model = mlflow.pyfunc.load_model(f"models:/{registered_name}@Champion")
# Make a prediction using the loaded model
loaded_model.predict(
    {"prompt": "What is a large language model?"},
    params={
        "temperature": 0.5,
        "max_new_tokens": 100,
    }
)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create Optimized Model Serving Endpoint
# MAGIC Once the model is registered, we can use the API to create a Databricks GPU Model Serving Endpoint that serves the `Mistral-7B-Instruct` model.
# MAGIC
# MAGIC Note that the deployment below requires GPU model serving. For more information on GPU model serving, see the [documentation](https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu). The feature is in Public Preview.
# MAGIC
# MAGIC Models in the Mistral family are supported for Optimized LLM Serving, which can provide an order-of-magnitude improvement in throughput and latency. For more information, see the [documentation](https://docs.databricks.com/en/machine-learning/model-serving/llm-optimized-model-serving.html). In this section, the endpoint has optimized LLM serving enabled by default; to disable it, remove the `metadata={"task": "llm/v1/completions"}` argument from the `log_model` call and run the notebook again.
# COMMAND ----------
# Provide a name to the serving endpoint
endpoint_name = 'mistral-7b-instruct'
# COMMAND ----------
databricks_url = dbutils.notebook.entry_point.getDbutils().notebook().getContext().apiUrl().getOrElse(None)
token = dbutils.notebook.entry_point.getDbutils().notebook().getContext().apiToken().getOrElse(None)
# COMMAND ----------
import requests
import json
deploy_headers = {'Authorization': f'Bearer {token}', 'Content-Type': 'application/json'}
deploy_url = f'{databricks_url}/api/2.0/serving-endpoints'
model_version = result # the returned result of mlflow.register_model
served_name = f'{model_version.name.replace(".", "_")}_{model_version.version}'
# Specify the type of compute (CPU, GPU_SMALL, GPU_MEDIUM, etc.)
# Choose `GPU_MEDIUM` on AWS, and `GPU_LARGE` on Azure
workload_type = "GPU_LARGE"
endpoint_config = {
    "name": endpoint_name,
    "config": {
        "served_models": [{
            "name": served_name,
            "model_name": model_version.name,
            "model_version": model_version.version,
            "workload_type": workload_type,
            "workload_size": "Small",
            "scale_to_zero_enabled": False
        }]
    }
}
endpoint_json = json.dumps(endpoint_config, indent=' ')
# Send a POST request to the API
deploy_response = requests.request(method='POST', headers=deploy_headers, url=deploy_url, data=endpoint_json)
if deploy_response.status_code != 200:
    raise Exception(f'Request failed with status {deploy_response.status_code}, {deploy_response.text}')
# Show the response of the POST request
# When first creating the serving endpoint, it should show that the state 'ready' is 'NOT_READY'
# You can check the status on the Databricks model serving endpoint page; it is expected to take about 35 minutes for the serving endpoint to become ready
print(deploy_response.json())
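# COMMAND ----------
# MAGIC %md
# MAGIC The two cells below are a minimal sketch of how the new endpoint might be polled and then
# MAGIC queried once it is ready. They assume the `GET /api/2.0/serving-endpoints/{name}` and
# MAGIC `POST /serving-endpoints/{name}/invocations` REST routes with a completions-style payload;
# MAGIC adjust them to the serving API available in your workspace.
# COMMAND ----------
# Poll the endpoint until it reports READY (may take ~35 minutes); adjust the timeout as needed
import time
status_url = f"{databricks_url}/api/2.0/serving-endpoints/{endpoint_name}"
timeout_minutes = 60
for _ in range(timeout_minutes * 2):
    state = requests.get(status_url, headers=deploy_headers).json().get("state", {})
    print(state)
    if state.get("ready") == "READY":
        break
    time.sleep(30)
# COMMAND ----------
# Send a completion request to the endpoint once it is ready
invocations_url = f"{databricks_url}/serving-endpoints/{endpoint_name}/invocations"
query = {"prompt": build_prompt("What is Machine Learning?"), "max_tokens": 100, "temperature": 0.5}
query_response = requests.post(invocations_url, headers=deploy_headers, json=query)
print(query_response.json())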