From f92b48075733f3fb367e4c218f8c58258dc0c1d9 Mon Sep 17 00:00:00 2001 From: Tim Sweeney Date: Wed, 3 Apr 2024 19:11:53 -0700 Subject: [PATCH] chore(weave): Pre-release doc script audit (#1476) * init * subtle fixes * minor style * give all example projects the same name * fix for model_output * a few more inits * a few more inits * added tuts * linted * fixed name --- docs/docs/guides/core-types/datasets.md | 1 + docs/docs/guides/core-types/models.md | 12 ++-- docs/docs/guides/tools/serve.md | 4 +- docs/docs/guides/tracking/objects.md | 18 ++--- docs/docs/guides/tracking/ops.md | 4 +- docs/docs/guides/tracking/tracing.md | 4 +- docs/docs/introduction.md | 2 - docs/docs/quickstart.md | 2 +- docs/docs/tutorial-eval.md | 15 ++-- tutorial_scripts/01_quickstart.py | 28 ++++++++ tutorial_scripts/02_in_app_quickstart.py | 28 ++++++++ tutorial_scripts/03_models.py | 32 +++++++++ tutorial_scripts/04_datasets.py | 36 ++++++++++ tutorial_scripts/05_eval_pipeline.py | 78 ++++++++++++++++++++ tutorial_scripts/06_eval_pipeline_all.py | 81 +++++++++++++++++++++ tutorial_scripts/07_objects.py | 10 +++ tutorial_scripts/08_ops.py | 10 +++ tutorial_scripts/09_tracing.py | 90 ++++++++++++++++++++++++ tutorial_scripts/10_model_serve.sh | 1 + tutorial_scripts/11_deploy.sh | 1 + 20 files changed, 432 insertions(+), 25 deletions(-) create mode 100644 tutorial_scripts/01_quickstart.py create mode 100644 tutorial_scripts/02_in_app_quickstart.py create mode 100644 tutorial_scripts/03_models.py create mode 100644 tutorial_scripts/04_datasets.py create mode 100644 tutorial_scripts/05_eval_pipeline.py create mode 100644 tutorial_scripts/06_eval_pipeline_all.py create mode 100644 tutorial_scripts/07_objects.py create mode 100644 tutorial_scripts/08_ops.py create mode 100644 tutorial_scripts/09_tracing.py create mode 100755 tutorial_scripts/10_model_serve.sh create mode 100644 tutorial_scripts/11_deploy.sh diff --git a/docs/docs/guides/core-types/datasets.md 
b/docs/docs/guides/core-types/datasets.md index 6266f5f4c31..7d63a47443b 100644 --- a/docs/docs/guides/core-types/datasets.md +++ b/docs/docs/guides/core-types/datasets.md @@ -9,6 +9,7 @@ hide_table_of_contents: true Easily update datasets with the UI and download the latest version locally with a simple API. This guide will show you how to: + - Publish `Dataset`s to W&B - Download the latest version - Iterate over examples diff --git a/docs/docs/guides/core-types/models.md b/docs/docs/guides/core-types/models.md index d35ac4aeb03..1825379539c 100644 --- a/docs/docs/guides/core-types/models.md +++ b/docs/docs/guides/core-types/models.md @@ -8,6 +8,7 @@ hide_table_of_contents: true A `Model` is a combination of data (which can include configuration, trained model weights, or other information) and code that defines how the model operates. By structuring your code to be compatible with this API, you benefit from a structured way to version your application so you can more systematically keep track of your experiments. To create a model in Weave, you need the following: + - a class that inherits from `weave.Model` - type definitions on all attributes - a typed `predict` function with `@weave.op()` decorator @@ -28,9 +29,10 @@ class YourModel(Model): ``` You can call the model as usual with: + ```python import weave -weave.init('project-name') +weave.init('intro-example') model = YourModel(attribute1='hello', attribute2=5) model.predict('world') @@ -41,12 +43,13 @@ This will track the model settings along with the inputs and outputs anytime you ## Automatic versioning of models When you change the attributes or the code that defines your model, these changes will be logged and the version will be updated. -This ensures that you can compare the predictions across different versions of your model. Use this to iterate on prompts or to try the latest LLM and compare predictions across different settings. 
+This ensures that you can compare the predictions across different versions of your model. Use this to iterate on prompts or to try the latest LLM and compare predictions across different settings. For example, here we create a new model: + ```python import weave -weave.init('project-name') +weave.init('intro-example') model = YourModel(attribute1='howdy', attribute2=10) model.predict('world') @@ -57,6 +60,7 @@ After calling this, you will see that you now have two versions of this Model in ## Serve models To serve a model, you can easily spin up a FastAPI server by calling: + ```bash weave serve ``` @@ -65,7 +69,7 @@ For additional instructions, see [serve](/guides/tools/serve). ## Track production calls -To separate production calls, you can add an additional attribute to the predictions for easy filtering in the UI or API. +To separate production calls, you can add an additional attribute to the predictions for easy filtering in the UI or API. ```python with weave.attributes({'env': 'production'}): diff --git a/docs/docs/guides/tools/serve.md b/docs/docs/guides/tools/serve.md index 54292017fd5..59587a4eb6b 100644 --- a/docs/docs/guides/tools/serve.md +++ b/docs/docs/guides/tools/serve.md @@ -11,7 +11,7 @@ Given a Weave ref to any Weave Model you can run: weave serve ``` -to run a FastAPI server for that model. +to run a FastAPI server for that model. Visit [http://0.0.0.0:9996/docs](http://0.0.0.0:9996/docs) to query the model interactively. ## Install FastAPI @@ -22,6 +22,7 @@ pip install fastapi uvicorn ## Serve Model In a terminal, call: + ```bash weave serve ``` @@ -30,4 +31,3 @@ Get your model ref by navigating to the model and copying it from the UI. It sho `weave:///your_entity/project-name/YourModel:` To use it, navigate to the Swagger UI link, click the predict endpoint and then click "Try it out!". 
- \ No newline at end of file diff --git a/docs/docs/guides/tracking/objects.md b/docs/docs/guides/tracking/objects.md index 0eed2aa7212..b32bd479752 100644 --- a/docs/docs/guides/tracking/objects.md +++ b/docs/docs/guides/tracking/objects.md @@ -10,8 +10,9 @@ Weave's serialization layer saves and versions Python objects. ## Publishing an object ```python -# Initialize tracking to the project 'cat-project' -weave.init('cat-project') +import weave +# Initialize tracking to the project 'intro-example' +weave.init('intro-example') # Save a list, giving it the name 'cat-names' weave.publish(['felix', 'jimbo', 'billie'], 'cat-names') ``` @@ -23,8 +24,9 @@ Saving an object with a name will create the first version of that object if it `weave.publish` returns a Ref. You can call `.get()` on any Ref to get the object back. You can construct a ref and then fetch the object back. + ```python -weave.init('cat-project') +weave.init('intro-example') cat_names = weave.ref('cat-names').get() ``` @@ -36,11 +38,10 @@ A fully qualified weave object ref uri looks like this: weave://///object/: ``` -- *entity*: wandb entity (username or team) -- *project*: wandb project -- *object_name*: object name -- *object_version*: either a version hash, a string like v0, v1..., or an alias like ":latest". All objects have the ":latest" alias. - +- _entity_: wandb entity (username or team) +- _project_: wandb project +- _object_name_: object name +- _object_version_: either a version hash, a string like v0, v1..., or an alias like ":latest". All objects have the ":latest" alias. Refs can be constructed with a few different styles @@ -48,7 +49,6 @@ Refs can be constructed with a few different styles - `weave.ref(:)`: requires `weave.init()` to have been called. 
- `weave.ref()`: can be constructed without calling weave.init - ## TODO - iterating through other versions of an object diff --git a/docs/docs/guides/tracking/ops.md b/docs/docs/guides/tracking/ops.md index cd45270c6b9..5b39e6b9668 100644 --- a/docs/docs/guides/tracking/ops.md +++ b/docs/docs/guides/tracking/ops.md @@ -10,11 +10,13 @@ A Weave op is a versioned function that automatically logs all calls. To create an op, decorate a python function with `weave.op()` ```python +import weave + @weave.op() def track_me(v): return v + 5 -weave.init('add5-GPT') +weave.init('intro-example') track_me(15) ``` diff --git a/docs/docs/guides/tracking/tracing.md b/docs/docs/guides/tracking/tracing.md index 72eabbe7111..244b442008d 100644 --- a/docs/docs/guides/tracking/tracing.md +++ b/docs/docs/guides/tracking/tracing.md @@ -57,14 +57,14 @@ def pokedex(name: str, prompt: str) -> str: return response.choices[0].message.content # highlight-next-line -weave.init('pokedex') +weave.init('intro-example') # Get data for a specific Pokémon pokemon_data = pokedex(random.choice(POKEMON), PROMPT) ``` ## Add additional attributes -When calling tracked functions, you can add additional metadata to the call by using the `weave.attributes` context manager. +When calling tracked functions, you can add additional metadata to the call by using the `weave.attributes` context manager. For example, you can add a `user_id` to each call and then filter calls by user. In the example below, any function called within the context manager will have the `user_id` attribute set to `lukas` and `env` attribute set to `production`. diff --git a/docs/docs/introduction.md b/docs/docs/introduction.md index d623fb1eab3..846528a3924 100644 --- a/docs/docs/introduction.md +++ b/docs/docs/introduction.md @@ -7,8 +7,6 @@ hide_table_of_contents: true # Introduction -*🍲 This version of Weave is pre-release software. 
🍲* - Weave is a toolkit for developing Generative AI applications, built by [Weights & Biases](https://wandb.ai). Our goal is to bring rigor, best-practices, and composability to the inherently experimental process of developing Generative AI software, without introducing cognitive overhead. diff --git a/docs/docs/quickstart.md b/docs/docs/quickstart.md index 689d9d12d33..cbba6e8fec6 100644 --- a/docs/docs/quickstart.md +++ b/docs/docs/quickstart.md @@ -52,7 +52,7 @@ sentence = "There are many fruits that were found on the recently discovered pla extract_fruit(sentence) ``` -Now, every time you call this function, weave will automatically capture the input & output data and log any changes to the code. +Now, every time you call this function, weave will automatically capture the input & output data and log any changes to the code. Run this application and your console will output a link to view it within W&B. :::note diff --git a/docs/docs/tutorial-eval.md b/docs/docs/tutorial-eval.md index 09037fa23e2..8c2a975a901 100644 --- a/docs/docs/tutorial-eval.md +++ b/docs/docs/tutorial-eval.md @@ -52,6 +52,11 @@ class ExtractFruitsModel(weave.Model): You can instantiate `Model` objects as normal like this: ```python +import asyncio +import weave + +weave.init('intro-example') + model = ExtractFruitsModel(model_name='gpt-3.5-turbo-1106', prompt_template='Extract fields ("fruit": , "color": , "flavor": ) from the following text, as json: {sentence}') sentence = "There are many fruits that were found on the recently discovered planet Goocrux. There are neoskizzles that grow there, which are purple and taste like candy." 
@@ -94,9 +99,11 @@ Here `sentence` is passed to the model's predict function, and `target` is used import weave from weave.flow.scorer import MultiTaskBinaryClassificationF1 +weave.init('intro-example') + @weave.op() -def fruit_name_score(target: dict, prediction: dict) -> dict: - return {'correct': target['fruit'] == prediction['fruit']} +def fruit_name_score(target: dict, model_output: dict) -> dict: + return {'correct': target['fruit'] == model_output['fruit']} # highlight-next-line evaluation = weave.Evaluation( @@ -173,8 +180,8 @@ examples = [ # We define a scoring functions to compare our model predictions with a ground truth label. @weave.op() -def fruit_name_score(target: dict, prediction: dict) -> dict: - return {'correct': target['fruit'] == prediction['fruit']} +def fruit_name_score(target: dict, model_output: dict) -> dict: + return {'correct': target['fruit'] == model_output['fruit']} # Finally, we run an evaluation of this model. # This will generate a prediction for each input example, and then score it with each scoring function. diff --git a/tutorial_scripts/01_quickstart.py b/tutorial_scripts/01_quickstart.py new file mode 100644 index 00000000000..8d5d20b196a --- /dev/null +++ b/tutorial_scripts/01_quickstart.py @@ -0,0 +1,28 @@ +import weave +import json +from openai import OpenAI + + +@weave.op() +def extract_fruit(sentence: str) -> dict: + client = OpenAI() + + response = client.chat.completions.create( + model="gpt-3.5-turbo-1106", + messages=[ + { + "role": "system", + "content": "You will be provided with unstructured data, and your task is to parse it one JSON dictionary with fruit, color and flavor as keys.", + }, + {"role": "user", "content": sentence}, + ], + temperature=0.7, + response_format={"type": "json_object"}, + ) + extracted = response.choices[0].message.content + return json.loads(extracted) + + +weave.init("intro-example") +sentence = "There are many fruits that were found on the recently discovered planet Goocrux. 
There are neoskizzles that grow there, which are purple and taste like candy." +extract_fruit(sentence) diff --git a/tutorial_scripts/02_in_app_quickstart.py b/tutorial_scripts/02_in_app_quickstart.py new file mode 100644 index 00000000000..900f9188d63 --- /dev/null +++ b/tutorial_scripts/02_in_app_quickstart.py @@ -0,0 +1,28 @@ +import weave +import json +from openai import OpenAI + + +@weave.op() # 🐝 +def extract_fruit(sentence: str) -> dict: + client = OpenAI() + + response = client.chat.completions.create( + model="gpt-3.5-turbo-1106", + messages=[ + { + "role": "system", + "content": "You will be provided with unstructured data, and your task is to parse it one JSON dictionary with fruit, color and flavor as keys.", + }, + {"role": "user", "content": sentence}, + ], + temperature=0.7, + response_format={"type": "json_object"}, + ) + extracted = response.choices[0].message.content + return json.loads(extracted) + + +weave.init("intro-example") # 🐝 +sentence = "There are many fruits that were found on the recently discovered planet Goocrux. There are neoskizzles that grow there, which are purple and taste like candy." 
+extract_fruit(sentence) diff --git a/tutorial_scripts/03_models.py b/tutorial_scripts/03_models.py new file mode 100644 index 00000000000..a9ced442bcd --- /dev/null +++ b/tutorial_scripts/03_models.py @@ -0,0 +1,32 @@ +from weave import Model +import weave + + +class YourModel(Model): + attribute1: str + attribute2: int + + @weave.op() + def predict(self, input_data: str) -> dict: + # Model logic goes here + prediction = self.attribute1 + " " + input_data + return {"pred": prediction} + + +import weave + +weave.init("intro-example") + +model = YourModel(attribute1="hello", attribute2=5) +model.predict("world") + +import weave + +weave.init("intro-example") + +model = YourModel(attribute1="howdy", attribute2=10) +model.predict("world") + + +with weave.attributes({"env": "production"}): + model.predict("world") diff --git a/tutorial_scripts/04_datasets.py b/tutorial_scripts/04_datasets.py new file mode 100644 index 00000000000..eb0598f0fe2 --- /dev/null +++ b/tutorial_scripts/04_datasets.py @@ -0,0 +1,36 @@ +import weave +from weave import Dataset + +# Initialize Weave +weave.init("intro-example") + +# Create a dataset +dataset = Dataset( + name="grammar", + rows=[ + { + "id": "0", + "sentence": "He no likes ice cream.", + "correction": "He doesn't like ice cream.", + }, + { + "id": "1", + "sentence": "She goed to the store.", + "correction": "She went to the store.", + }, + { + "id": "2", + "sentence": "They plays video games all day.", + "correction": "They play video games all day.", + }, + ], +) + +# Publish the dataset +weave.publish(dataset) + +# Retrieve the dataset +dataset_ref = weave.ref("grammar").get() + +# Access a specific example +example_label = dataset_ref.rows[2]["sentence"] diff --git a/tutorial_scripts/05_eval_pipeline.py b/tutorial_scripts/05_eval_pipeline.py new file mode 100644 index 00000000000..a954ee35b9f --- /dev/null +++ b/tutorial_scripts/05_eval_pipeline.py @@ -0,0 +1,78 @@ +import json +import openai +import weave + 
+weave.init("intro-example") + + +class ExtractFruitsModel(weave.Model): + model_name: str + prompt_template: str + + @weave.op() + async def predict(self, sentence: str) -> dict: + client = openai.AsyncClient() + + response = await client.chat.completions.create( + model=self.model_name, + messages=[ + { + "role": "user", + "content": self.prompt_template.format(sentence=sentence), + } + ], + ) + result = response.choices[0].message.content + if result is None: + raise ValueError("No response from model") + parsed = json.loads(result) + return parsed + + +import asyncio + +model = ExtractFruitsModel( + model_name="gpt-3.5-turbo-1106", + prompt_template='Extract fields ("fruit": , "color": , "flavor": ) from the following text, as json: {sentence}', +) +sentence = "There are many fruits that were found on the recently discovered planet Goocrux. There are neoskizzles that grow there, which are purple and taste like candy." +print(asyncio.run(model.predict(sentence))) +# if you're in a Jupyter Notebook, run: +# await model.predict(sentence) + + +sentences = [ + "There are many fruits that were found on the recently discovered planet Goocrux. 
There are neoskizzles that grow there, which are purple and taste like candy.", + "Pounits are a bright green color and are more savory than sweet.", + "Finally, there are fruits called glowls, which have a very sour and bitter taste which is acidic and caustic, and a pale orange tinge to them.", +] +labels = [ + {"fruit": "neoskizzles", "color": "purple", "flavor": "candy"}, + {"fruit": "pounits", "color": "bright green", "flavor": "savory"}, + {"fruit": "glowls", "color": "pale orange", "flavor": "sour and bitter"}, +] +examples = [ + {"id": "0", "sentence": sentences[0], "target": labels[0]}, + {"id": "1", "sentence": sentences[1], "target": labels[1]}, + {"id": "2", "sentence": sentences[2], "target": labels[2]}, +] + +import weave +from weave.flow.scorer import MultiTaskBinaryClassificationF1 + + +@weave.op() +def fruit_name_score(target: dict, model_output: dict) -> dict: + return {"correct": target["fruit"] == model_output["fruit"]} + + +evaluation = weave.Evaluation( + dataset=examples, + scorers=[ + MultiTaskBinaryClassificationF1(class_names=["fruit", "color", "flavor"]), + fruit_name_score, + ], +) +print(asyncio.run(evaluation.evaluate(model))) +# if you're in a Jupyter Notebook, run: +# await evaluation.evaluate(model) diff --git a/tutorial_scripts/06_eval_pipeline_all.py b/tutorial_scripts/06_eval_pipeline_all.py new file mode 100644 index 00000000000..3dc6535d67c --- /dev/null +++ b/tutorial_scripts/06_eval_pipeline_all.py @@ -0,0 +1,81 @@ +import json +import asyncio +import weave +from weave.flow.scorer import MultiTaskBinaryClassificationF1 +import openai + +# We create a model class with one predict function. +# All inputs, predictions and parameters are automatically captured for easy inspection. 
+
+
+class ExtractFruitsModel(weave.Model):
+    model_name: str
+    prompt_template: str
+
+    @weave.op()
+    async def predict(self, sentence: str) -> dict:
+        client = openai.AsyncClient()
+
+        response = await client.chat.completions.create(
+            model=self.model_name,
+            messages=[
+                {
+                    "role": "user",
+                    "content": self.prompt_template.format(sentence=sentence),
+                }
+            ],
+            response_format={"type": "json_object"},
+        )
+        result = response.choices[0].message.content
+        if result is None:
+            raise ValueError("No response from model")
+        parsed = json.loads(result)
+        return parsed
+
+
+# We call init to begin capturing data in the project, intro-example.
+weave.init("intro-example")
+
+# We create our model with our system prompt.
+model = ExtractFruitsModel(
+    name="gpt4",
+    model_name="gpt-4-0125-preview",
+    prompt_template='Extract fields ("fruit": , "color": , "flavor": ) from the following text, as json: {sentence}',
+)
+sentences = [
+    "There are many fruits that were found on the recently discovered planet Goocrux. There are neoskizzles that grow there, which are purple and taste like candy.",
+    "Pounits are a bright green color and are more savory than sweet.",
+    "Finally, there are fruits called glowls, which have a very sour and bitter taste which is acidic and caustic, and a pale orange tinge to them.",
+]
+labels = [
+    {"fruit": "neoskizzles", "color": "purple", "flavor": "candy"},
+    {"fruit": "pounits", "color": "bright green", "flavor": "savory"},
+    {"fruit": "glowls", "color": "pale orange", "flavor": "sour and bitter"},
+]
+examples = [
+    {"id": "0", "sentence": sentences[0], "target": labels[0]},
+    {"id": "1", "sentence": sentences[1], "target": labels[1]},
+    {"id": "2", "sentence": sentences[2], "target": labels[2]},
+]
+# If you have already published the Dataset, you can run:
+# dataset = weave.ref('example_labels').get()
+
+# We define a scoring function to compare our model predictions with a ground truth label.
+@weave.op() +def fruit_name_score(target: dict, model_output: dict) -> dict: + return {"correct": target["fruit"] == model_output["fruit"]} + + +# Finally, we run an evaluation of this model. +# This will generate a prediction for each input example, and then score it with each scoring function. +evaluation = weave.Evaluation( + name="fruit_eval", + dataset=examples, + scorers=[ + MultiTaskBinaryClassificationF1(class_names=["fruit", "color", "flavor"]), + fruit_name_score, + ], +) +print(asyncio.run(evaluation.evaluate(model))) +# if you're in a Jupyter Notebook, run: +# await evaluation.evaluate(model) diff --git a/tutorial_scripts/07_objects.py b/tutorial_scripts/07_objects.py new file mode 100644 index 00000000000..d4ae79a1393 --- /dev/null +++ b/tutorial_scripts/07_objects.py @@ -0,0 +1,10 @@ +import weave + +# Initialize tracking to the project 'intro-example' +weave.init("intro-example") +# Save a list, giving it the name 'cat-names' +weave.publish(["felix", "jimbo", "billie"], "cat-names") + + +weave.init("intro-example") +cat_names = weave.ref("cat-names").get() diff --git a/tutorial_scripts/08_ops.py b/tutorial_scripts/08_ops.py new file mode 100644 index 00000000000..d01299b75a8 --- /dev/null +++ b/tutorial_scripts/08_ops.py @@ -0,0 +1,10 @@ +import weave + + +@weave.op() +def track_me(v): + return v + 5 + + +weave.init("intro-example") +track_me(15) diff --git a/tutorial_scripts/09_tracing.py b/tutorial_scripts/09_tracing.py new file mode 100644 index 00000000000..8843c6ca580 --- /dev/null +++ b/tutorial_scripts/09_tracing.py @@ -0,0 +1,90 @@ +import weave +from openai import OpenAI +import requests, random + +PROMPT = """Emulate the Pokedex from early Pokémon episodes. State the name of the Pokemon and then describe it. + Your tone is informative yet sassy, blending factual details with a touch of dry humor. Be concise, no more than 3 sentences. 
""" +POKEMON = [ + "pikachu", + "charmander", + "squirtle", + "bulbasaur", + "jigglypuff", + "meowth", + "eevee", +] + + +@weave.op() +def get_pokemon_data(pokemon_name): + url = f"https://pokeapi.co/api/v2/pokemon/{pokemon_name}" + response = requests.get(url) + if response.status_code == 200: + data = response.json() + name = data["name"] + types = [t["type"]["name"] for t in data["types"]] + species_url = data["species"]["url"] + species_response = requests.get(species_url) + evolved_from = "Unknown" + if species_response.status_code == 200: + species_data = species_response.json() + if species_data["evolves_from_species"]: + evolved_from = species_data["evolves_from_species"]["name"] + return {"name": name, "types": types, "evolved_from": evolved_from} + else: + return None + + +@weave.op() +def pokedex(name: str, prompt: str) -> str: + client = OpenAI() + data = get_pokemon_data(name) + if not data: + return "Error: Unable to fetch data" + response = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[ + {"role": "system", "content": prompt}, + {"role": "user", "content": str(data)}, + ], + temperature=0.7, + max_tokens=100, + top_p=1, + ) + return response.choices[0].message.content + + +weave.init("intro-example") +# Get data for a specific Pokémon +pokemon_data = pokedex(random.choice(POKEMON), PROMPT) + +import weave +import json +from openai import OpenAI + + +@weave.op() +def extract_fruit(sentence: str) -> dict: + client = OpenAI() + + response = client.chat.completions.create( + model="gpt-3.5-turbo-1106", + messages=[ + { + "role": "system", + "content": "You will be provided with unstructured data, and your task is to parse it one JSON dictionary with fruit, color and flavor as keys.", + }, + {"role": "user", "content": sentence}, + ], + temperature=0.7, + response_format={"type": "json_object"}, + ) + extracted = response.choices[0].message.content + return json.loads(extracted) + + +weave.init("intro-example") +sentence = "There 
are many fruits that were found on the recently discovered planet Goocrux. There are neoskizzles that grow there, which are purple and taste like candy." + +with weave.attributes({"user_id": "lukas", "env": "production"}): + extract_fruit(sentence) diff --git a/tutorial_scripts/10_model_serve.sh b/tutorial_scripts/10_model_serve.sh new file mode 100755 index 00000000000..6e9f6380910 --- /dev/null +++ b/tutorial_scripts/10_model_serve.sh @@ -0,0 +1 @@ +weave serve diff --git a/tutorial_scripts/11_deploy.sh b/tutorial_scripts/11_deploy.sh new file mode 100644 index 00000000000..680d9cb33cf --- /dev/null +++ b/tutorial_scripts/11_deploy.sh @@ -0,0 +1 @@ +weave deploy gcp