Commit 2b91022

Merge branch 'Azure:main' into main

JackTn authored Aug 26, 2024
2 parents c221294 + 30d040e commit 2b91022
Showing 526 changed files with 68,498 additions and 2,857 deletions.
3 changes: 0 additions & 3 deletions documentation/onboard-dpg-in-sdkautomation/java/README.md
@@ -3,7 +3,6 @@
See [Use SDK Automation from REST API specifications](https://github.com/Azure/azure-sdk-for-java/wiki/TypeSpec-Java-Quickstart#use-sdk-automation-from-rest-api-specifications) in TypeSpec Java QuickStart.

`flavor` is always `azure` for Azure SDK.
`examples-directory` is same as that in e.g. typespec-autorest emitter.

## Example for Java data-plane SDK

@@ -16,7 +15,6 @@ See [Use SDK Automation from REST API specifications](https://github.com/Azure/a
package-dir: "azure-ai-openai"
flavor: azure
namespace: "com.azure.ai.openai"
examples-directory: "{project-root}/examples"
```
## Example for Java management-plane SDK
@@ -33,7 +31,6 @@ See [Use SDK Automation from REST API specifications](https://github.com/Azure/a
flavor: "azure"
namespace: "com.azure.resourcemanager.standbypool"
service-name: "Standby Pool"
examples-directory: "{project-root}/examples"
```

# Add AutoRest Configuration for Java SDK
201 changes: 196 additions & 5 deletions specification/ai/Face/client.tsp

Large diffs are not rendered by default.

50 changes: 36 additions & 14 deletions specification/ai/Face/models.common.tsp
@@ -1,8 +1,10 @@
import "@typespec/http";
import "@typespec/rest";
import "@azure-tools/typespec-azure-core";
import "@azure-tools/typespec-client-generator-core";

using Azure.Core;
using Azure.ClientGenerator.Core;
using TypeSpec.Http;
using TypeSpec.Rest;

@@ -64,7 +66,8 @@ union DetectionModel {
"detection_03",
}

alias UserDefinedFields = {
@doc("User defined fields for object creation.")
model UserDefinedFields {
@doc("User defined name, maximum length is 128.")
@maxLength(128)
@minLength(1)
@@ -73,9 +76,10 @@ alias UserDefinedFields = {
@doc("Optional user defined data. Length should not exceed 16K.")
@maxLength(16384)
userData?: string;
};
}

alias UserDefinedFieldsForUpdate = {
@doc("User defined fields for object update.")
model UserDefinedFieldsForUpdate {
@doc("User defined name, maximum length is 128.")
@maxLength(128)
@minLength(1)
@@ -84,7 +88,7 @@ alias UserDefinedFieldsForUpdate = {
@doc("Optional user defined data. Length should not exceed 16K.")
@maxLength(16384)
userData?: string;
};
}

@doc("Common model for face list and person group.")
model BaseCollection {
@@ -94,18 +98,20 @@ model BaseCollection {
recognitionModel?: RecognitionModel;
}

alias CreateCollectionOptions = {
@doc("Model for creating face collection.")
model CreateCollectionRequest {
...UserDefinedFields;

@doc("The 'recognitionModel' associated with this face list. Supported 'recognitionModel' values include 'recognition_01', 'recognition_02, 'recognition_03', and 'recognition_04'. The default value is 'recognition_01'. 'recognition_04' is recommended since its accuracy is improved on faces wearing masks compared with 'recognition_03', and its overall accuracy is improved compared with 'recognition_01' and 'recognition_02'.")
recognitionModel?: RecognitionModel = "recognition_01";
};
}

alias FaceUserData = {
@doc("User defined data for persisted face.")
model FaceUserData {
@doc("User-provided data attached to the face. The length limit is 1K.")
@maxLength(1024)
userData?: string;
};
}

@doc("Common model for persisted face.")
@resource("persistedfaces")
@@ -118,7 +124,8 @@ model BaseFace {
...FaceUserData;
}

alias AddFaceOptions = {
@doc("Query parameters for add face.")
model AddFaceOptions {
@doc("A face rectangle to specify the target face to be added to a person, in the format of 'targetFace=left,top,width,height'.")
@query
@maxItems(4)
@@ -133,7 +140,25 @@ alias AddFaceOptions = {
@doc("User-provided data attached to the face. The size limit is 1K.")
@maxLength(1024)
userData?: string;
};
}

@doc("Add face from stream request.")
model AddFaceRequest is AddFaceOptions {
@doc("The format of the HTTP payload.")
@header
contentType: "application/octet-stream";

@doc("The image to be analyzed")
@body
imageContent: bytes;
}

@doc("Add face from url request.")
model AddFaceFromUrlRequest is AddFaceOptions {
@clientName("uri", "csharp")
@doc("URL of input image.")
url: url;
}

@doc("Response body for adding face.")
model AddFaceResult {
@@ -257,15 +282,12 @@ model GroupingResult {
}

alias AddFaceDescriptionInList = """
>
*
* Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.
* Each person entry can hold up to 248 faces.
* JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.
* "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided "targetFace" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.
* Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.
* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.
* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model
* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).
""";

alias ListRequestOptionsDescriptionInList = """
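The main refactor in models.common.tsp above promotes anonymous `alias` declarations, which are inlined at every use site, into named `@doc`-annotated `model` declarations that emitters can surface by name and that compose via spread or `is`. A minimal sketch of the pattern, with simplified placeholder properties rather than the full Face definitions:

```typespec
import "@typespec/http";

using TypeSpec.Http;

// Before: an alias has no identity of its own; its properties are inlined
// wherever it is referenced, and it cannot carry its own @doc.
alias UserFieldsAlias = {
  name: string;
  userData?: string;
};

// After: a named model is a first-class type that can be documented directly
// and reused across request shapes.
@doc("User defined fields for object creation (sketch).")
model UserFields {
  name: string;
  userData?: string;
}

// Composition by spread copies the properties into a new model ...
model CreateThingRequest {
  ...UserFields;
}

// ... while `is` copies the source model and allows extra properties,
// the same way AddFaceRequest builds on AddFaceOptions above.
model UploadThingRequest is CreateThingRequest {
  @header contentType: "application/octet-stream";
  @body imageContent: bytes;
}
```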
2 changes: 1 addition & 1 deletion specification/ai/Face/models.detect.tsp
@@ -10,7 +10,7 @@ using TypeSpec.Rest;
namespace Face;

alias FaceDetectionOptions = {
@doc("The 'detectionModel' associated with the detected faceIds. Supported 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default value is 'detection_01'.")
@doc("The 'detectionModel' associated with the detected faceIds. Supported 'detectionModel' values include 'detection_01', 'detection_02' and 'detection_03'. The default value is 'detection_01'. 'detection_03' is recommended since its accuracy is improved on smaller faces (64x64 pixels) and rotated face orientations.")
@query
detectionModel?: DetectionModel = "detection_01";

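For context on how an options alias such as `FaceDetectionOptions` is consumed: its `@query` properties are spread into an operation's parameter list, so the default shown above (`detection_01`) becomes the default query value. A simplified, hypothetical sketch of that wiring (names are placeholders, not the actual Face routes):

```typespec
import "@typespec/http";

using TypeSpec.Http;

union DetectionModelSketch {
  string,
  "detection_01",
  "detection_02",
  "detection_03",
}

alias DetectionOptionsSketch = {
  @doc("Detection model to use; defaults to 'detection_01'.")
  @query
  detectionModel?: DetectionModelSketch = "detection_01";
};

model DetectResultSketch {
  faceId?: string;
}

@post
op detectSketch(
  ...DetectionOptionsSketch,                       // becomes ?detectionModel=...
  @header contentType: "application/octet-stream",
  @body imageContent: bytes,
): DetectResultSketch[];
```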
20 changes: 3 additions & 17 deletions specification/ai/Face/routes.common.tsp
@@ -1,13 +1,11 @@
import "@typespec/http";
import "@typespec/rest";
import "@azure-tools/typespec-azure-core";
import "@azure-tools/typespec-client-generator-core";
import "./models.common.tsp";
import "./models.detect.tsp";

using TypeSpec.Http;
using TypeSpec.Rest;
using Azure.ClientGenerator.Core;
using Azure.Core;
using Azure.Core.Traits;
using Foundations;
@@ -37,7 +35,7 @@ op FaceResourceCreateWithServiceProvidedName<
@createsOrReplacesResource(TResource)
op FaceCollectionResourceCreateOperation<TResource extends TypeSpec.Reflection.Model> is Foundations.ResourceOperation<
TResource,
CreateCollectionOptions,
CreateCollectionRequest,
TypeSpec.Http.OkResponse,
ServiceTraits,
FaceErrorResponse
@@ -49,15 +47,7 @@ alias AddFaceSuccess = "A successful call returns a new persistedFaceId.";
@createsResource(TFace)
op AddFace<TFace extends BaseFace> is Foundations.ResourceCollectionOperation<
TFace,
AddFaceOptions & {
@doc("The format of the HTTP payload.")
@header
contentType: "application/octet-stream";

@doc("The image to be analyzed")
@body
imageContent: bytes;
},
AddFaceRequest,
AddFaceResult,
ServiceTraits,
FaceErrorResponse
@@ -67,11 +57,7 @@ op AddFace<TFace extends BaseFace> is Foundations.ResourceCollectionOperation<
@createsResource(TFace)
op AddFaceFromUrl<TFace extends BaseFace> is Foundations.ResourceCollectionOperation<
TFace,
AddFaceOptions & {
@clientName("uri", "csharp")
@doc("URL of input image.")
url: url;
},
AddFaceFromUrlRequest,
AddFaceResult,
ServiceTraits,
FaceErrorResponse
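One effect of the route changes above is that the inline `@clientName("uri", "csharp")` usage moves off the operation template and onto the shared `AddFaceFromUrlRequest` model. For reference, a small sketch of what that decorator does, assuming the standard `typespec-client-generator-core` library (model and property wording here are placeholders):

```typespec
import "@azure-tools/typespec-client-generator-core";

using Azure.ClientGenerator.Core;

@doc("Request model that renames one property for a single client language (sketch).")
model AddFromUrlSketch {
  // Generated C# clients expose this property as `uri`; every other language
  // and the wire format keep the original name `url`.
  @clientName("uri", "csharp")
  url: url;
}
```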
8 changes: 3 additions & 5 deletions specification/ai/Face/routes.detection.tsp
@@ -17,7 +17,7 @@ namespace Face;
@summary("Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, and attributes.")
@doc("""
> [!IMPORTANT]
> Microsoft has retired or limited facial recognition capabilities that can be used to try to infer emotional states and identity attributes which, if misused, can subject people to stereotyping, discrimination or unfair denial of services. The retired capabilities are emotion and gender. The limited capabilities are age, smile, facial hair, hair and makeup. Email Azure Face API <[email protected]> if you have a responsible use case that would benefit from the use of any of the limited capabilities. Read more about this decision https://azure.microsoft.com/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/.
> Microsoft has retired or limited facial recognition capabilities that can be used to try to infer emotional states and identity attributes which, if misused, can subject people to stereotyping, discrimination or unfair denial of services. The retired capabilities are emotion and gender. The limited capabilities are age, smile, facial hair, hair and makeup. Email [Azure Face API](mailto:[email protected]) if you have a responsible use case that would benefit from the use of any of the limited capabilities. Read more about this decision [here](https://azure.microsoft.com/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/).
*
* No image will be stored. Only the extracted face feature(s) will be stored on server. The faceId is an identifier of the face feature and will be used in \"Identify\", \"Verify\", and \"Find Similar\". The stored face features will expire and be deleted at the time specified by faceIdTimeToLive after the original detection call.
@@ -26,10 +26,8 @@ namespace Face;
* The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.
* Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.
* For optimal results when querying \"Identify\", \"Verify\", and \"Find Similar\" ('returnFaceId' is true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).
* Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model
* 'detection_02': Face attributes and landmarks are disabled if you choose this detection model.
* 'detection_03': Face attributes (mask, blur, and headPose) and landmarks are supported if you choose this detection model.
* Different 'recognitionModel' values are provided. If follow-up operations like \"Verify\", \"Identify\", \"Find Similar\" are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-recognition-model.
* Different 'detectionModel' values can be provided. The availability of landmarks and supported attributes depends on the detection model specified. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).
* Different 'recognitionModel' values are provided. If follow-up operations like \"Verify\", \"Identify\", \"Find Similar\" are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-recognition-model).
""")
@returnsDoc("A successful call returns an array of face entries ranked by face rectangle size in descending order. An empty response indicates no faces detected.")
@post
6 changes: 6 additions & 0 deletions specification/ai/Face/routes.facelist.tsp
@@ -16,13 +16,19 @@ alias AddFaceListFaceDescription = """
To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Face List Face\" or \"Delete Face List\" is called.
Note that persistedFaceId is different from faceId generated by \"Detect\".
>
*
${AddFaceDescriptionInList}
""";
alias AddLargeFaceListFaceSummary = "Add a face to a specified Large Face List, up to 1,000,000 faces.";
alias AddLargeFaceListFaceDescription = """
To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Large Face List Face\" or \"Delete Large Face List\" is called.
Note that persistedFaceId is different from faceId generated by \"Detect\".
>
*
${AddFaceDescriptionInList}
> [!NOTE]
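The `${AddFaceDescriptionInList}` references in this file interpolate a shared string alias into each route's documentation, alongside route-specific notes such as the per-person face limit. A small sketch of the interpolation pattern itself (names and wording are placeholders):

```typespec
// A documentation fragment defined once ...
alias SharedAddFaceNotes = """
  * Higher face image quality means better recognition precision.
  * JPEG, PNG, GIF (the first frame), and BMP formats are supported.
  """;

// ... and interpolated into each operation's @doc where it applies.
@doc("""
  Add a face to the example list.
  * Each person entry can hold up to 248 faces.
  ${SharedAddFaceNotes}
  """)
op addExampleFaceSketch(): void;
```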
5 changes: 4 additions & 1 deletion specification/ai/Face/routes.persondirectory.tsp
@@ -18,8 +18,11 @@ namespace Face;
To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until Person Directory \"Delete Person Face\" or \"Delete Person\" is called.
Note that persistedFaceId is different from faceId generated by \"Detect\".
${AddFaceDescriptionInList}
>
*
* Each person entry can hold up to 248 faces.
${AddFaceDescriptionInList}
* Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel.
* This is a long running operation. Use Response Header "Operation-Location" to determine when the AddFace operation has successfully propagated for future requests to \"Identify\". For further information about Operation-Locations see \"Get Face Operation Status\".
""")
8 changes: 8 additions & 0 deletions specification/ai/Face/routes.persongroup.tsp
@@ -16,13 +16,21 @@ alias AddPersonGroupPersonFaceDescription = """
To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Person Group Person Face\", \"Delete Person Group Person\" or \"Delete Person Group\" is called.
Note that persistedFaceId is different from faceId generated by \"Detect\".
>
*
* Each person entry can hold up to 248 faces.
${AddFaceDescriptionInList}
""";
alias AddLargePersonGroupPersonFaceSummary = "Add a face to a person into a Large Person Group for face identification or verification.";
alias AddLargePersonGroupPersonFaceDescription = """
To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Large Person Group Person Face\", \"Delete Large Person Group Person\" or \"Delete Large Person Group\" is called.
Note that persistedFaceId is different from faceId generated by \"Detect\".
>
*
* Each person entry can hold up to 248 faces.
${AddFaceDescriptionInList}
""";
alias CreatePersonSuccess = "A successful call returns a new personId created.";
2 changes: 1 addition & 1 deletion specification/ai/OpenAI.Assistants/README.md
@@ -2,4 +2,4 @@ title: OpenAIAssistants
clear-output-folder: false
guessResourceKey: true
isAzureSpec: true
namespace: azure.ai.openai.assistants
namespace: azure.ai.openai.assistants
@@ -0,0 +1,48 @@
{
"title": "Cancels a run that is `in_progress`.\n",
"parameters": {
"endpoint": "{endpoint}",
"api-version": "2024-07-01-preview",
"threadId": "thread_abc123",
"runId": "run_abc123"
},
"responses": {
"200": {
"body": {
"id": "run_abc123",
"object": "thread.run",
"created_at": 1699076126,
"assistant_id": "asst_abc123",
"thread_id": "thread_abc123",
"status": "cancelling",
"started_at": 1699076126,
"expires_at": 1699076726,
"cancelled_at": null,
"failed_at": null,
"completed_at": null,
"last_error": null,
"model": "gpt-4-turbo",
"instructions": "You summarize books.",
"tools": [
{
"type": "file_search"
}
],
"tool_choice": "auto",
"truncation_strategy": {
"type": "auto",
"last_messages": null
},
"max_completion_tokens": 1000,
"max_prompt_tokens": 1000,
"incomplete_details": null,
"metadata": {},
"usage": null,
"temperature": 1.0,
"top_p": 1.0,
"response_format": "auto"
}
}
},
"operationId": "CancelRun"
}
@@ -0,0 +1,28 @@
{
"title": "Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible.",
"parameters": {
"endpoint": "{endpoint}",
"api-version": "2024-07-01-preview",
"vectorStoreId": "vs_abc123",
"batchId": "vsfb_abc123"
},
"responses": {
"200": {
"body": {
"id": "vsfb_abc123",
"object": "vector_store.files_batch",
"created_at": 1699061776,
"vector_store_id": "vs_abc123",
"status": "cancelling",
"file_counts": {
"in_progress": 12,
"completed": 3,
"failed": 0,
"cancelled": 0,
"total": 15
}
}
}
},
"operationId": "CancelVectorStoreFileBatch"
}